From a561a2a484049cc63fd0959646ef5c1cb8f3c2e6 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Thu, 11 Jan 2024 13:46:49 +0100 Subject: [PATCH 01/43] [PyOV] Legacy IE Python API removal (#20908) * [PyOV] Legacy IE Python API removal * clean configs &cmake * remove ngraph * remove from yml * try to fix build * clean up * remove old api from python snippets and docs (#170) * remove old api from python snippets * pysnippets removal p2 * remove ngraph snippet * remove old api from docs * remove migration snippets * remove compatibility req after refactoring on master * fix after merge * fix after merge * Update docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst --- .github/workflows/code_style.yml | 2 - .github/workflows/coverage.yml | 2 - .github/workflows/fedora.yml | 1 - .github/workflows/linux.yml | 2 - .github/workflows/linux_arm64.yml | 2 - .github/workflows/linux_riscv.yml | 1 - .github/workflows/mac.yml | 3 - .github/workflows/mac_arm64.yml | 3 - .github/workflows/py_checks.yml | 50 - .github/workflows/windows.yml | 1 - src/bindings/python/CMakeLists.txt | 38 +- src/bindings/python/README.md | 1 - src/bindings/python/docs/build.md | 1 - src/bindings/python/setup.cfg | 1 - .../src/compatibility/ngraph/__init__.py | 222 -- .../src/compatibility/ngraph/exceptions.py | 16 - .../src/compatibility/ngraph/helpers.py | 22 - .../src/compatibility/ngraph/impl/__init__.py | 51 - .../compatibility/ngraph/impl/op/__init__.py | 23 - .../ngraph/impl/op/util/__init__.py | 16 - .../ngraph/impl/passes/__init__.py | 6 - .../compatibility/ngraph/opset1/__init__.py | 111 - .../src/compatibility/ngraph/opset1/ops.py | 2772 ----------------- .../compatibility/ngraph/opset10/__init__.py | 177 -- .../src/compatibility/ngraph/opset10/ops.py | 173 - .../compatibility/ngraph/opset11/__init__.py | 177 -- .../src/compatibility/ngraph/opset11/ops.py | 107 - .../compatibility/ngraph/opset2/__init__.py | 117 - .../src/compatibility/ngraph/opset2/ops.py | 175 -- .../compatibility/ngraph/opset3/__init__.py | 133 - .../src/compatibility/ngraph/opset3/ops.py | 609 ---- .../compatibility/ngraph/opset4/__init__.py | 143 - .../src/compatibility/ngraph/opset4/ops.py | 407 --- .../compatibility/ngraph/opset5/__init__.py | 150 - .../src/compatibility/ngraph/opset5/ops.py | 426 --- .../compatibility/ngraph/opset6/__init__.py | 152 - .../src/compatibility/ngraph/opset6/ops.py | 145 - .../compatibility/ngraph/opset7/__init__.py | 156 - .../src/compatibility/ngraph/opset7/ops.py | 157 - .../compatibility/ngraph/opset8/__init__.py | 167 - .../src/compatibility/ngraph/opset8/ops.py | 772 ----- .../compatibility/ngraph/opset9/__init__.py | 173 - .../src/compatibility/ngraph/opset9/ops.py | 326 -- .../src/compatibility/ngraph/opset_utils.py | 21 - .../compatibility/ngraph/utils/__init__.py | 4 - .../ngraph/utils/broadcasting.py | 34 - .../compatibility/ngraph/utils/decorators.py | 52 - .../ngraph/utils/input_validation.py | 124 - .../ngraph/utils/node_factory.py | 165 - .../compatibility/ngraph/utils/reduction.py | 23 - .../ngraph/utils/tensor_iterator_types.py | 180 -- .../src/compatibility/ngraph/utils/types.py | 144 - .../python/src/compatibility/openvino/.bandit | 2 - .../src/compatibility/openvino/CMakeLists.txt | 59 - .../src/compatibility/openvino/__init__.py | 62 - .../openvino/cmake/CythonConfig.cmake | 91 - .../openvino/cmake/UseCython.cmake | 298 -- .../openvino/inference_engine/CMakeLists.txt | 105 - .../openvino/inference_engine/__init__.py | 43 - 
.../openvino/inference_engine/constants.pyx | 94 - .../openvino/inference_engine/ie_api.pxd | 95 - .../openvino/inference_engine/ie_api.pyx | 1854 ----------- .../openvino/inference_engine/ie_api_impl.cpp | 680 ---- .../openvino/inference_engine/ie_api_impl.hpp | 210 -- .../inference_engine/ie_api_impl_defs.pxd | 236 -- .../openvino/requirements-dev.txt | 1 - .../src/compatibility/openvino/setup.cfg | 24 - .../src/compatibility/pyngraph/CMakeLists.txt | 85 - .../src/compatibility/pyngraph/axis_set.cpp | 43 - .../src/compatibility/pyngraph/axis_set.hpp | 11 - .../compatibility/pyngraph/axis_vector.cpp | 22 - .../compatibility/pyngraph/axis_vector.hpp | 11 - .../src/compatibility/pyngraph/coordinate.cpp | 22 - .../src/compatibility/pyngraph/coordinate.hpp | 11 - .../pyngraph/coordinate_diff.cpp | 39 - .../pyngraph/coordinate_diff.hpp | 11 - .../pyngraph/dict_attribute_visitor.cpp | 348 --- .../pyngraph/dict_attribute_visitor.hpp | 131 - .../src/compatibility/pyngraph/dimension.cpp | 174 -- .../src/compatibility/pyngraph/dimension.hpp | 11 - .../pyngraph/discrete_type_info.cpp | 44 - .../pyngraph/discrete_type_info.hpp | 11 - .../src/compatibility/pyngraph/function.cpp | 314 -- .../src/compatibility/pyngraph/function.hpp | 11 - .../src/compatibility/pyngraph/node.cpp | 309 -- .../src/compatibility/pyngraph/node.hpp | 11 - .../compatibility/pyngraph/node_factory.cpp | 108 - .../compatibility/pyngraph/node_factory.hpp | 11 - .../src/compatibility/pyngraph/node_input.cpp | 81 - .../src/compatibility/pyngraph/node_input.hpp | 11 - .../compatibility/pyngraph/node_output.cpp | 81 - .../compatibility/pyngraph/node_output.hpp | 11 - .../compatibility/pyngraph/ops/constant.cpp | 144 - .../compatibility/pyngraph/ops/constant.hpp | 11 - .../compatibility/pyngraph/ops/parameter.cpp | 41 - .../compatibility/pyngraph/ops/parameter.hpp | 11 - .../src/compatibility/pyngraph/ops/result.cpp | 22 - .../src/compatibility/pyngraph/ops/result.hpp | 11 - .../ops/util/arithmetic_reduction.cpp | 27 - .../ops/util/arithmetic_reduction.hpp | 11 - .../util/binary_elementwise_arithmetic.cpp | 18 - .../util/binary_elementwise_arithmetic.hpp | 11 - .../util/binary_elementwise_comparison.cpp | 18 - .../util/binary_elementwise_comparison.hpp | 11 - .../ops/util/binary_elementwise_logical.cpp | 17 - .../ops/util/binary_elementwise_logical.hpp | 11 - .../pyngraph/ops/util/index_reduction.cpp | 32 - .../pyngraph/ops/util/index_reduction.hpp | 11 - .../pyngraph/ops/util/op_annotations.cpp | 20 - .../pyngraph/ops/util/op_annotations.hpp | 11 - .../ops/util/regmodule_pyngraph_op_util.cpp | 20 - .../ops/util/regmodule_pyngraph_op_util.hpp | 18 - .../ops/util/unary_elementwise_arithmetic.cpp | 18 - .../ops/util/unary_elementwise_arithmetic.hpp | 11 - .../compatibility/pyngraph/partial_shape.cpp | 220 -- .../compatibility/pyngraph/partial_shape.hpp | 11 - .../pyngraph/passes/frontend_manager.cpp | 42 - .../compatibility/pyngraph/passes/manager.hpp | 11 - .../passes/regmodule_pyngraph_passes.cpp | 14 - .../passes/regmodule_pyngraph_passes.hpp | 12 - .../src/compatibility/pyngraph/pyngraph.cpp | 59 - .../src/compatibility/pyngraph/rt_map.cpp | 40 - .../src/compatibility/pyngraph/rt_map.hpp | 11 - .../src/compatibility/pyngraph/shape.cpp | 47 - .../src/compatibility/pyngraph/shape.hpp | 11 - .../src/compatibility/pyngraph/strides.cpp | 37 - .../src/compatibility/pyngraph/strides.hpp | 11 - .../pyngraph/types/element_type.cpp | 52 - .../pyngraph/types/element_type.hpp | 23 - .../types/regmodule_pyngraph_types.cpp | 13 - 
.../types/regmodule_pyngraph_types.hpp | 13 - .../src/compatibility/pyngraph/util.cpp | 64 - .../src/compatibility/pyngraph/util.hpp | 11 - .../src/compatibility/pyngraph/variant.cpp | 36 - .../src/compatibility/pyngraph/variant.hpp | 17 - 135 files changed, 2 insertions(+), 15970 deletions(-) delete mode 100644 src/bindings/python/src/compatibility/ngraph/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/exceptions.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/helpers.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/impl/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset1/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset1/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset10/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset10/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset11/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset11/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset2/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset2/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset3/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset3/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset4/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset4/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset5/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset5/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset6/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset6/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset7/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset7/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset8/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset8/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset9/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset9/ops.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/opset_utils.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/__init__.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/decorators.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/input_validation.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/node_factory.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/reduction.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py delete mode 100644 src/bindings/python/src/compatibility/ngraph/utils/types.py delete mode 100644 src/bindings/python/src/compatibility/openvino/.bandit delete mode 100644 
src/bindings/python/src/compatibility/openvino/CMakeLists.txt delete mode 100644 src/bindings/python/src/compatibility/openvino/__init__.py delete mode 100644 src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake delete mode 100644 src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp delete mode 100644 src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd delete mode 100644 src/bindings/python/src/compatibility/openvino/requirements-dev.txt delete mode 100644 src/bindings/python/src/compatibility/openvino/setup.cfg delete mode 100644 src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt delete mode 100644 src/bindings/python/src/compatibility/pyngraph/axis_set.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/axis_set.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/coordinate.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/coordinate.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/dimension.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/dimension.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/function.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/function.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_factory.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_factory.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_input.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_input.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_output.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/node_output.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp delete mode 100644 
src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/result.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/result.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/rt_map.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/rt_map.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/shape.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/shape.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/strides.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/strides.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/util.cpp delete mode 100644 
src/bindings/python/src/compatibility/pyngraph/util.hpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/variant.cpp delete mode 100644 src/bindings/python/src/compatibility/pyngraph/variant.hpp diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index dc584a9799079c..585ee997e3b585 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -24,8 +24,6 @@ jobs: run: | python3 -m pip install --upgrade pip python3 -m pip install -r ./src/bindings/python/requirements.txt - # Add for -DENABLE_PYTHON=ON, no cython - python3 -m pip install -r ./src/bindings/python/src/compatibility/openvino/requirements-dev.txt # Run cmake with -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT in order to enable codestyle check for ITT collector - name: CMake configure diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 50114d986b23c6..9e38183cf186ea 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -41,8 +41,6 @@ jobs: python3 -m pip install --upgrade pip python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ github.workspace }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running Paddle frontend unit tests python3 -m pip install -r ${{ github.workspace }}/src/frontends/paddle/tests/requirements.txt # For running ONNX frontend unit tests diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 19d32ef74e07c9..84434981be989d 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -94,7 +94,6 @@ jobs: python3 -m pip install -U pip # For Python API: build and wheel packaging python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f16b97d20c5c3b..51a59b35f51cd6 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -128,7 +128,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt @@ -204,7 +203,6 @@ jobs: run: | /usr/bin/python3.8 -m pip install -U pip /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt cmake -UPYTHON* \ -DENABLE_PYTHON_PACKAGING=ON \ -DENABLE_TESTS=OFF \ diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index b8d7709fd36a62..fd8403e0de6c53 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -127,7 +127,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r 
${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt @@ -203,7 +202,6 @@ jobs: run: | /usr/bin/python3.8 -m pip install -U pip /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt cmake -UPYTHON* \ -DENABLE_PYTHON_PACKAGING=ON \ -DENABLE_TESTS=OFF \ diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 5db7ed22a02707..088fddccf1b210 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -108,7 +108,6 @@ jobs: python3 -m venv ${OPENVINO_BUILD_DIR}/env source ${OPENVINO_BUILD_DIR}/env/bin/activate python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt python3 -m pip install conan - name: Install RISC-V native debian packages diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index c61fe6a4a9cc83..0165980d1b2f57 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -116,9 +116,6 @@ jobs: python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 02d494c7c16522..64873a9b104138 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -115,9 +115,6 @@ jobs: python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt - # For running Python API tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt - # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/.github/workflows/py_checks.yml b/.github/workflows/py_checks.yml index 80d6cad5243af3..56b58faacf0c16 100644 --- a/.github/workflows/py_checks.yml +++ b/.github/workflows/py_checks.yml @@ -53,42 +53,6 @@ jobs: name: samples_diff path: samples_diff.diff - # IE Python API Flake code-style - - name: Run flake8 on IE Python API - run: python -m flake8 ./ --config=setup.cfg - working-directory: src/bindings/python/src/compatibility/openvino - - - name: Create code style diff for IE Python API - if: failure() - run: | - python -m black -l 160 -S ./ - git diff > ie_python_diff.diff - working-directory: src/bindings/python/src/compatibility/openvino - - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: ie_python_diff - path: ie_python_diff.diff - - # nGraph Python API Flake code-style - - name: Run flake8 on nGraph Python 
API - run: python -m flake8 ./src/compatibility/ngraph --config=setup.cfg - working-directory: src/bindings/python - - - name: Create code style diff for nGraph Python API - if: failure() - run: | - python -m black -l 160 -S ./ - git diff > pyngraph_diff.diff - working-directory: src/bindings/python/src/compatibility/ngraph - - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: pyngraph_diff - path: pyngraph_diff.diff - # Python API 2.0 Flake code-style - name: Run flake8 on Python API 2.0 run: python -m flake8 ./src/openvino --config=setup.cfg @@ -131,25 +95,11 @@ jobs: run: python -m flake8 tests/ --config=setup.cfg working-directory: src/bindings/python - # IE Python API mypy check - - name: Run mypy on IE Python API - run: python -m mypy ./ --config-file ./setup.cfg - working-directory: src/bindings/python/src/compatibility/openvino - - # nGraph Python API mypy check - - name: Run mypy on nGraph Python API - run: python -m mypy ./src/compatibility/ngraph --config-file ./setup.cfg - working-directory: src/bindings/python - # Python API 2.0 mypy check - name: Run mypy on Python API 2.0 run: python -m mypy ./src/openvino --config-file ./setup.cfg working-directory: src/bindings/python - - name: Run Bandit - run: python -m bandit -r ./ -f screen - working-directory: src/bindings/python/src/compatibility/openvino - # layer_tests Flake code-style - name: Run flake8 on python tests in openvino/tests/layer_tests run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a294f6fb747000..06b36f7725d1ca 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -99,7 +99,6 @@ jobs: run: | # For Python API: build and wheel packaging python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 30abedbe2c30b7..2e2d6786bfa476 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -62,8 +62,6 @@ endif() # Check python requirements # -set(ie_build_python_req "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/requirements-dev.txt") - function(ov_check_python_build_conditions) # user explicitly specified ENABLE_PYTHON=ON if(ENABLE_PYTHON) @@ -94,26 +92,6 @@ function(ov_check_python_build_conditions) message(${message_mode} "Python 3.x Interpreter and Development.Module components are not found. 
OpenVINO Python API will be turned off (ENABLE_PYTHON is OFF)") endif() - # check for Cython requirement for build IE API 1.0 - ov_check_pip_packages(REQUIREMENTS_FILE ${ie_build_python_req} - RESULT_VAR ie_build_python_req_FOUND - WARNING_MESSAGE "install python3 -m pip install -r ${ie_build_python_req} for IE API 1.0 requirements" - MESSAGE_MODE TRACE) - - # cython can be installed as a debian package, so pip requirements can be unsatisfied - # so, let's check to find cython anyway - if(NOT ie_build_python_req_FOUND) - find_package(Cython QUIET - PATHS "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/cmake" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - if(CYTHON_VERSION VERSION_GREATER_EQUAL 0.29) - set(ie_build_python_req_FOUND ON) - else() - message(${message_mode} "Python requirements '${ie_build_python_req}' are missed, IE Python API 1.0 will not be built (ENABLE_PYTHON is OFF)") - endif() - endif() - if(NOT OV_GENERATOR_MULTI_CONFIG AND CMAKE_BUILD_TYPE STREQUAL "Debug" AND CMAKE_DEBUG_POSTFIX) set(python_debug ON) message(${message_mode} "Building python bindings in debug configuration is not supported on your platform (ENABLE_PYTHON is OFF)") @@ -121,15 +99,12 @@ function(ov_check_python_build_conditions) set(python_debug OFF) endif() - if((Python3_Development.Module_FOUND OR Python3_Development_FOUND) AND - ie_build_python_req_FOUND AND NOT python_debug) + if((Python3_Development.Module_FOUND OR Python3_Development_FOUND) AND NOT python_debug) set(ENABLE_PYTHON_DEFAULT ON PARENT_SCOPE) else() set(ENABLE_PYTHON_DEFAULT OFF PARENT_SCOPE) endif() - # to disable API 1.0 - set(ie_build_python_req_FOUND ${ie_build_python_req_FOUND} PARENT_SCOPE) endfunction() ov_check_python_build_conditions() @@ -155,7 +130,6 @@ function(ov_check_init_files_alignment init_files) endfunction() set(INIT_FILES_RUNTIME "${OpenVINOPython_SOURCE_DIR}/src/openvino/__init__.py" - "${OpenVINOPython_SOURCE_DIR}/src/compatibility/openvino/__init__.py" "${OpenVINO_SOURCE_DIR}/tools/ovc/openvino/__init__.py" "${OpenVINO_SOURCE_DIR}/tools/benchmark_tool/openvino/__init__.py") @@ -209,7 +183,7 @@ ov_dependent_option(ENABLE_WHEEL "Build wheel packages for PyPI" ${ENABLE_WHEEL_ if(NOT ENABLE_PYTHON) if(CMAKE_SOURCE_DIR STREQUAL OpenVINOPython_SOURCE_DIR) - message(FATAL_ERROR "Python OpenVINO API build requirements are not satisfied. Please, install ${ie_build_python_req}") + message(FATAL_ERROR "Python OpenVINO API build requirements are not satisfied.") else() return() endif() @@ -236,15 +210,8 @@ if(NOT pybind11_FOUND) add_subdirectory(thirdparty/pybind11 EXCLUDE_FROM_ALL) endif() -add_subdirectory(src/compatibility/pyngraph) add_subdirectory(src/pyopenvino) -if(ie_build_python_req_FOUND) - add_subdirectory(src/compatibility/openvino) -else() - message(WARNING "NOTE: Python API for OpenVINO 1.0 is disabled") -endif() - # # Packaging # @@ -312,7 +279,6 @@ macro(ov_define_setup_py_dependencies) endif() endforeach() - file(GLOB_RECURSE compat_ngraph_py_files ${OpenVINOPython_SOURCE_DIR}/src/compatibility/*.py) file(GLOB_RECURSE openvino_py_files ${OpenVINOPython_SOURCE_DIR}/src/openvino/*.py) list(APPEND ov_setup_py_deps diff --git a/src/bindings/python/README.md b/src/bindings/python/README.md index 620bf87c375ce2..c741f14776a68a 100644 --- a/src/bindings/python/README.md +++ b/src/bindings/python/README.md @@ -21,7 +21,6 @@ OpenVINO PYTHON API has the following structure: * [docs](./docs/) - folder that contains developer documentation and code examples. 
* [src](./src/) - folder with all source files for Python API. - * [src/compatibility](./src/compatibility/) - sources for compatibility API, including older modules like `ngraph` and `openvino.inference_engine`. * [src/openvino](./src/openvino/) - Python sources. * [src/openvino/preprocess](./src/openvino/preprocess/) - Torchvision to OpenVINO preprocessing converter. * [src/pyopenvino](./src/pyopenvino/) - C++ sources. diff --git a/src/bindings/python/docs/build.md b/src/bindings/python/docs/build.md index 4786332647bd6b..d0ab2e5f5f4e57 100644 --- a/src/bindings/python/docs/build.md +++ b/src/bindings/python/docs/build.md @@ -41,7 +41,6 @@ OpenVINO can be built based on specific virtual environments such as [venv](http cd pip install -r src/bindings/python/requirements.txt pip install -r src/bindings/python/requirements_test.txt - pip install -r src/bindings/python/src/compatibility/openvino/requirements-dev.txt ``` If `-DENABLE_WHEEL=ON` flag is present in `cmake` command, additionally install wheel requirements: ``` diff --git a/src/bindings/python/setup.cfg b/src/bindings/python/setup.cfg index dd8b0a75c27814..7fc407ad56e7a1 100644 --- a/src/bindings/python/setup.cfg +++ b/src/bindings/python/setup.cfg @@ -67,7 +67,6 @@ docstring-convention = google enable-extensions = G per-file-ignores = src/openvino/runtime/*/ops.py: VNE001,VNE003 - src/compatibility/ngraph/*: C101,C812,C819,CCE001,E800,N806,P101,RST201,RST202,RST203,RST206,VNE001,VNE003 src/openvino/preprocess/torchvision/*: N801, VNE001 *__init__.py: F401 diff --git a/src/bindings/python/src/compatibility/ngraph/__init__.py b/src/bindings/python/src/compatibility/ngraph/__init__.py deleted file mode 100644 index 53f10b7c60a549..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/__init__.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""ngraph module namespace, exposing factory functions for all ops and other classes.""" -# noqa: F401 - -try: - from ngraph.impl import util - - __version__ = util.get_ngraph_version_string() -except ImportError: - __version__ = "0.0.0.dev0" - - -from ngraph.impl import Dimension -from ngraph.impl import Function -from ngraph.impl import Node -from ngraph.impl import PartialShape -from ngraph.helpers import function_from_cnn -from ngraph.helpers import function_to_cnn -from ngraph.opset11 import absolute -from ngraph.opset11 import absolute as abs -from ngraph.opset11 import acos -from ngraph.opset11 import acosh -from ngraph.opset11 import adaptive_avg_pool -from ngraph.opset11 import adaptive_max_pool -from ngraph.opset11 import add -from ngraph.opset11 import asin -from ngraph.opset11 import asinh -from ngraph.opset11 import assign -from ngraph.opset11 import atan -from ngraph.opset11 import atanh -from ngraph.opset11 import avg_pool -from ngraph.opset11 import batch_norm_inference -from ngraph.opset11 import batch_to_space -from ngraph.opset11 import binary_convolution -from ngraph.opset11 import broadcast -from ngraph.opset11 import bucketize -from ngraph.opset11 import ceiling -from ngraph.opset11 import ceiling as ceil -from ngraph.opset11 import clamp -from ngraph.opset11 import concat -from ngraph.opset11 import constant -from ngraph.opset11 import convert -from ngraph.opset11 import convert_like -from ngraph.opset11 import convolution -from ngraph.opset11 import convolution_backprop_data -from ngraph.opset11 import cos -from ngraph.opset11 import cosh -from ngraph.opset11 import ctc_greedy_decoder 
-from ngraph.opset11 import ctc_greedy_decoder_seq_len -from ngraph.opset11 import ctc_loss -from ngraph.opset11 import cum_sum -from ngraph.opset11 import cum_sum as cumsum -from ngraph.opset11 import deformable_convolution -from ngraph.opset11 import deformable_psroi_pooling -from ngraph.opset11 import depth_to_space -from ngraph.opset11 import detection_output -from ngraph.opset11 import dft -from ngraph.opset11 import divide -from ngraph.opset11 import einsum -from ngraph.opset11 import elu -from ngraph.opset11 import embedding_bag_offsets_sum -from ngraph.opset11 import embedding_bag_packed_sum -from ngraph.opset11 import embedding_segments_sum -from ngraph.opset11 import extract_image_patches -from ngraph.opset11 import equal -from ngraph.opset11 import erf -from ngraph.opset11 import exp -from ngraph.opset11 import eye -from ngraph.opset11 import fake_quantize -from ngraph.opset11 import floor -from ngraph.opset11 import floor_mod -from ngraph.opset11 import gather -from ngraph.opset11 import gather_elements -from ngraph.opset11 import gather_nd -from ngraph.opset11 import gather_tree -from ngraph.opset11 import gelu -from ngraph.opset11 import generate_proposals -from ngraph.opset11 import greater -from ngraph.opset11 import greater_equal -from ngraph.opset11 import grid_sample -from ngraph.opset11 import grn -from ngraph.opset11 import group_convolution -from ngraph.opset11 import group_convolution_backprop_data -from ngraph.opset11 import gru_cell -from ngraph.opset11 import gru_sequence -from ngraph.opset11 import hard_sigmoid -from ngraph.opset11 import hsigmoid -from ngraph.opset11 import hswish -from ngraph.opset11 import idft -from ngraph.opset11 import if_op -from ngraph.opset11 import interpolate -from ngraph.opset11 import irdft -from ngraph.opset11 import is_finite -from ngraph.opset11 import is_inf -from ngraph.opset11 import is_nan -from ngraph.opset11 import i420_to_bgr -from ngraph.opset11 import i420_to_rgb -from ngraph.opset11 import less -from ngraph.opset11 import less_equal -from ngraph.opset11 import log -from ngraph.opset11 import logical_and -from ngraph.opset11 import logical_not -from ngraph.opset11 import logical_or -from ngraph.opset11 import logical_xor -from ngraph.opset11 import log_softmax -from ngraph.opset11 import loop -from ngraph.opset11 import lrn -from ngraph.opset11 import lstm_cell -from ngraph.opset11 import lstm_sequence -from ngraph.opset11 import matmul -from ngraph.opset11 import matrix_nms -from ngraph.opset11 import max_pool -from ngraph.opset11 import maximum -from ngraph.opset11 import minimum -from ngraph.opset11 import mish -from ngraph.opset11 import mod -from ngraph.opset11 import multiclass_nms -from ngraph.opset11 import multiply -from ngraph.opset11 import mvn -from ngraph.opset11 import negative -from ngraph.opset11 import non_max_suppression -from ngraph.opset11 import non_zero -from ngraph.opset11 import normalize_l2 -from ngraph.opset11 import not_equal -from ngraph.opset11 import nv12_to_bgr -from ngraph.opset11 import nv12_to_rgb -from ngraph.opset11 import one_hot -from ngraph.opset11 import pad -from ngraph.opset11 import parameter -from ngraph.opset11 import power -from ngraph.opset11 import prelu -from ngraph.opset11 import prior_box -from ngraph.opset11 import prior_box_clustered -from ngraph.opset11 import psroi_pooling -from ngraph.opset11 import proposal -from ngraph.opset11 import random_uniform -from ngraph.opset11 import range -from ngraph.opset11 import rdft -from ngraph.opset11 import read_value -from 
ngraph.opset11 import reduce_l1 -from ngraph.opset11 import reduce_l2 -from ngraph.opset11 import reduce_logical_and -from ngraph.opset11 import reduce_logical_or -from ngraph.opset11 import reduce_max -from ngraph.opset11 import reduce_mean -from ngraph.opset11 import reduce_min -from ngraph.opset11 import reduce_prod -from ngraph.opset11 import reduce_sum -from ngraph.opset11 import region_yolo -from ngraph.opset11 import reorg_yolo -from ngraph.opset11 import relu -from ngraph.opset11 import reshape -from ngraph.opset11 import result -from ngraph.opset11 import reverse_sequence -from ngraph.opset11 import rnn_cell -from ngraph.opset11 import rnn_sequence -from ngraph.opset11 import roi_align -from ngraph.opset11 import roi_pooling -from ngraph.opset11 import roll -from ngraph.opset11 import round -from ngraph.opset11 import scatter_elements_update -from ngraph.opset11 import scatter_update -from ngraph.opset11 import select -from ngraph.opset11 import selu -from ngraph.opset11 import shape_of -from ngraph.opset11 import shuffle_channels -from ngraph.opset11 import sigmoid -from ngraph.opset11 import sign -from ngraph.opset11 import sin -from ngraph.opset11 import sinh -from ngraph.opset11 import slice -from ngraph.opset11 import softmax -from ngraph.opset11 import softplus -from ngraph.opset11 import softsign -from ngraph.opset11 import space_to_batch -from ngraph.opset11 import space_to_depth -from ngraph.opset11 import split -from ngraph.opset11 import sqrt -from ngraph.opset11 import squared_difference -from ngraph.opset11 import squeeze -from ngraph.opset11 import strided_slice -from ngraph.opset11 import subtract -from ngraph.opset11 import swish -from ngraph.opset11 import tan -from ngraph.opset11 import tanh -from ngraph.opset11 import tensor_iterator -from ngraph.opset11 import tile -from ngraph.opset11 import topk -from ngraph.opset11 import transpose -from ngraph.opset11 import unique -from ngraph.opset11 import unsqueeze -from ngraph.opset11 import variadic_split - -import warnings - -warnings.warn( - message="OpenVINO nGraph Python API is deprecated and will be removed in 2024.0 release." - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html", - category=FutureWarning, - stacklevel=2, -) - -# Extend Node class to support binary operators -Node.__add__ = add -Node.__sub__ = subtract -Node.__mul__ = multiply -Node.__div__ = divide -Node.__truediv__ = divide -Node.__radd__ = lambda left, right: add(right, left) -Node.__rsub__ = lambda left, right: subtract(right, left) -Node.__rmul__ = lambda left, right: multiply(right, left) -Node.__rdiv__ = lambda left, right: divide(right, left) -Node.__rtruediv__ = lambda left, right: divide(right, left) -Node.__eq__ = equal -Node.__ne__ = not_equal -Node.__lt__ = less -Node.__le__ = less_equal -Node.__gt__ = greater -Node.__ge__ = greater_equal diff --git a/src/bindings/python/src/compatibility/ngraph/exceptions.py b/src/bindings/python/src/compatibility/ngraph/exceptions.py deleted file mode 100644 index 1597b6ec662db1..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/exceptions.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""ngraph exceptions hierarchy. 
All exceptions are descendants of NgraphError.""" - - -class NgraphError(Exception): - """Base class for Ngraph exceptions.""" - - -class UserInputError(NgraphError): - """User provided unexpected input.""" - - -class NgraphTypeError(NgraphError, TypeError): - """Type mismatch error.""" diff --git a/src/bindings/python/src/compatibility/ngraph/helpers.py b/src/bindings/python/src/compatibility/ngraph/helpers.py deleted file mode 100644 index 236adec6d8b374..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/helpers.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""nGraph helper functions.""" - -from typing import Union - -from ngraph.impl import Function -from openvino.inference_engine import IENetwork # type: ignore - - -def function_from_cnn(cnn_network: IENetwork) -> Function: - """Get nGraph function from Inference Engine CNN network.""" - capsule = cnn_network._get_function_capsule() - ng_function = Function.from_capsule(capsule) - return ng_function - - -def function_to_cnn(ng_function: Function) -> Function: - """Get Inference Engine CNN network from nGraph function.""" - capsule = Function.to_capsule(ng_function) - return IENetwork(capsule) diff --git a/src/bindings/python/src/compatibility/ngraph/impl/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/__init__.py deleted file mode 100644 index f06340f795e93b..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph -Low level wrappers for the nGraph c++ api. -""" - -# flake8: noqa - -import os -import sys - -if sys.platform == "win32": - # Installer, yum, pip installs openvino dlls to the different directories - # and those paths need to be visible to the openvino modules - # - # If you're using a custom installation of openvino, - # add the location of openvino dlls to your system PATH. - # - # looking for the libs in the pip installation path by default. - openvino_libs = [os.path.join(os.path.dirname(__file__), '..', '..', '..'), os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')] - # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. - openvino_libs_installer = os.getenv('OPENVINO_LIB_PATHS') - if openvino_libs_installer: - openvino_libs.extend(openvino_libs_installer.split(';')) - for lib in openvino_libs: - lib_path = os.path.join(os.path.dirname(__file__), lib) - if os.path.isdir(lib_path): - # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. 
- if (3, 8) <= sys.version_info: - os.add_dll_directory(os.path.abspath(lib_path)) - else: - os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] - -from _pyngraph import Dimension -from _pyngraph import Function -from _pyngraph import Input -from _pyngraph import Output -from _pyngraph import Node -from _pyngraph import Type -from _pyngraph import PartialShape -from _pyngraph import Shape -from _pyngraph import Strides -from _pyngraph import CoordinateDiff -from _pyngraph import AxisSet -from _pyngraph import AxisVector -from _pyngraph import Coordinate -from _pyngraph import Output -from _pyngraph import DiscreteTypeInfo -from _pyngraph import util diff --git a/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py deleted file mode 100644 index e854c97eab8ff6..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/op/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph.op -Low level wrappers for the nGraph c++ api in ngraph::op. -""" - -# flake8: noqa - -import numpy as np - -from _pyngraph.op import Constant - -"""Retrieve Constant inner data. - - Internally uses PyBind11 Numpy's buffer protocol. - - :return Numpy array containing internally stored constant data. -""" -Constant.get_data = lambda self: np.array(self, copy=True) - -from _pyngraph.op import Parameter diff --git a/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py deleted file mode 100644 index 85e838f0aab75f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Package: ngraph.op.util -Low level wrappers for the nGraph c++ api in ngraph::op::util. 
-""" -# flake8: noqa - -from _pyngraph.op.util import UnaryElementwiseArithmetic -from _pyngraph.op.util import BinaryElementwiseComparison -from _pyngraph.op.util import BinaryElementwiseArithmetic -from _pyngraph.op.util import BinaryElementwiseLogical -from _pyngraph.op.util import OpAnnotations -from _pyngraph.op.util import ArithmeticReduction -from _pyngraph.op.util import IndexReduction diff --git a/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py b/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py deleted file mode 100644 index 5ea0b0618b0a88..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# flake8: noqa - -from _pyngraph.passes import Manager diff --git a/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py deleted file mode 100644 index 1d960ff8bbe350..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset1/__init__.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset1.ops import binary_convolution -from ngraph.opset1.ops import broadcast -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import 
max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset1.ops import negative -from ngraph.opset1.ops import non_max_suppression -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset1.ops import shape_of -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset1.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset1/ops.py b/src/bindings/python/src/compatibility/ngraph/opset1/ops.py deleted file mode 100644 index 72a2831d8b246f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset1/ops.py +++ /dev/null @@ -1,2772 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, PartialShape, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - - 
-_get_node_factory_opset1 = partial(_get_node_factory, "opset1") - -# -------------------------------------------- ops ------------------------------------------------ - - -@unary_op -def absolute(node: NodeInput, name: Optional[str] = None) -> Node: - """Return node which applies f(x) = abs(x) to the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with Abs operation applied on it. - """ - return _get_node_factory_opset1().create("Abs", [node]) - - -@unary_op -def acos(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccos operation applied on it. - """ - return _get_node_factory_opset1().create("Acos", [node]) - - -@binary_op -def add( - left_node: NodeInput, - right_node: NodeInput, - auto_broadcast: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Return node which applies f(x) = A+B to the input nodes element-wise.""" - return _get_node_factory_opset1().create("Add", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}) - - -@unary_op -def asin(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse sine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsin operation applied on it. - """ - return _get_node_factory_opset1().create("Asin", [node]) - - -@unary_op -def atan(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply inverse tangent function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctan operation applied on it. - """ - return _get_node_factory_opset1().create("Atan", [node]) - - -@nameable_op -def avg_pool( - data_batch: NodeInput, - strides: List[int], - pads_begin: TensorShape, - pads_end: TensorShape, - kernel_shape: TensorShape, - exclude_pad: bool, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - name: Optional[str] = None, -) -> Node: - """Return average pooling node. - - :param data_batch: The input node providing data. - :param strides: The window movement strides. - :param pads_begin: The input data optional padding below filled with zeros. - :param pads_end: The input data optional padding below filled with zeros. - :param kernel_shape: The pooling window shape. - :param exclude_pad: Whether or not to include zero padding in average computations. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable - values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid'] - :param name: Optional name for the new output node. - - :return: New node with AvgPool operation applied on its data. 
- """ - if auto_pad is None: - auto_pad = "explicit" - return _get_node_factory_opset1().create( - "AvgPool", - [as_node(data_batch)], - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "kernel": kernel_shape, - "exclude-pad": exclude_pad, - "rounding_type": rounding_type.upper(), - "auto_pad": auto_pad.upper(), - }, - ) - - -@nameable_op -def batch_norm_inference( - data: NodeInput, - gamma: NodeInput, - beta: NodeInput, - mean: NodeInput, - variance: NodeInput, - epsilon: float, - name: Optional[str] = None, -) -> Node: - """Perform layer normalizes a input tensor by mean and variance with appling scale and offset. - - :param data: The input tensor with data for normalization. - :param gamma: The scalar scaling for normalized value. - :param beta: The bias added to the scaled normalized value. - :param mean: The value for mean normalization. - :param variance: The value for variance normalization. - :param epsilon: The number to be added to the variance to avoid division - by zero when normalizing a value. - :param name: The optional name of the output node. - :return: The new node which performs BatchNormInference. - """ - inputs = as_nodes(gamma, beta, data, mean, variance) - return _get_node_factory_opset1().create("BatchNormInference", inputs, {"epsilon": epsilon}) - - -@nameable_op -def binary_convolution( - data: NodeInput, - filters: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - dilations: List[int], - mode: str, - pad_value: float, - auto_pad: str = "EXPLICIT", - name: Optional[str] = None, -) -> Node: - """Create node performing convolution with binary weights, binary input and integer output. - - :param data: The node providing data batch tensor. - :param filter: The node providing filters tensor. - :param strides: The kernel window movement strides. - :param pads_begin: The number of pixels to add to the beginning along each axis. - :param pads_end: The number of pixels to add to the end along each axis. - :param dilations: The distance in width and height between elements (weights) in the filter. - :param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted. - :param pad_value: Floating-point value used to fill pad area. - :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. - :param name: The optional new name for output node. - :return: New node performing binary convolution operation. - """ - return _get_node_factory_opset1().create( - "BinaryConvolution", - as_nodes(data, filters), - { - "strides": strides, - "pads_begin": pads_begin, - "pads_end": pads_end, - "dilations": dilations, - "mode": mode, - "pad_value": pad_value, - "auto_pad": auto_pad, - }, - ) - - -@nameable_op -def broadcast( - data: NodeInput, - target_shape: NodeInput, - axes_mapping: Optional[NodeInput] = None, - mode: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. - - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result - that are being broadcast. - :param mode: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: NUMPY, EXPLICIT. - :param name: Optional new name for output node. - :return: New node with broadcast shape. 
-    """
-    inputs = as_nodes(data, target_shape)
-    if mode.upper() == "EXPLICIT":
-        inputs.append(as_node(axes_mapping))
-    return _get_node_factory_opset1().create("Broadcast", inputs, {"mode": mode.upper()})
-
-
-@nameable_op
-def ctc_greedy_decoder(
-    data: NodeInput,
-    sequence_mask: NodeInput,
-    merge_repeated: bool = True,
-    name: Optional[str] = None,
-) -> Node:
-    """Perform greedy decoding on the logits given in input (best path).
-
-    :param data: Logits on which greedy decoding is performed.
-    :param sequence_mask: The tensor with sequence masks for each sequence in the batch.
-    :param merge_repeated: The flag for merging repeated labels during the CTC calculation.
-    :param name: Optional name for output node.
-    :return: The new node performing a CTCGreedyDecoder operation on input tensor.
-    """
-    node_inputs = as_nodes(data, sequence_mask)
-    return _get_node_factory_opset1().create("CTCGreedyDecoder", node_inputs, {"ctc_merge_repeated": merge_repeated})
-
-
-@unary_op
-def ceiling(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies ceiling to the input node element-wise.
-
-    :param node: The node providing data to ceiling operation.
-    :param name: Optional name for output node.
-    :return: The node performing element-wise ceiling.
-    """
-    return _get_node_factory_opset1().create("Ceiling", [node])
-
-
-@nameable_op
-def clamp(data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None) -> Node:
-    """Perform clamp element-wise on data from input node.
-
-    :param data: Input tensor. One of: input node, array or scalar.
-    :param min_value: The lower bound of the range. Scalar value.
-    :param max_value: The upper bound of the range. Scalar value.
-    :param name: Optional output node name.
-    :return: The new node performing a clamp operation on its input data element-wise.
-
-    Performs a clipping operation on an input value between a pair of boundary values.
-
-    For each element in `data`, if the element's value is lower than `min_value`,
-    it will be replaced with `min_value`. If the value is higher than `max_value`,
-    it will be replaced by `max_value`.
-    Intermediate values of `data` are returned without change.
-
-    Clamp uses the following logic:
-
-    .. code-block:: python
-
-        if data < min_value:
-            data = min_value
-        elif data > max_value:
-            data = max_value
-    """
-    return _get_node_factory_opset1().create("Clamp", [as_node(data)], {"min": min_value, "max": max_value})
-
-
-@nameable_op
-def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Node:
-    """Concatenate input nodes into single new node along specified axis.
-
-    :param nodes: The nodes we want to concatenate into single new node.
-    :param axis: The axis along which we want to concatenate input nodes.
-    :param name: The optional new name for output node.
-    :return: Return new node that is a concatenation of input nodes.
-    """
-    return _get_node_factory_opset1().create("Concat", as_nodes(*nodes), {"axis": axis})
-
-
-@nameable_op
-def constant(value: NumericData, dtype: Optional[NumericType] = None, name: Optional[str] = None) -> Constant:
-    """Create a Constant node from provided value.
-
-    :param value: One of: array of values or scalar to initialize node with.
-    :param dtype: The data type of provided data.
-    :param name: Optional name for output node.
-    :return: The Constant node initialized with provided data.
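-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``
-    and ``import numpy as np``):
-
-    .. code-block:: python
-
-        value = ng.constant(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))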
-    """
-    return make_constant_node(value, dtype)
-
-
-@nameable_op
-def convert(data: NodeInput, destination_type: Union[str, NumericType], name: Optional[str] = None) -> Node:
-    """Return node which casts input node values to specified type.
-
-    :param data: Node which produces the input tensor.
-    :param destination_type: Provides the target type for the conversion.
-    :param name: Optional name for the output node.
-    :return: New node performing the conversion operation.
-    """
-    if not isinstance(destination_type, str):
-        destination_type = get_element_type_str(destination_type)
-    return _get_node_factory_opset1().create("Convert", [as_node(data)], {"destination_type": destination_type.lower()})
-
-
-@binary_op
-def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which casts data node values to the type of another node.
-
-    :param data: Node which produces the input tensor.
-    :param like: Node which provides the target type information for the conversion.
-    :param name: Optional name for the output node.
-    :return: New node performing the conversion operation.
-    """
-    return _get_node_factory_opset1().create("ConvertLike", [data, like])
-
-
-@nameable_op
-def convolution(
-    data: NodeInput,
-    filters: NodeInput,
-    strides: List[int],
-    pads_begin: List[int],
-    pads_end: List[int],
-    dilations: List[int],
-    auto_pad: str = "EXPLICIT",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node performing batched convolution operation.
-
-    :param data: The node providing data batch tensor.
-    :param filters: The node providing filters tensor.
-    :param strides: The kernel window movement strides.
-    :param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate.
-    :param pads_end: The number of zero padding elements to add on each axis above max coordinate.
-    :param dilations: The data batch dilation strides.
-    :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
-    :param name: The optional new name for output node.
-    :return: New node performing batched convolution operation.
-    """
-    return _get_node_factory_opset1().create(
-        "Convolution",
-        as_nodes(data, filters),
-        {
-            "strides": strides,
-            "pads_begin": pads_begin,
-            "pads_end": pads_end,
-            "dilations": dilations,
-            "auto_pad": auto_pad,
-        },
-    )
-
-
-@nameable_op
-def convolution_backprop_data(
-    data: NodeInput,
-    filters: NodeInput,
-    strides: List[int],
-    output_shape: Optional[NodeInput] = None,
-    pads_begin: Optional[List[int]] = None,
-    pads_end: Optional[List[int]] = None,
-    dilations: Optional[List[int]] = None,
-    auto_pad: Optional[str] = None,
-    output_padding: Optional[List[int]] = None,
-    name: Optional[str] = None,
-) -> Node:
-    """Create node performing a batched-convolution backprop data operation.
-
-    :param data: The node producing data from forward-prop.
-    :param filters: The node producing the filters from forward-prop.
-    :param strides: The distance (in pixels) to slide the filter on the feature map
-                    over the axes.
-    :param output_shape: The node that specifies spatial shape of the output.
-    :param pads_begin: The number of pixels to add to the beginning along each axis.
-    :param pads_end: The number of pixels to add to the end along each axis.
-    :param dilations: The distance in width and height between elements (weights)
-                      in the filter.
-    :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
-    :param output_padding: The additional amount of paddings added per each spatial axis
-                           in the output tensor.
-    :param name: The node name.
-
-    :return: The node object representing ConvolutionBackpropData operation.
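-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``;
-    the tensor shapes below are made up for the example):
-
-    .. code-block:: python
-
-        data = ng.parameter([1, 16, 10, 10], name="data")
-        filters = ng.parameter([16, 8, 3, 3], name="filters")
-        node = ng.convolution_backprop_data(data, filters, strides=[2, 2])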
-    """
-    spatial_dim_count = len(strides)
-    if pads_begin is None:
-        pads_begin = [0] * spatial_dim_count
-    if pads_end is None:
-        pads_end = [0] * spatial_dim_count
-    if dilations is None:
-        dilations = [1] * spatial_dim_count
-    if auto_pad is None:
-        auto_pad = "explicit"
-    if output_padding is None:
-        output_padding = [0] * spatial_dim_count
-    args = as_nodes(data, filters)
-    if output_shape is not None:
-        args.append(as_node(output_shape))
-
-    return _get_node_factory_opset1().create(
-        "ConvolutionBackpropData",
-        args,
-        {
-            "strides": strides,
-            "pads_begin": pads_begin,
-            "pads_end": pads_end,
-            "dilations": dilations,
-            "auto_pad": auto_pad.upper(),
-            "output_padding": output_padding,
-        },
-    )
-
-
-@unary_op
-def cos(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Apply cosine function on the input node element-wise.
-
-    :param node: One of: input node, array or scalar.
-    :param name: Optional new name for output node.
-    :return: New node with cos operation applied on it.
-    """
-    return _get_node_factory_opset1().create("Cos", [node])
-
-
-@unary_op
-def cosh(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Apply hyperbolic cosine function on the input node element-wise.
-
-    :param node: One of: input node, array or scalar.
-    :param name: Optional new name for output node.
-    :return: New node with cosh operation applied on it.
-    """
-    return _get_node_factory_opset1().create("Cosh", [node])
-
-
-@nameable_op
-def deformable_convolution(
-    data: NodeInput,
-    deformable_values: NodeInput,
-    filters: NodeInput,
-    strides: List[int],
-    pads_begin: List[int],
-    pads_end: List[int],
-    dilations: List[int],
-    auto_pad: str = "EXPLICIT",
-    group: int = 1,
-    deformable_group: int = 1,
-    name: Optional[str] = None,
-) -> Node:
-    """Create node performing deformable convolution.
-
-    :param data: The node providing data batch tensor.
-    :param deformable_values: The node providing deformable offset values tensor.
-    :param filters: The node providing filters tensor.
-    :param strides: The distance (in pixels) to slide the filter on the feature map over the axes.
-    :param pads_begin: The number of pixels to add to the beginning along each axis.
-    :param pads_end: The number of pixels to add to the end along each axis.
-    :param dilations: The distance in width and height between elements (weights) in the filter.
-    :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
-    :param group: The number of groups which both output and input should be split into.
-    :param deformable_group: The number of groups which deformable values and output should be split
-                             into along the channel axis.
-    :param name: The optional new name for output node.
-    :return: New node performing deformable convolution operation.
-    """
-    return _get_node_factory_opset1().create(
-        "DeformableConvolution",
-        as_nodes(data, deformable_values, filters),
-        {
-            "strides": strides,
-            "pads_begin": pads_begin,
-            "pads_end": pads_end,
-            "dilations": dilations,
-            "auto_pad": auto_pad,
-            "group": group,
-            "deformable_group": deformable_group,
-        },
-    )
-
-
-@nameable_op
-def deformable_psroi_pooling(
-    feature_maps: NodeInput,
-    coords: NodeInput,
-    output_dim: int,
-    spatial_scale: float,
-    group_size: int = 1,
-    mode: str = "bilinear_deformable",
-    spatial_bins_x: int = 1,
-    spatial_bins_y: int = 1,
-    trans_std: float = 1.0,
-    part_size: int = 1,
-    offsets: Optional[NodeInput] = None,
-    name: Optional[str] = None,
-) -> Node:
-    """Return node performing DeformablePSROIPooling operation.
-
-    DeformablePSROIPooling computes position-sensitive pooling
-    on regions of interest specified by input.
-
-    :param feature_maps: 4D tensor with feature maps.
-    :param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2].
-    :param output_dim: A pooled output channel number.
-    :param spatial_scale: A multiplicative spatial scale factor to translate ROI.
-    :param group_size: The number of groups to encode position-sensitive score.
-    :param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable'].
-    :param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width.
-    :param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height.
-    :param trans_std: The value that all transformation (offset) values are multiplied with.
-    :param part_size: The number of parts the output tensor spatial dimensions are divided into.
-    :param offsets: Optional node. 4D input blob with transformation values (offsets).
-    :param name: The optional new name for output node.
-    :return: New node performing DeformablePSROIPooling operation.
-    """
-    node_inputs = as_nodes(feature_maps, coords)
-    if offsets is not None:
-        node_inputs.append(as_node(offsets))
-
-    return _get_node_factory_opset1().create(
-        "DeformablePSROIPooling",
-        node_inputs,
-        {
-            "output_dim": output_dim,
-            "spatial_scale": spatial_scale,
-            "group_size": group_size,
-            "mode": mode,
-            "spatial_bins_x": spatial_bins_x,
-            "spatial_bins_y": spatial_bins_y,
-            "trans_std": trans_std,
-            "part_size": part_size,
-        },
-    )
-
-
-@nameable_op
-def depth_to_space(node: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node:
-    """Rearrange input tensor data from the depth dimension into blocks of spatial data.
-
-    Values from the depth dimension are moved in spatial blocks to the height
-    and width dimensions.
-
-    Input tensor has shape [N,C,H,W], where N is the batch axis, C is the channel or depth,
-    H is the height and W is the width.
-
-    Output node produces a tensor with shape:
-
-    [N, C / (`block_size` * `block_size`), H * `block_size`, W * `block_size`]
-
-    :param node: The node with input tensor data.
-    :param mode: Specifies how the input depth dimension is split to block coordinates
-
-                 blocks_first: The input is divided to [block_size, ..., block_size, new_depth]
-                 depth_first: The input is divided to [new_depth, block_size, ..., block_size]
-
-    :param block_size: The size of the spatial block of values describing
-                       how the tensor's data is to be rearranged.
-    :param name: Optional output node name.
-    :return: The new node performing a DepthToSpace operation on its input tensor.
-    """
-    return _get_node_factory_opset1().create(
-        "DepthToSpace",
-        [node],
-        {"mode": mode, "block_size": block_size},
-    )
-
-
-@nameable_op
-def detection_output(
-    box_logits: Node,
-    class_preds: Node,
-    proposals: Node,
-    attrs: dict,
-    aux_class_preds: NodeInput = None,
-    aux_box_preds: NodeInput = None,
-    name: Optional[str] = None,
-) -> Node:
-    """Generate the detection output using information on location and confidence predictions.
-
-    :param box_logits: The 2D input tensor with box logits.
-    :param class_preds: The 2D input tensor with class predictions.
-    :param proposals: The 3D input tensor with proposals.
-    :param attrs: The dictionary containing key, value pairs for attributes.
-    :param aux_class_preds: The 2D input tensor with additional class predictions information.
-    :param aux_box_preds: The 2D input tensor with additional box predictions information.
- :param name: Optional name for the output node. - :return: Node representing DetectionOutput operation. - - Available attributes are: - - * num_classes The number of classes to be predicted. - Range of values: positive integer number - Default value: None - Required: yes - - * background_label_id The background label id. - Range of values: integer value - Default value: 0 - Required: no - - * top_k Maximum number of results to be kept per batch after NMS step. - Range of values: integer value - Default value: -1 - Required: no - - * variance_encoded_in_target The flag that denotes if variance is encoded in target. - Range of values: {False, True} - Default value: False - Required: no - - * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step. - Range of values: integer values - Default value: None - Required: yes - - * code_type The type of coding method for bounding boxes. - Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE', - 'caffe.PriorBoxParameter.CORNER'} - Default value: 'caffe.PriorBoxParameter.CORNER' - Required: no - - * share_location The flag that denotes if bounding boxes are shared among different - classes. - Range of values: {True, False} - Default value: True - Required: no - - * nms_threshold The threshold to be used in the NMS stage. - Range of values: floating point value - Default value: None - Required: yes - - * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be - considered. - Range of values: floating point value - Default value: 0 - Required: no - - * clip_after_nms The flag that denotes whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - - * clip_before_nms The flag that denotes whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - - * decrease_label_id The flag that denotes how to perform NMS. - Range of values: False - perform NMS like in Caffe*. - True - perform NMS like in MxNet*. - - Default value: False - Required: no - - * normalized The flag that denotes whether input tensors with boxes are normalized. - Range of values: {True, False} - Default value: False - Required: no - - * input_height The input image height. - Range of values: positive integer number - Default value: 1 - Required: no - - * input_width The input image width. - Range of values: positive integer number - Default value: 1 - Required: no - - * objectness_score The threshold to sort out confidence predictions. - Range of values: non-negative float number - Default value: 0 - Required: no - - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'num_classes': 85, - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - } - - attrs = { - 'num_classes': 85, - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - 'normalized': True, - 'clip_before_nms': True, - 'input_height': [32], - 'input_width': [32], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. 
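-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``;
-    the tensor shapes below are made up for the example):
-
-    .. code-block:: python
-
-        box_logits = ng.parameter([4, 680], name="box_logits")
-        class_preds = ng.parameter([4, 170], name="class_preds")
-        proposals = ng.parameter([4, 2, 340], name="proposals")
-        attrs = {'num_classes': 85, 'keep_top_k': [1], 'nms_threshold': 0.645}
-        node = ng.detection_output(box_logits, class_preds, proposals, attrs)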
-    """
-    requirements = [
-        ("num_classes", True, np.integer, is_positive_value),
-        ("background_label_id", False, np.integer, None),
-        ("top_k", False, np.integer, None),
-        ("variance_encoded_in_target", False, np.bool_, None),
-        ("keep_top_k", True, np.integer, None),
-        ("code_type", False, np.str_, None),
-        ("share_location", False, np.bool_, None),
-        ("nms_threshold", True, np.floating, None),
-        ("confidence_threshold", False, np.floating, None),
-        ("clip_after_nms", False, np.bool_, None),
-        ("clip_before_nms", False, np.bool_, None),
-        ("decrease_label_id", False, np.bool_, None),
-        ("normalized", False, np.bool_, None),
-        ("input_height", False, np.integer, is_positive_value),
-        ("input_width", False, np.integer, is_positive_value),
-        ("objectness_score", False, np.floating, is_non_negative_value),
-    ]
-
-    check_valid_attributes("DetectionOutput", attrs, requirements)
-
-    inputs = [box_logits, class_preds, proposals]
-    if aux_class_preds is not None:
-        inputs.append(aux_class_preds)
-    if aux_box_preds is not None:
-        inputs.append(aux_box_preds)
-
-    return _get_node_factory_opset1().create("DetectionOutput", inputs, attrs)
-
-
-@binary_op
-def divide(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which applies f(A,B) = A/B to the input nodes element-wise.
-
-    :param left_node: The node providing dividend data.
-    :param right_node: The node providing divisor data.
-    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
-    :param name: Optional name for output node.
-    :return: The node performing element-wise division.
-    """
-    return _get_node_factory_opset1().create("Divide", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node:
-    """Perform Exponential Linear Unit operation element-wise on data from input node.
-
-    Computes exponential linear: alpha * (exp(data) - 1) for data < 0, data otherwise.
-
-    For more information refer to:
-    [Fast and Accurate Deep Network Learning by Exponential Linear Units](http://arxiv.org/abs/1511.07289)
-
-    :param data: Input tensor. One of: input node, array or scalar.
-    :param alpha: Scalar multiplier for negative values.
-    :param name: Optional output node name.
-    :return: The new node performing an ELU operation on its input data element-wise.
-    """
-    return _get_node_factory_opset1().create("Elu", [as_node(data)], {"alpha": alpha})
-
-
-@binary_op
-def equal(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if input nodes are equal element-wise.
-
-    :param left_node: The first input node for equal operation.
-    :param right_node: The second input node for equal operation.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional name for output new node.
-    :return: The node performing element-wise equality check.
-    """
-    return _get_node_factory_opset1().create("Equal", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@unary_op
-def erf(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which calculates Gauss error function element-wise with given tensor.
-
-    :param node: The node providing data for operation.
-    :param name: The optional name for new output node.
-    :return: The new node performing element-wise Erf operation.
-    """
-    return _get_node_factory_opset1().create("Erf", [node])
-
-
-@unary_op
-def exp(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies exponential function to the input node element-wise.
-
-    :param node: The node providing data for operation.
-    :param name: The optional name for new output node.
-    :return: The new node performing natural exponential operation.
-    """
-    return _get_node_factory_opset1().create("Exp", [node])
-
-
-@nameable_op
-def fake_quantize(
-    data: NodeInput,
-    input_low: NodeInput,
-    input_high: NodeInput,
-    output_low: NodeInput,
-    output_high: NodeInput,
-    levels: int,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    r"""Perform an element-wise linear quantization on input data.
-
-    :param data: The node with data tensor.
-    :param input_low: The node with the minimum for input values.
-    :param input_high: The node with the maximum for input values.
-    :param output_low: The node with the minimum quantized value.
-    :param output_high: The node with the maximum quantized value.
-    :param levels: The number of quantization levels. Integer value.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional new name for output node.
-    :return: New node with quantized value.
-
-    Input floating point values are quantized into a discrete set of floating point values.
-
-    .. code-block:: python
-
-        if x <= input_low:
-            output = output_low
-        elif x > input_high:
-            output = output_high
-        else:
-            output = round((x - input_low) / (input_high - input_low) * (levels - 1)) \
-                / (levels - 1) * (output_high - output_low) + output_low
-
-    Fake quantize uses the following logic:
-
-    .. math::
-
-        output = \dfrac{round \left( \dfrac{(data - input\_low) \cdot (levels-1)}
-                 {input\_high - input\_low} \right)}{levels-1} \cdot
-                 (output\_high - output\_low) + output\_low
-    """
-    return _get_node_factory_opset1().create(
-        "FakeQuantize",
-        as_nodes(data, input_low, input_high, output_low, output_high),
-        {"levels": levels, "auto_broadcast": auto_broadcast.upper()},
-    )
-
-
-@unary_op
-def floor(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies floor to the input node element-wise.
-
-    :param node: The input node providing data.
-    :param name: The optional name for new output node.
-    :return: The node performing element-wise floor operation.
-    """
-    return _get_node_factory_opset1().create("Floor", [node])
-
-
-@binary_op
-def floor_mod(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node performing element-wise FloorMod (division remainder) with two given tensors.
-
-    :param left_node: The first input node for FloorMod operation.
-    :param right_node: The second input node for FloorMod operation.
-    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
-    :param name: Optional name for output node.
-    :return: The node performing element-wise FloorMod operation.
-    """
-    return _get_node_factory_opset1().create("FloorMod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def gather(data: NodeInput, indices: NodeInput, axis: NodeInput, name: Optional[str] = None) -> Node:
-    """Return Gather node which takes slices from axis of data according to indices.
-
-    :param data: The tensor from which slices are gathered.
-    :param indices: Tensor with indexes to gather.
-    :param axis: The dimension index to gather data from.
-    :param name: Optional name for output node.
-    :return: The new node performing a Gather operation on the data input tensor.
-    """
-    node_inputs = as_nodes(data, indices, axis)
-    return _get_node_factory_opset1().create("Gather", node_inputs)
-
-
-@nameable_op
-def gather_tree(
-    step_ids: NodeInput,
-    parent_idx: NodeInput,
-    max_seq_len: NodeInput,
-    end_token: NodeInput,
-    name: Optional[str] = None,
-) -> Node:
-    """Perform GatherTree operation.
-
-    :param step_ids: The tensor with indices per each step.
-    :param parent_idx: The tensor with parent beam indices.
-    :param max_seq_len: The tensor with maximum lengths for each sequence in the batch.
-    :param end_token: The scalar tensor with value of the end marker in a sequence.
-    :param name: Optional name for output node.
-    :return: The new node performing a GatherTree operation.
-
-    The GatherTree node generates the complete beams from the indices per each step
-    and the parent beam indices.
-    GatherTree uses the following logic:
-
-    .. code-block:: python
-
-        for batch in range(BATCH_SIZE):
-            for beam in range(BEAM_WIDTH):
-                max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch])
-
-                parent = parent_idx[max_sequence_in_beam - 1, batch, beam]
-
-                for level in reversed(range(max_sequence_in_beam - 1)):
-                    final_idx[level, batch, beam] = step_idx[level, batch, parent]
-
-                    parent = parent_idx[level, batch, parent]
-
-    """
-    node_inputs = as_nodes(step_ids, parent_idx, max_seq_len, end_token)
-    return _get_node_factory_opset1().create("GatherTree", node_inputs)
-
-
-@binary_op
-def greater(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if left input node is greater than the right node element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional new name for output node.
-    :return: The node performing element-wise check whether left_node is greater than right_node.
-    """
-    return _get_node_factory_opset1().create("Greater", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def greater_equal(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if left node is greater than or equal to the right node element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional new name for output node.
-    :return: The node performing element-wise check whether left_node is greater than or equal to
-             right_node.
-    """
-    return _get_node_factory_opset1().create("GreaterEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-def grn(data: Node, bias: float, name: Optional[str] = None) -> Node:
-    r"""Perform Global Response Normalization with L2 norm (across channels only).
-
-    Computes GRN operation on channels for input tensor:
-
-    .. math:: output_i = \dfrac{input_i}{\sqrt{\sum_{j=1}^{C} input_j^2 + bias}}
-
-    :param data: The node with data tensor.
-    :param bias: The bias added to the variance. Scalar value.
-    :param name: Optional output node name.
-    :return: The new node performing a GRN operation on tensor's channels.
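-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``):
-
-    .. code-block:: python
-
-        data = ng.parameter([1, 2, 3, 4], name="data")
-        node = ng.grn(data, bias=1e-6)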
-    """
-    return _get_node_factory_opset1().create("GRN", [data], {"bias": bias})
-
-
-@nameable_op
-def group_convolution(
-    data: NodeInput,
-    filters: NodeInput,
-    strides: List[int],
-    pads_begin: List[int],
-    pads_end: List[int],
-    dilations: List[int],
-    auto_pad: str = "EXPLICIT",
-    name: Optional[str] = None,
-) -> Node:
-    """Perform Group Convolution operation on data from input node.
-
-    :param data: The node producing input data.
-    :param filters: The node producing filters data.
-    :param strides: The distance (in pixels) to slide the filter on the feature map
-                    over the axes.
-    :param pads_begin: The number of pixels to add at the beginning along each axis.
-    :param pads_end: The number of pixels to add at the end along each axis.
-    :param dilations: The distance in width and height between elements (weights) in the filter.
-    :param auto_pad: Describes how to perform padding. Possible values:
-                     EXPLICIT: Pad dimensions are explicitly specified
-                     SAME_LOWER: Pad dimensions computed to match input shape
-                                 Ceil(num_dims/2) at the beginning and
-                                 Floor(num_dims/2) at the end
-                     SAME_UPPER: Pad dimensions computed to match input shape
-                                 Floor(num_dims/2) at the beginning and
-                                 Ceil(num_dims/2) at the end
-                     VALID: No padding
-    :param name: Optional output node name.
-    :return: The new node performing a Group Convolution operation on tensor from input node.
-    """
-    return _get_node_factory_opset1().create(
-        "GroupConvolution",
-        as_nodes(data, filters),
-        {
-            "strides": strides,
-            "pads_begin": pads_begin,
-            "pads_end": pads_end,
-            "dilations": dilations,
-            "auto_pad": auto_pad.upper(),
-        },
-    )
-
-
-@nameable_op
-def group_convolution_backprop_data(
-    data: NodeInput,
-    filters: NodeInput,
-    strides: List[int],
-    output_shape: Optional[NodeInput] = None,
-    pads_begin: Optional[List[int]] = None,
-    pads_end: Optional[List[int]] = None,
-    dilations: Optional[List[int]] = None,
-    auto_pad: str = "EXPLICIT",
-    output_padding: Optional[List[int]] = None,
-    name: Optional[str] = None,
-) -> Node:
-    """Perform Group Convolution backprop data operation on data from input node.
-
-    :param data: The node producing input data.
-    :param filters: The node producing filter data.
-    :param strides: The distance (in pixels) to slide the filter on the feature map
-                    over the axes.
-    :param output_shape: The node that specifies spatial shape of the output.
-    :param pads_begin: The number of pixels to add at the beginning along each axis.
-    :param pads_end: The number of pixels to add at the end along each axis.
-    :param dilations: The distance in width and height between elements (weights)
-                      in the filter.
-    :param auto_pad: Describes how to perform padding. Possible values:
-                     EXPLICIT: Pad dimensions are explicitly specified
-                     SAME_LOWER: Pad dimensions computed to match input shape
-                                 Ceil(num_dims/2) at the beginning and
-                                 Floor(num_dims/2) at the end
-                     SAME_UPPER: Pad dimensions computed to match input shape
-                                 Floor(num_dims/2) at the beginning and
-                                 Ceil(num_dims/2) at the end
-                     VALID: No padding
-    :param output_padding: The additional amount of paddings added per each spatial axis
-                           in the output tensor.
-    :param name: Optional output node name.
-    :return: The new node performing a Group Convolution backprop data operation on tensor from input node.
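-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``;
-    the tensor shapes below are made up for the example):
-
-    .. code-block:: python
-
-        data = ng.parameter([1, 8, 10, 10], name="data")
-        filters = ng.parameter([2, 4, 2, 3, 3], name="filters")
-        node = ng.group_convolution_backprop_data(data, filters, strides=[2, 2])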
-    """
-    spatial_dim_count = len(strides)
-    if dilations is None:
-        dilations = [1] * spatial_dim_count
-    if output_padding is None:
-        output_padding = [0] * spatial_dim_count
-
-    attributes = {
-        "strides": strides,
-        "dilations": dilations,
-        "auto_pad": auto_pad.upper(),
-        "output_padding": output_padding,
-    }
-    args = as_nodes(data, filters)
-
-    if output_shape is not None:
-        args.append(as_node(output_shape))
-    else:
-        if pads_begin is None:
-            pads_begin = [0] * spatial_dim_count
-        if pads_end is None:
-            pads_end = [0] * spatial_dim_count
-        attributes["pads_begin"] = pads_begin
-        attributes["pads_end"] = pads_end
-
-    return _get_node_factory_opset1().create("GroupConvolutionBackpropData", args, attributes)
-
-
-@nameable_op
-def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[str] = None) -> Node:
-    """Perform Hard Sigmoid operation element-wise on data from input node.
-
-    :param data: The node with data tensor.
-    :param alpha: A node producing the alpha parameter.
-    :param beta: A node producing the beta parameter.
-    :param name: Optional output node name.
-    :return: The new node performing a Hard Sigmoid element-wise on input tensor.
-
-    Hard Sigmoid uses the following logic:
-
-    .. code-block:: python
-
-        y = max(0, min(1, alpha * data + beta))
-
-    """
-    return _get_node_factory_opset1().create("HardSigmoid", [data, as_node(alpha), as_node(beta)])
-
-
-@nameable_op
-def interpolate(image: Node, output_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node:
-    """Perform interpolation of independent slices in input tensor.
-
-    :param image: The node providing input tensor with data for interpolation.
-    :param output_shape: 1D tensor describing output shape for spatial axes.
-    :param attrs: The dictionary containing key, value pairs for attributes.
-    :param name: Optional name for the output node.
-    :return: Node representing interpolation operation.
-
-    Available attributes are:
-
-    * axes          Specify spatial dimension indices where interpolation is applied.
-                    Type: List of non-negative integer numbers.
-                    Required: yes.
-
-    * mode          Specifies type of interpolation.
-                    Range of values: one of {nearest, linear, cubic, area}
-                    Type: string
-                    Required: yes
-
-    * align_corners A flag that specifies whether to align corners or not. True means the
-                    alignment is applied, False means the alignment isn't applied.
-                    Range of values: True or False. Default: True.
-                    Required: no
-
-    * antialias     A flag that specifies whether to perform anti-aliasing.
-                    Range of values: False - do not perform anti-aliasing
-                                     True - perform anti-aliasing
-                    Default value: False
-                    Required: no
-
-    * pads_begin    Specify the number of pixels to add to the beginning of the image being
-                    interpolated. Specifies padding for each spatial dimension.
-                    Range of values: list of non-negative integer numbers. Default value: 0
-                    Required: no
-
-    * pads_end      Specify the number of pixels to add to the end of the image being
-                    interpolated. Specifies padding for each spatial dimension.
-                    Range of values: list of non-negative integer numbers. Default value: 0
-                    Required: no
-
-    Example of attribute dictionary:
-
-    .. code-block:: python
-
-        # just required ones
-        attrs = {
-            'axes': [2, 3],
-            'mode': 'cubic',
-        }
-
-        attrs = {
-            'axes': [2, 3],
-            'mode': 'cubic',
-            'antialias': True,
-            'pads_begin': [2, 2, 2],
-        }
-
-    Optional attributes which are absent from dictionary will be set with corresponding default.
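-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``
-    and ``import numpy as np``):
-
-    .. code-block:: python
-
-        image = ng.parameter([1, 3, 32, 32], name="image")
-        attrs = {'axes': [2, 3], 'mode': 'linear'}
-        node = ng.interpolate(image, np.array([64, 64], dtype=np.int64), attrs)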
-    """
-    requirements = [
-        ("axes", True, np.integer, is_non_negative_value),
-        ("mode", True, np.str_, None),
-        ("align_corners", False, np.bool_, None),
-        ("antialias", False, np.bool_, None),
-        ("pads_begin", False, np.integer, is_non_negative_value),
-        ("pads_end", False, np.integer, is_non_negative_value),
-    ]
-
-    check_valid_attributes("Interpolate", attrs, requirements)
-
-    return _get_node_factory_opset1().create("Interpolate", [image, as_node(output_shape)], attrs)
-
-
-@binary_op
-def less(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if left input node is less than the right node element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional new name for output node.
-    :return: The node performing element-wise check whether left_node is less than the right_node.
-    """
-    return _get_node_factory_opset1().create("Less", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def less_equal(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if left input node is less than or equal to the right node element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional new name for output node.
-    :return: The node performing element-wise check whether left_node is less than or equal to the
-             right_node.
-    """
-    return _get_node_factory_opset1().create("LessEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@unary_op
-def log(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies natural logarithm to the input node element-wise.
-
-    :param node: The input node providing data for operation.
-    :param name: The optional new name for output node.
-    :return: The new node performing log operation element-wise.
-    """
-    return _get_node_factory_opset1().create("Log", [node])
-
-
-@binary_op
-def logical_and(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which performs logical AND operation on input nodes element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
-                           to output shape axes. Range of values: numpy, explicit.
-    :param name: The optional new name for output node.
-    :return: The node performing logical AND operation on input nodes' corresponding elements.
-    """
-    return _get_node_factory_opset1().create("LogicalAnd", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@unary_op
-def logical_not(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies element-wise logical negation to the input node.
-
-    :param node: The input node providing data.
-    :param name: The optional new name for output node.
-    :return: The node performing element-wise logical NOT operation with given tensor.
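-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``):
-
-    .. code-block:: python
-
-        data = ng.parameter([4], name="data")
-        node = ng.logical_not(ng.greater(data, 0.0))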
-    """
-    return _get_node_factory_opset1().create("LogicalNot", [node])
-
-
-@binary_op
-def logical_or(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which performs logical OR operation on input nodes element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
-                           to output shape axes. Range of values: numpy, explicit.
-    :param name: The optional new name for output node.
-    :return: The node performing logical OR operation on input nodes' corresponding elements.
-    """
-    return _get_node_factory_opset1().create("LogicalOr", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def logical_xor(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which performs logical XOR operation on input nodes element-wise.
-
-    :param left_node: The first input node providing data.
-    :param right_node: The second input node providing data.
-    :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
-                           to output shape axes. Range of values: numpy, explicit.
-    :param name: The optional new name for output node.
-    :return: The node performing logical XOR operation on input nodes' corresponding elements.
-    """
-    return _get_node_factory_opset1().create("LogicalXor", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def lrn(
-    data: NodeInput,
-    axes: NodeInput,
-    alpha: float = 1,
-    beta: float = 0.5,
-    bias: float = 1,
-    size: int = 5,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs element-wise Local Response Normalization (LRN) operation.
-
-    :param data: Input data.
-    :param axes: The node providing 1D tensor with axes for the reduction.
-    :param alpha: A scale factor (usually positive).
-    :param beta: An exponent.
-    :param bias: An offset (usually positive) to avoid dividing by 0.
-    :param size: Width of the 1-D normalization window.
-    :param name: An optional name of the output node.
-    :return: The new node which performs LRN.
-    """
-    attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size}
-    return _get_node_factory_opset1().create("LRN", as_nodes(data, axes), attributes)
-
-
-@nameable_op
-def lstm_cell(
-    X: NodeInput,
-    initial_hidden_state: NodeInput,
-    initial_cell_state: NodeInput,
-    W: NodeInput,
-    R: NodeInput,
-    B: NodeInput,
-    hidden_size: int,
-    activations: Optional[List[str]] = None,
-    activations_alpha: Optional[List[float]] = None,
-    activations_beta: Optional[List[float]] = None,
-    clip: float = 0.0,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs LSTMCell operation.
-
-    :param X: The input tensor with shape: [batch_size, input_size].
-    :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size].
-    :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size].
-    :param W: The weight tensor with shape: [4*hidden_size, input_size].
-    :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size].
-    :param B: The bias tensor for gates with shape: [4*hidden_size].
-    :param hidden_size: Specifies hidden state size.
-    :param activations: The list of three activation functions for gates.
-    :param activations_alpha: The list of alpha parameters for activation functions.
- :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMCell. Node outputs count: 2. - """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) - - # P - nGraph additional input, no such input in the OV spec - peepholes_count = 3 # nGraph default - peepholes_shape = [peepholes_count * hidden_size] - peepholes_array = np.zeros(peepholes_shape) # nGraph default - data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) - default_P = make_constant_node(peepholes_array, dtype=data_dtype) - node_inputs.append(default_P) - - weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec - input_forget = False # nGraph default, no such attribute in the OV spec - - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - "weights_format": weights_format, - "input_forget": input_forget, - } - return _get_node_factory_opset1().create("LSTMCell", node_inputs, attributes) - - -@nameable_op -def lstm_sequence( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - sequence_lengths: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - direction: str, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMSequence operation. - - :param X: The input tensor. Shape: [batch_size, seq_length, input_size]. - :param initial_hidden_state: The hidden state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param initial_cell_state: The cell state tensor. - Shape: [batch_size, num_directions, hidden_size]. - :param sequence_lengths: Specifies real sequence lengths for each batch element. - Shape: [batch_size]. Integer type. - :param W: Tensor with weights for matrix multiplication operation with input portion of data. - Shape: [num_directions, 4*hidden_size, input_size]. - :param R: The tensor with weights for matrix multiplication operation with hidden state. - Shape: [num_directions, 4*hidden_size, hidden_size]. - :param B: The tensor with biases. - Shape: [num_directions, 4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param direction: Specifies if the RNN is forward, reverse, or bidirectional. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMSequence. Node outputs count: 3. 
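-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``
-    and ``import numpy as np``; the sizes below are made up for the example):
-
-    .. code-block:: python
-
-        batch, seq_len, input_size, hidden_size = 2, 5, 16, 32
-        X = ng.parameter([batch, seq_len, input_size], name="X")
-        H0 = ng.parameter([batch, 1, hidden_size], name="H0")
-        C0 = ng.parameter([batch, 1, hidden_size], name="C0")
-        lens = ng.constant(np.full([batch], seq_len, dtype=np.int32))
-        W = ng.parameter([1, 4 * hidden_size, input_size], name="W")
-        R = ng.parameter([1, 4 * hidden_size, hidden_size], name="R")
-        B = ng.parameter([1, 4 * hidden_size], name="B")
-        node = ng.lstm_sequence(X, H0, C0, lens, W, R, B, hidden_size, "forward")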
- """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B) - - # P - nGraph additional input, no such input in the OV spec - peepholes_count = 3 # nGraph default - if direction.lower() == "bidirectional": - num_directions = 2 - else: - num_directions = 1 - peepholes_shape = [num_directions, peepholes_count * hidden_size] - peepholes_array = np.zeros(peepholes_shape) # nGraph default - data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) - default_P = make_constant_node(peepholes_array, dtype=data_dtype) - node_inputs.append(default_P) - - weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec - input_forget = False # nGraph default, no such attribute in the OV spec - - attributes = { - "hidden_size": hidden_size, - "direction": direction.lower(), - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - "weights_format": weights_format, - "input_forget": input_forget, - } - return _get_node_factory_opset1().create("LSTMSequence", node_inputs, attributes) - - -@nameable_op -def matmul( - data_a: NodeInput, - data_b: NodeInput, - transpose_a: bool, - transpose_b: bool, - name: Optional[str] = None, -) -> Node: - """Return the Matrix Multiplication operation. - - :param data_a: left-hand side matrix - :param data_b: right-hand side matrix - :param transpose_a: should the first matrix be transposed before operation - :param transpose_b: should the second matrix be transposed - :return: MatMul operation node - """ - return _get_node_factory_opset1().create("MatMul", as_nodes(data_a, data_b), {"transpose_a": transpose_a, "transpose_b": transpose_b}) - - -@nameable_op -def max_pool( - data: NodeInput, - strides: List[int], - pads_begin: List[int], - pads_end: List[int], - kernel_shape: TensorShape, - rounding_type: str = "floor", - auto_pad: Optional[str] = None, - name: Optional[str] = None, -) -> Node: - """Perform max pooling operation with given parameters on provided data. - - :param data: The node providing input data. - :param strides: The distance (in pixels) to slide the filter on the feature map - over the axes. - :param pads_begin: The number of pixels to add at the beginning along each axis. - :param pads_end: The number of pixels to add at the end along each axis. - :param kernel_shape: The pooling operation kernel shape. - :param rounding_type: Determines used rounding schema when computing output shape. Acceptable - values are: ['floor', 'ceil'] - :param auto_pad: Determines how the padding is calculated. Acceptable values: - [None, 'same_upper', 'same_lower', 'valid'] - :param name: The optional name for the created output node. - - :return: The new node performing max pooling operation. 
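-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``):
-
-    .. code-block:: python
-
-        data = ng.parameter([1, 3, 32, 32], name="data")
-        node = ng.max_pool(data, strides=[2, 2], pads_begin=[0, 0],
-                           pads_end=[0, 0], kernel_shape=[2, 2])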
-    """
-    if auto_pad is None:
-        auto_pad = "explicit"
-    return _get_node_factory_opset1().create(
-        "MaxPool",
-        [as_node(data)],
-        {
-            "strides": strides,
-            "pads_begin": pads_begin,
-            "pads_end": pads_end,
-            "kernel": kernel_shape,
-            "rounding_type": rounding_type.upper(),
-            "auto_pad": auto_pad.upper(),
-        },
-    )
-
-
-@binary_op
-def maximum(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which applies the maximum operation to input nodes element-wise."""
-    return _get_node_factory_opset1().create("Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def minimum(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which applies the minimum operation to input nodes element-wise."""
-    return _get_node_factory_opset1().create("Minimum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def mod(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node performing element-wise division remainder with two given tensors.
-
-    :param left_node: The first input node for mod operation.
-    :param right_node: The second input node for mod operation.
-    :param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
-    :param name: Optional name for output node.
-    :return: The node performing element-wise Mod operation.
-    """
-    return _get_node_factory_opset1().create("Mod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@binary_op
-def multiply(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which applies f(A,B) = A*B to the input nodes element-wise."""
-    return _get_node_factory_opset1().create("Multiply", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@unary_op
-def negative(node: NodeInput, name: Optional[str] = None) -> Node:
-    """Return node which applies f(x) = -x to the input node element-wise."""
-    return _get_node_factory_opset1().create("Negative", [node])
-
-
-@nameable_op
-def non_max_suppression(
-    boxes: NodeInput,
-    scores: NodeInput,
-    max_output_boxes_per_class: Optional[NodeInput] = None,
-    iou_threshold: Optional[NodeInput] = None,
-    score_threshold: Optional[NodeInput] = None,
-    box_encoding: str = "corner",
-    sort_result_descending: bool = True,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs NonMaxSuppression.
-
-    :param boxes: Tensor with box coordinates.
-    :param scores: Tensor with box scores.
-    :param max_output_boxes_per_class: Tensor specifying the maximum number of boxes
-                                       to be selected per class.
-    :param iou_threshold: Tensor specifying intersection over union threshold.
-    :param score_threshold: Tensor specifying minimum score to consider box for the processing.
-    :param box_encoding: Format of boxes data encoding. Range of values: corner or center.
-    :param sort_result_descending: Flag that specifies whether to sort selected
-                                   boxes across batches or not.
-    :param name: The optional name for the output node.
-    :return: The new node which performs NonMaxSuppression.
-    """
-    if max_output_boxes_per_class is None:
-        max_output_boxes_per_class = make_constant_node(0, np.int64)
-    if iou_threshold is None:
-        iou_threshold = make_constant_node(0, np.float32)
-    if score_threshold is None:
-        score_threshold = make_constant_node(0, np.float32)
-
-    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
-    attributes = {
-        "box_encoding": box_encoding,
-        "sort_result_descending": sort_result_descending,
-    }
-
-    return _get_node_factory_opset1().create("NonMaxSuppression", inputs, attributes)
-
-
-@nameable_op
-def normalize_l2(data: NodeInput, axes: NodeInput, eps: float, eps_mode: str, name: Optional[str] = None) -> Node:
-    """Construct a NormalizeL2 operation.
-
-    :param data: Node producing the input tensor.
-    :param axes: Node indicating axes along which L2 reduction is calculated.
-    :param eps: The epsilon added to L2 norm.
-    :param eps_mode: How eps is combined with L2 value (`add` or `max`).
-    :param name: Optional name for the output node.
-    :return: New node which performs the L2 normalization.
-    """
-    return _get_node_factory_opset1().create("NormalizeL2", as_nodes(data, axes), {"eps": eps, "mode": eps_mode})
-
-
-@binary_op
-def not_equal(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which checks if input nodes are unequal element-wise.
-
-    :param left_node: The first input node for not-equal operation.
-    :param right_node: The second input node for not-equal operation.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :param name: The optional name for output new node.
-    :return: The node performing element-wise inequality check.
-    """
-    return _get_node_factory_opset1().create("NotEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def one_hot(
-    indices: NodeInput,
-    depth: NodeInput,
-    on_value: NodeInput,
-    off_value: NodeInput,
-    axis: int,
-    name: Optional[str] = None,
-) -> Node:
-    """Create node performing one-hot encoding on input data.
-
-    :param indices: Input tensor of rank N with indices of any supported integer data type.
-    :param depth: Scalar of any supported integer type that specifies number of classes and
-                  the size of one-hot dimension.
-    :param on_value: Scalar of any type that is the value that the locations
-                     in output tensor represented by indices in input take.
-    :param off_value: Scalar of any type that is the value that the locations not represented
-                      by indices in input take.
-    :param axis: The axis along which the new one-hot dimension is added.
-    :param name: The optional name for new output node.
-    :return: New node performing one-hot operation.
-    """
-    return _get_node_factory_opset1().create("OneHot", as_nodes(indices, depth, on_value, off_value), {"axis": axis})
-
-
-@nameable_op
-def pad(
-    arg: NodeInput,
-    pads_begin: NodeInput,
-    pads_end: NodeInput,
-    pad_mode: str,
-    arg_pad_value: Optional[NodeInput] = None,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a generic padding operation.
-
-    :param arg: The node producing input tensor to be padded.
-    :param pads_begin: Number of padding elements to be added before position 0
-                       on each axis of arg.
-    :param pads_end: Number of padding elements to be added after the last element.
-    :param pad_mode: "constant", "edge", "reflect" or "symmetric".
-    :param arg_pad_value: Value used for padding if pad_mode is "constant".
-    :param name: Optional name for the output node.
-    :return: Pad operation node.
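-
-    A minimal usage sketch (illustrative only; assumes ``import ngraph as ng``
-    and ``import numpy as np``):
-
-    .. code-block:: python
-
-        data = ng.parameter([3, 4], name="data")
-        node = ng.pad(data, np.array([0, 1], dtype=np.int64),
-                      np.array([0, 1], dtype=np.int64), "constant",
-                      arg_pad_value=np.array(0.5, dtype=np.float32))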
-    """
-    input_nodes = as_nodes(arg, pads_begin, pads_end)
-    if arg_pad_value is not None:
-        input_nodes.append(as_node(arg_pad_value))
-
-    pad_mode = pad_mode.upper()
-    return _get_node_factory_opset1().create("Pad", input_nodes, {"pad_mode": pad_mode})
-
-
-@nameable_op
-def parameter(shape: TensorShape, dtype: NumericType = np.float32, name: Optional[str] = None) -> Parameter:
-    """Return an ngraph Parameter object.
-
-    :param shape: The shape of the output tensor.
-    :param dtype: The type of elements of the output tensor.
-    :param name: Optional name for output node.
-    :return: The new Parameter node.
-    """
-    element_type = get_element_type(dtype)
-    return Parameter(element_type, PartialShape(shape))
-
-
-@binary_op
-def power(
-    left_node: NodeInput,
-    right_node: NodeInput,
-    auto_broadcast: str = "NUMPY",
-    name: Optional[str] = None,
-) -> Node:
-    """Return node which performs element-wise exponentiation operation.
-
-    :param left_node: The node providing the base of operation.
-    :param right_node: The node providing the exponent of operation.
-    :param name: The optional name for the new output node.
-    :param auto_broadcast: The type of broadcasting specifies rules used for
-                           auto-broadcasting of input tensors.
-    :return: The new node performing element-wise exponentiation operation on input nodes.
-    """
-    return _get_node_factory_opset1().create("Power", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node:
-    """Perform Parametrized Relu operation element-wise on data from input node.
-
-    :param data: The node with data tensor.
-    :param slope: The node with the multipliers for negative values.
-    :param name: Optional output node name.
-    :return: The new node performing a PRelu operation on tensor's channels.
-
-    PRelu uses the following logic:
-
-    .. code-block:: python
-
-        if data < 0:
-            data = data * slope
-
-    """
-    return _get_node_factory_opset1().create("PRelu", as_nodes(data, slope))
-
-
-@nameable_op
-def prior_box_clustered(output_size: Node, image_size: NodeInput, attrs: dict, name: Optional[str] = None) -> Node:
-    """Generate prior boxes of specified sizes normalized to the input image size.
-
-    :param output_size: 1D tensor with two integer elements [height, width]. Specifies the
-                        spatial size of generated grid with boxes.
-    :param image_size: 1D tensor with two integer elements [image_height, image_width] that
-                       specifies shape of the image for which boxes are generated.
-    :param attrs: The dictionary containing key, value pairs for attributes.
-    :param name: Optional name for the output node.
-    :return: Node representing PriorBoxClustered operation.
-
-    Available attributes are:
-
-    * widths        Specifies desired boxes widths in pixels.
-                    Range of values: floating point positive numbers.
-                    Default value: 1.0
-                    Required: no
-
-    * heights       Specifies desired boxes heights in pixels.
-                    Range of values: floating point positive numbers.
-                    Default value: 1.0
-                    Required: no
-
-    * clip          The flag that denotes if each value in the output tensor should be clipped
-                    within [0,1].
-                    Range of values: {True, False}
-                    Default value: True
-                    Required: no
-
-    * step_widths   The distance between box centers.
-                    Range of values: floating point positive number
-                    Default value: 0.0
-                    Required: no
-
-    * step_heights  The distance between box centers.
-                    Range of values: floating point positive number
-                    Default value: 0.0
-                    Required: no
-
-    * offset        The shift of box relative to the top left corner.
-                    Range of values: floating point positive number
-                    Default value: None
-                    Required: yes
-
-    * variance      Denotes a variance of adjusting bounding boxes.
- Range of values: floating point positive numbers
- Default value: []
- Required: no
-
- Example of attribute dictionary:
-
- .. code-block:: python
-
- # just required ones
- attrs = {
- 'offset': 85,
- }
-
- attrs = {
- 'offset': 85,
- 'clip': False,
- 'widths': [1.5, 2.0, 2.5]
- }
-
- Optional attributes which are absent from dictionary will be set with corresponding default.
- """
- requirements = [
- ("widths", False, np.floating, is_positive_value),
- ("heights", False, np.floating, is_positive_value),
- ("clip", False, np.bool_, None),
- ("step_widths", False, np.floating, is_positive_value),
- ("step_heights", False, np.floating, is_positive_value),
- ("offset", True, np.floating, is_positive_value),
- ("variance", False, np.floating, is_positive_value),
- ]
-
- check_valid_attributes("PriorBoxClustered", attrs, requirements)
-
- return _get_node_factory_opset1().create("PriorBoxClustered", [output_size, as_node(image_size)], attrs)
-
-
-@nameable_op
-def prior_box(layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node:
- """Generate prior boxes of specified sizes and aspect ratios across all dimensions.
-
- :param layer_shape: Shape of layer for which prior boxes are computed.
- :param image_shape: Shape of image to which prior boxes are scaled.
- :param attrs: The dictionary containing key, value pairs for attributes.
- :param name: Optional name for the output node.
- :return: Node representing prior box operation.
-
- Available attributes are:
-
- * min_size The minimum box size (in pixels).
- Range of values: positive floating point numbers
- Default value: []
- Required: no
-
- * max_size The maximum box size (in pixels).
- Range of values: positive floating point numbers
- Default value: []
- Required: no
-
- * aspect_ratio Aspect ratios of prior boxes.
- Range of values: set of positive floating point numbers
- Default value: []
- Required: no
-
- * flip The flag that denotes that each aspect_ratio is duplicated and flipped.
- Range of values: {True, False}
- Default value: False
- Required: no
-
- * clip The flag that denotes if each value in the output tensor should be clipped
- to [0,1] interval.
- Range of values: {True, False}
- Default value: False
- Required: no
-
- * step The distance between box centers.
- Range of values: floating point non-negative number
- Default value: 0
- Required: no
-
- * offset This is the shift of the box relative to the top left corner.
- Range of values: floating point non-negative number
- Default value: None
- Required: yes
-
- * variance The variance denotes a variance of adjusting bounding boxes. The attribute
- could contain 0, 1 or 4 elements.
- Range of values: floating point positive numbers
- Default value: []
- Required: no
-
- * scale_all_sizes The flag that denotes type of inference.
- Range of values: False - max_size is ignored
- True - max_size is used
- Default value: True
- Required: no
-
- * fixed_ratio This is an aspect ratio of a box.
- Range of values: a list of positive floating-point numbers
- Default value: None
- Required: no
-
- * fixed_size This is an initial box size (in pixels).
- Range of values: a list of positive floating-point numbers
- Default value: None
- Required: no
-
- * density This is the square root of the number of boxes of each type.
- Range of values: a list of positive floating-point numbers
- Default value: None
- Required: no
-
- Example of attribute dictionary:
-
- .. code-block:: python
-
- # just required ones
- attrs = {
- 'offset': 85,
- }
-
- attrs = {
- 'offset': 85,
- 'flip': True,
- 'clip': True,
- 'fixed_size': [32, 64, 128]
- }
-
- Optional attributes which are absent from dictionary will be set with corresponding default.
- """
- requirements = [
- ("offset", True, np.floating, is_non_negative_value),
- ("min_size", False, np.floating, is_positive_value),
- ("max_size", False, np.floating, is_positive_value),
- ("aspect_ratio", False, np.floating, is_positive_value),
- ("flip", False, np.bool_, None),
- ("clip", False, np.bool_, None),
- ("step", False, np.floating, is_non_negative_value),
- ("variance", False, np.floating, is_positive_value),
- ("scale_all_sizes", False, np.bool_, None),
- ("fixed_ratio", False, np.floating, is_positive_value),
- ("fixed_size", False, np.floating, is_positive_value),
- ("density", False, np.floating, is_positive_value),
- ]
-
- check_valid_attributes("PriorBox", attrs, requirements)
-
- return _get_node_factory_opset1().create("PriorBox", [layer_shape, as_node(image_shape)], attrs)
-
-
-@nameable_op
-def proposal(
- class_probs: Node,
- bbox_deltas: Node,
- image_shape: NodeInput,
- attrs: dict,
- name: Optional[str] = None,
-) -> Node:
- """Filter bounding boxes and output only those with the highest prediction confidence.
-
- :param class_probs: 4D input floating point tensor with class prediction scores.
- :param bbox_deltas: 4D input floating point tensor with box logits.
- :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
- :param attrs: The dictionary containing key, value pairs for attributes.
- :param name: Optional name for the output node.
- :return: Node representing Proposal operation.
-
- Available attributes are:
-
- * base_size The size of the anchor to which scale and ratio attributes are applied.
- Range of values: a positive unsigned integer number
- Default value: None
- Required: yes
-
- * pre_nms_topn The number of bounding boxes before the NMS operation.
- Range of values: a positive unsigned integer number
- Default value: None
- Required: yes
-
- * post_nms_topn The number of bounding boxes after the NMS operation.
- Range of values: a positive unsigned integer number
- Default value: None
- Required: yes
-
- * nms_thresh The minimum value of the proposal to be taken into consideration.
- Range of values: a positive floating-point number
- Default value: None
- Required: yes
-
- * feat_stride The step size to slide over boxes (in pixels).
- Range of values: a positive unsigned integer
- Default value: None
- Required: yes
-
- * min_size The minimum size of box to be taken into consideration.
- Range of values: a positive unsigned integer number
- Default value: None
- Required: yes
-
- * ratio The ratios for anchor generation.
- Range of values: a list of floating-point numbers
- Default value: None
- Required: yes
-
- * scale The scales for anchor generation.
- Range of values: a list of floating-point numbers
- Default value: None
- Required: yes
-
- * clip_before_nms The flag that specifies whether to perform clip bounding boxes before
- non-maximum suppression or not.
- Range of values: True or False
- Default value: True
- Required: no
-
- * clip_after_nms The flag that specifies whether to perform clip bounding boxes after
- non-maximum suppression or not.
- Range of values: True or False
- Default value: False
- Required: no
-
- * normalize The flag that specifies whether to perform normalization of output boxes to
- [0,1] interval or not.
- Range of values: True or False
- Default value: False
- Required: no
-
- * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding.
- Range of values: a positive floating-point number
- Default value: 1.0
- Required: no
-
- * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates
- before decoding.
- Range of values: a positive floating-point number
- Default value: 1.0
- Required: no
-
- * framework Specifies how the box coordinates are calculated.
- Range of values: "" (empty string) - calculate box coordinates like in Caffe*
- tensorflow - calculate box coordinates like in the TensorFlow*
- Object Detection API models
- Default value: "" (empty string)
- Required: no
-
- Example of attribute dictionary:
-
- .. code-block:: python
-
- # just required ones
- attrs = {
- 'base_size': 85,
- 'pre_nms_topn': 10,
- 'post_nms_topn': 20,
- 'nms_thresh': 0.34,
- 'feat_stride': 16,
- 'min_size': 32,
- 'ratio': [0.1, 1.5, 2.0, 2.5],
- 'scale': [2, 3, 3, 4],
- }
-
- Optional attributes which are absent from dictionary will be set with corresponding default.
- """
- requirements = [
- ("base_size", True, np.unsignedinteger, is_positive_value),
- ("pre_nms_topn", True, np.unsignedinteger, is_positive_value),
- ("post_nms_topn", True, np.unsignedinteger, is_positive_value),
- ("nms_thresh", True, np.floating, is_positive_value),
- ("feat_stride", True, np.unsignedinteger, is_positive_value),
- ("min_size", True, np.unsignedinteger, is_positive_value),
- ("ratio", True, np.floating, None),
- ("scale", True, np.floating, None),
- ("clip_before_nms", False, np.bool_, None),
- ("clip_after_nms", False, np.bool_, None),
- ("normalize", False, np.bool_, None),
- ("box_size_scale", False, np.floating, is_positive_value),
- ("box_coordinate_scale", False, np.floating, is_positive_value),
- ("framework", False, np.str_, None),
- ]
-
- check_valid_attributes("Proposal", attrs, requirements)
-
- return _get_node_factory_opset1().create("Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs)
-
-
-@nameable_op
-def psroi_pooling(
- input: NodeInput,
- coords: NodeInput,
- output_dim: int,
- group_size: int,
- spatial_scale: float,
- spatial_bins_x: int,
- spatial_bins_y: int,
- mode: str,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which produces a PSROIPooling operation.
-
- :param input: Input feature map {N, C, ...}
- :param coords: Coordinates of bounding boxes
- :param output_dim: Output channel number
- :param group_size: Number of groups to encode position-sensitive scores
- :param spatial_scale: Ratio of input feature map over input image size
- :param spatial_bins_x: Number of bins to divide the input feature maps over
- the width dimension
- :param spatial_bins_y: Number of bins to divide the input feature maps over
- the height dimension
- :param mode: Mode of pooling - "avg" or "bilinear"
- :return: PSROIPooling node
- """
- mode = mode.lower()
- return _get_node_factory_opset1().create(
- "PSROIPooling",
- as_nodes(input, coords),
- {
- "output_dim": output_dim,
- "group_size": group_size,
- "spatial_scale": spatial_scale,
- "spatial_bins_x": spatial_bins_x,
- "spatial_bins_y": spatial_bins_y,
- "mode": mode,
- },
- )
-
-
-@nameable_op
-def range(start: Node, stop: NodeInput, step: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which produces the Range operation.
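-
- For example, a minimal sketch (assuming ``import ngraph as ng`` and ``import numpy as np``; values are illustrative only):
-
- .. code-block:: python
-
- start = ng.constant(0, dtype=np.int32)
- stop = ng.constant(10, dtype=np.int32)
- step = ng.constant(2, dtype=np.int32)
- node = ng.range(start, stop, step)  # should produce [0, 2, 4, 6, 8]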
-
- :param start: The start value of the generated range
- :param stop: The stop value of the generated range
- :param step: The step value for the generated range
- :param name: Optional name for output node.
- :return: Range node
- """
- return _get_node_factory_opset1().create("Range", as_nodes(start, stop, step))
-
-
-@unary_op
-def relu(node: NodeInput, name: Optional[str] = None) -> Node:
- """Perform rectified linear unit operation on input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: The optional output node name.
- :return: The new node performing relu operation on its input element-wise.
- """
- return _get_node_factory_opset1().create("Relu", [node])
-
-
-@nameable_op
-def reduce_logical_and(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Logical AND reduction operation on input tensor, eliminating the specified reduction axes.
-
- :param node: The tensor we want to reduce.
- :param reduction_axes: The axes to eliminate through AND operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceLogicalAnd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_logical_or(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Logical OR reduction operation on input tensor, eliminating the specified reduction axes.
-
- :param node: The tensor we want to reduce.
- :param reduction_axes: The axes to eliminate through OR operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceLogicalOr", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_max(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Max-reduction operation on input tensor, eliminating the specified reduction axes.
-
- :param node: The tensor we want to max-reduce.
- :param reduction_axes: The axes to eliminate through max operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing max-reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceMax", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_mean(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Mean-reduction operation on input tensor, eliminating the specified reduction axes.
-
- :param node: The tensor we want to mean-reduce.
- :param reduction_axes: The axes to eliminate through mean operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing mean-reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceMean", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_min(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Min-reduction operation on input tensor, eliminating the specified reduction axes.
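-
- For example, a minimal sketch (assuming ``import ngraph as ng`` and ``import numpy as np``; values are illustrative only):
-
- .. code-block:: python
-
- data = ng.constant(np.array([[1, 2], [3, 4]], dtype=np.float32))
- axes = ng.constant([1], dtype=np.int64)
- node = ng.reduce_min(data, axes, keep_dims=True)
- # expected output shape [2, 1] with values [[1], [3]]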
-
- :param node: The tensor we want to min-reduce.
- :param reduction_axes: The axes to eliminate through min operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing min-reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceMin", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_prod(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Product-reduction operation on input tensor, eliminating the specified reduction axes.
-
- :param node: The tensor we want to product-reduce.
- :param reduction_axes: The axes to eliminate through product operation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: Optional name for output node.
- :return: The new node performing product-reduction operation.
- """
- return _get_node_factory_opset1().create("ReduceProd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def reduce_sum(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node:
- """Perform element-wise sums of the input tensor, eliminating the specified reduction axes.
-
- :param node: The node providing data for operation.
- :param reduction_axes: The axes to eliminate through summation.
- :param keep_dims: If set to True, the reduced axes are kept in the output with size 1
- :param name: The optional new name for output node.
- :return: The new node performing summation along `reduction_axes` element-wise.
- """
- return _get_node_factory_opset1().create("ReduceSum", as_nodes(node, reduction_axes), {"keep_dims": keep_dims})
-
-
-@nameable_op
-def region_yolo(
- input: Node,
- coords: int,
- classes: int,
- num: int,
- do_softmax: bool,
- mask: List[int],
- axis: int,
- end_axis: int,
- anchors: Optional[List[float]] = None,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which produces the RegionYolo operation.
-
- :param input: Input data
- :param coords: Number of coordinates for each region
- :param classes: Number of classes for each region
- :param num: Number of regions
- :param do_softmax: Compute softmax
- :param mask: Mask specifying which anchors to use
- :param axis: Axis to begin softmax on
- :param end_axis: Axis to end softmax on
- :param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes
- :param name: Optional name for output node.
- :return: RegionYolo node
- """
- if anchors is None:
- anchors = []
-
- return _get_node_factory_opset1().create(
- "RegionYolo",
- [input],
- {
- "coords": coords,
- "classes": classes,
- "num": num,
- "do_softmax": do_softmax,
- "mask": mask,
- "axis": axis,
- "end_axis": end_axis,
- "anchors": anchors,
- },
- )
-
-
-@nameable_op
-def reshape(node: NodeInput, output_shape: NodeInput, special_zero: bool, name: Optional[str] = None) -> Node:
- """Return reshaped node according to provided parameters.
-
- :param node: The tensor we want to reshape.
- :param output_shape: The node with a new shape for input tensor.
- :param special_zero: The boolean variable that controls how zero values in shape are
- interpreted. If special_zero is false, then 0 is interpreted as-is
- which means that output shape will contain a zero dimension at the
- specified location. Input and output tensors are empty in this case.
- If special_zero is true, then all zeros in shape imply the copying
- of corresponding dimensions from data.shape into the output shape.
- Range of values: False or True
- :return: The node performing the reshape operation.
- """
- return _get_node_factory_opset1().create("Reshape", as_nodes(node, output_shape), {"special_zero": special_zero})
-
-
-@unary_op
-def result(data: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which represents an output of a graph (Function).
-
- :param data: The tensor containing the input data
- :return: Result node
- """
- return _get_node_factory_opset1().create("Result", [data])
-
-
-@nameable_op
-def reverse_sequence(
- input: NodeInput,
- seq_lengths: NodeInput,
- batch_axis: NumericData,
- seq_axis: NumericData,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which produces a ReverseSequence operation.
-
- :param input: tensor with input data to reverse
- :param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor.
- :param batch_axis: index of the batch dimension.
- :param seq_axis: index of the sequence dimension.
- :return: ReverseSequence node
- """
- return _get_node_factory_opset1().create(
- "ReverseSequence",
- as_nodes(input, seq_lengths),
- {"batch_axis": batch_axis, "seq_axis": seq_axis},
- )
-
-
-@nameable_op
-def select(
- cond: NodeInput,
- then_node: NodeInput,
- else_node: NodeInput,
- auto_broadcast: str = "numpy",
- name: Optional[str] = None,
-) -> Node:
- """Perform an element-wise selection operation on input tensors.
-
- :param cond: Tensor with selection mask of type `boolean`.
- :param then_node: Tensor providing data to be selected if respective `cond`
- item value is `True`.
- :param else_node: Tensor providing data to be selected if respective `cond`
- item value is `False`.
- :param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors.
- :param name: The optional new name for output node.
- :return: The new node with values selected according to provided arguments.
- """
- inputs = as_nodes(cond, then_node, else_node)
- return _get_node_factory_opset1().create("Select", inputs, {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def selu(data: NodeInput, alpha: NodeInput, lambda_value: NodeInput, name: Optional[str] = None) -> Node:
- """Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise.
-
- :param data: input node, array or scalar.
- :param alpha: Alpha coefficient of SELU operation
- :param lambda_value: Lambda coefficient of SELU operation
- :param name: The optional output node name.
- :return: The new node performing a SELU operation on its input element-wise.
- """
- return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value))
-
-
-@nameable_op
-def shape_of(data: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which produces a tensor containing the shape of its input data.
-
- :param data: The tensor containing the input data.
- :return: ShapeOf node
- """
- return _get_node_factory_opset1().create("ShapeOf", [as_node(data)])
-
-
-@unary_op
-def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which applies the sigmoid function element-wise.
-
- :param data: The tensor containing the input data
- :return: Sigmoid node
- """
- return _get_node_factory_opset1().create("Sigmoid", [data])
-
-
-@unary_op
-def sign(node: NodeInput, name: Optional[str] = None) -> Node:
- """Perform element-wise sign operation.
-
- :param node: One of: input node, array or scalar.
- :param name: The optional new name for output node.
- :return: The node with mapped elements of the input tensor to -1 (if it is negative),
- 0 (if it is zero), or 1 (if it is positive).
- """
- return _get_node_factory_opset1().create("Sign", [node])
-
-
-@unary_op
-def sin(node: NodeInput, name: Optional[str] = None) -> Node:
- """Apply sine function on the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: Optional new name for output node.
- :return: New node with sin operation applied on it.
- """
- return _get_node_factory_opset1().create("Sin", [node])
-
-
-@unary_op
-def sinh(node: NodeInput, name: Optional[str] = None) -> Node:
- """Apply hyperbolic sine function on the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: Optional new name for output node.
- :return: New node with sinh operation applied on it.
- """
- return _get_node_factory_opset1().create("Sinh", [node])
-
-
-@nameable_op
-def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
- """Apply softmax operation on each element of input tensor.
-
- :param data: The tensor providing input data.
- :param axis: An axis along which Softmax should be calculated
- :return: The new node with softmax operation applied on each element.
- """
- return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis})
-
-
-@nameable_op
-def space_to_depth(data: Node, mode: str, block_size: int = 1, name: Optional[str] = None) -> Node:
- """Perform SpaceToDepth operation on the input tensor.
-
- SpaceToDepth rearranges blocks of spatial data into depth.
- The operator returns a copy of the input tensor where values from the height
- and width dimensions are moved to the depth dimension.
-
- :param data: The node with data tensor.
- :param mode: Specifies how the output depth dimension is gathered from block coordinates.
-
- blocks_first: The output depth is gathered from [block_size, ..., block_size, C]
- depth_first: The output depth is gathered from [C, block_size, ..., block_size]
-
- :param block_size: The size of the block of values to be moved. Scalar value.
- :param name: Optional output node name.
- :return: The new node performing a SpaceToDepth operation on input tensor.
- """
- return _get_node_factory_opset1().create(
- "SpaceToDepth",
- [data],
- {"mode": mode, "block_size": block_size},
- )
-
-
-@nameable_op
-def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] = None) -> Node:
- """Return a node which splits the input tensor into same-length slices.
-
- :param data: The input tensor to be split
- :param axis: Axis along which the input data will be split
- :param num_splits: Number of the output tensors that should be produced
- :return: Split node
- """
- return _get_node_factory_opset1().create("Split", as_nodes(data, axis), {"num_splits": num_splits})
-
-
-@unary_op
-def sqrt(node: NodeInput, name: Optional[str] = None) -> Node:
- """Return node which applies square root to the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: Optional new name for output node.
- :return: The new node with sqrt operation applied element-wise.
- """
- return _get_node_factory_opset1().create("Sqrt", [node])
-
-
-@binary_op
-def squared_difference(x1: NodeInput, x2: NodeInput, auto_broadcast: str = "NUMPY", name: Optional[str] = None) -> Node:
- r"""Perform an element-wise squared difference between two tensors.
-
- .. math:: y[i] = (x_1[i] - x_2[i])^2
-
- :param x1: The node with first input tensor.
- :param x2: The node with second input tensor.
- :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
- to output shape axes. Range of values: numpy, explicit.
- :param name: Optional new name for output node.
- :return: The new node performing a squared difference between two tensors.
- """
- return _get_node_factory_opset1().create("SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@nameable_op
-def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node:
- """Perform squeeze operation on input tensor.
-
- :param data: The node with data tensor.
- :param axes: List of non-negative integers, indicating the dimensions to squeeze.
- One of: input node or array.
- :param name: Optional new name for output node.
- :return: The new node performing a squeeze operation on input tensor.
-
- Remove single-dimensional entries from the shape of a tensor.
- Takes a parameter `axes` with a list of axes to squeeze.
- If an `axis` is selected with shape entry not equal to one, an error is raised.
-
- For example:
-
- Inputs: tensor with shape [1, 2, 1, 3, 1, 1], axes=[2, 4]
-
- Result: tensor with shape [1, 2, 3, 1]
- """
- return _get_node_factory_opset1().create("Squeeze", as_nodes(data, axes))
-
-
-@nameable_op
-def strided_slice(
- data: NodeInput,
- begin: NodeInput,
- end: NodeInput,
- strides: NodeInput,
- begin_mask: List[int],
- end_mask: List[int],
- new_axis_mask: Optional[List[int]] = None,
- shrink_axis_mask: Optional[List[int]] = None,
- ellipsis_mask: Optional[List[int]] = None,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which extracts a strided slice of the input data tensor.
-
- :param data: The tensor to be sliced
- :param begin: 1D tensor with begin indexes for input blob slicing
- :param end: 1D tensor with end indexes for input blob slicing
- :param strides: The slicing strides
- :param begin_mask: A mask applied to the 'begin' input indicating which elements
- should be ignored
- :param end_mask: A mask applied to the 'end' input indicating which elements
- should be ignored
- :param new_axis_mask: A mask indicating dimensions where '1' should be inserted
- :param shrink_axis_mask: A mask indicating which dimensions should be deleted
- :param ellipsis_mask: Indicates positions where missing dimensions should be inserted
- :return: StridedSlice node
- """
- if new_axis_mask is None:
- new_axis_mask = []
- if shrink_axis_mask is None:
- shrink_axis_mask = []
- if ellipsis_mask is None:
- ellipsis_mask = []
- attributes = {
- "begin_mask": begin_mask,
- "end_mask": end_mask,
- "new_axis_mask": new_axis_mask,
- "shrink_axis_mask": shrink_axis_mask,
- "ellipsis_mask": ellipsis_mask,
- }
-
- return _get_node_factory_opset1().create("StridedSlice", as_nodes(data, begin, end, strides), attributes)
-
-
-@binary_op
-def subtract(
- left_node: NodeInput,
- right_node: NodeInput,
- auto_broadcast: str = "NUMPY",
- name: Optional[str] = None,
-) -> Node:
- """Return node which applies f(A, B) = A - B to the input nodes element-wise.
-
- :param left_node: The node providing data for left hand side of operator.
- :param right_node: The node providing data for right hand side of operator.
- :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
- to output shape axes. Range of values: numpy, explicit.
- :param name: The optional name for output node.
- :return: The new output node performing subtraction operation on both tensors element-wise.
- """
- return _get_node_factory_opset1().create("Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()})
-
-
-@unary_op
-def tan(node: NodeInput, name: Optional[str] = None) -> Node:
- """Apply tangent function on the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: Optional new name for output node.
- :return: New node with tan operation applied on it.
- """
- return _get_node_factory_opset1().create("Tan", [node])
-
-
-@unary_op
-def tanh(node: NodeInput, name: Optional[str] = None) -> Node:
- """Return node which applies hyperbolic tangent to the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: Optional new name for output node.
- :return: New node with tanh operation applied on it.
- """
- return _get_node_factory_opset1().create("Tanh", [node])
-
-
-@nameable_op
-def tensor_iterator(
- inputs: List[Node],
- graph_body: GraphBody,
- slice_input_desc: List[TensorIteratorSliceInputDesc],
- merged_input_desc: List[TensorIteratorMergedInputDesc],
- invariant_input_desc: List[TensorIteratorInvariantInputDesc],
- body_output_desc: List[TensorIteratorBodyOutputDesc],
- concat_output_desc: List[TensorIteratorConcatOutputDesc],
- name: Optional[str] = None,
-) -> Node:
- """Perform recurrent execution of the network described in the body, iterating through the data.
-
- :param inputs: The inputs provided to the TensorIterator operator.
- :param graph_body: The graph representing the body we execute.
- :param slice_input_desc: The descriptors describing sliced inputs, that is nodes
- representing tensors we iterate through, processing single
- data slice in one iteration.
- :param merged_input_desc: The descriptors describing merged inputs, that is nodes
- representing variables with initial value at first iteration,
- which may be changing through iterations.
- :param invariant_input_desc: The descriptors describing invariant inputs, that is nodes
- representing variables with persistent value through all
- iterations.
- :param body_output_desc: The descriptors describing body outputs from specified
- iteration.
- :param concat_output_desc: The descriptors describing specified output values through
- all the iterations concatenated into one node.
- :param name: The optional name for output node.
- :return: Node representing TensorIterator operation.
- """
- attributes = {
- "body": graph_body.serialize(),
- "input_descriptions": {
- "slice_input_desc": [desc.serialize() for desc in slice_input_desc],
- "merged_input_desc": [desc.serialize() for desc in merged_input_desc],
- "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc],
- },
- "output_descriptions": {
- "body_output_desc": [desc.serialize() for desc in body_output_desc],
- "concat_output_desc": [desc.serialize() for desc in concat_output_desc],
- },
- }
-
- return _get_node_factory_opset1().create("TensorIterator", as_nodes(*inputs), attributes)
-
-
-@nameable_op
-def tile(data: NodeInput, repeats: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which dynamically repeats (replicates) the input data tensor.
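-
- For example, a minimal sketch (assuming ``import ngraph as ng`` and ``import numpy as np``; values are illustrative only):
-
- .. code-block:: python
-
- data = ng.constant(np.array([[1, 2], [3, 4]], dtype=np.int64))
- repeats = ng.constant([2, 1], dtype=np.int64)
- node = ng.tile(data, repeats)  # expected output shape: [4, 2]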
-
- :param data: The input tensor to be tiled
- :param repeats: Per-dimension replication factors
- :return: Tile node
- """
- return _get_node_factory_opset1().create("Tile", as_nodes(data, repeats))
-
-
-@nameable_op
-def topk(
- data: NodeInput,
- k: NodeInput,
- axis: int,
- mode: str,
- sort: str,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which performs TopK.
-
- :param data: Input data.
- :param k: The number of elements to extract along the given axis.
- :param axis: The axis along which TopK is computed.
- :param mode: Compute TopK largest ('max') or smallest ('min')
- :param sort: Order of output elements (sort by: 'none', 'index' or 'value')
- :return: The new node which performs TopK (both indices and values)
- """
- return _get_node_factory_opset1().create(
- "TopK",
- as_nodes(data, k),
- {"axis": axis, "mode": mode, "sort": sort},
- )
-
-
-@nameable_op
-def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which transposes the data in the input tensor.
-
- :param data: The input tensor to be transposed
- :param input_order: Permutation of axes to be applied to the input tensor
- :return: Transpose node
- """
- return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order))
-
-
-@nameable_op
-def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node:
- """Perform unsqueeze operation on input tensor.
-
- Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes,
- a list of dimensions that will be inserted.
- Dimension indices in axes are as seen in the output tensor.
-
- For example: Inputs: tensor with shape [3, 4, 5], axes=[0, 4]
- Result: tensor with shape [1, 3, 4, 5, 1]
-
- :param data: The node with data tensor.
- :param axes: List of non-negative integers, indicating the dimensions to be inserted.
- One of: input node or array.
- :return: The new node performing an unsqueeze operation on input tensor.
- """
- return _get_node_factory_opset1().create("Unsqueeze", as_nodes(data, axes))
-
-
-@nameable_op
-def variadic_split(data: NodeInput, axis: NodeInput, split_lengths: NodeInput, name: Optional[str] = None) -> Node:
- """Return a node which splits the input tensor into variadic length slices.
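-
- For example, a minimal sketch (assuming ``import ngraph as ng`` and ``import numpy as np``; names and values are illustrative only):
-
- .. code-block:: python
-
- data = ng.parameter([2, 9], name="data")
- axis = ng.constant(1, dtype=np.int64)
- split_lengths = ng.constant([3, 4, 2], dtype=np.int64)
- node = ng.variadic_split(data, axis, split_lengths)
- # expected output shapes: [2, 3], [2, 4], [2, 2]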
- - :param data: The input tensor to be split - :param axis: Axis along which the input data will be split - :param split_lengths: Sizes of the output tensors along the split axis - :return: VariadicSplit node - """ - return _get_node_factory_opset1().create("VariadicSplit", as_nodes(data, axis, split_lengths)) diff --git a/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py deleted file mode 100644 index ff4e4e4b39483a..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset10/__init__.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell 
-from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset10.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset10.ops import is_finite -from ngraph.opset10.ops import is_inf -from ngraph.opset10.ops import is_nan -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice 
-from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset10.ops import unique -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset10/ops.py b/src/bindings/python/src/compatibility/ngraph/opset10/ops.py deleted file mode 100644 index b27c21e9dcbab0..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset10/ops.py +++ /dev/null @@ -1,173 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all openvino ops.""" -from functools import partial -from typing import List, Optional - -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.types import ( - NodeInput, - as_nodes, - as_node, - make_constant_node, -) - -_get_node_factory_opset4 = partial(_get_node_factory, "opset4") -_get_node_factory_opset10 = partial(_get_node_factory, "opset10") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def interpolate( - image: NodeInput, - output_shape: NodeInput, - scales: NodeInput, - mode: str, - shape_calculation_mode: str, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - nearest_mode: str = "round_prefer_floor", - antialias: bool = False, - cube_coeff: float = -0.75, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Perform interpolation of independent slices in input tensor. - - :param image: The node providing input tensor with data for interpolation. - :param output_shape: 1D tensor describing output shape for spatial axes. - :param scales: 1D tensor describing scales for spatial axes. - :param mode: Specifies type of interpolation. Possible values are: nearest, linear, - linear_onnx, cubic. - :param shape_calculation_mode: - Specifies which input, sizes or scales, is used to calculate an output - shape. - :param pads_begin: Specifies the number of pixels to add to the beginning of the image - being interpolated. Default is None. - :param pads_end: Specifies the number of pixels to add to the end of the image being - interpolated. Default is None. - :param coordinate_transformation_mode: - Specifies how to transform the coordinate in the resized tensor to the - coordinate in the original tensor. Default is "half_pixel". - :param nearest_mode: Specifies round mode when mode == nearest and is used only when - mode == nearest. Default is "round_prefer_floor". - :param antialias: Specifies whether to perform anti-aliasing. Default is False. - :param cube_coeff: Specifies the parameter a for cubic interpolation. Default is -0.75. 
- :param axes: 1D tensor specifying dimension indices where interpolation is applied. - Default is None. - :param name: Optional name for the output node. Default is None. - :return: Node representing interpolation operation. - """ - attrs = { - "mode": mode, - "shape_calculation_mode": shape_calculation_mode, - "coordinate_transformation_mode": coordinate_transformation_mode, - "nearest_mode": nearest_mode, - "antialias": antialias, - "cube_coeff": cube_coeff, - } - - attrs["pads_begin"] = [] if pads_begin is None else pads_begin - attrs["pads_end"] = [] if pads_end is None else pads_end - - inputs = as_nodes(image, output_shape, scales) if axes is None else as_nodes(image, output_shape, scales, axes) - - # This is an update of the operator version, so even though this is opset 10, - # the operator is taken from opset 4. - return _get_node_factory_opset4().create("Interpolate", inputs, attrs) - - -@nameable_op -def is_finite(data: NodeInput, name: Optional[str] = None) -> Node: - """Performs element-wise mapping from NaN and Infinity to False. Other values are mapped to True. - - :param data: A tensor of floating-point numeric type and arbitrary shape. - :param name: Optional name for the output node. The default is None. - :return: Node representing is_finite operation. - """ - return _get_node_factory_opset10().create("IsFinite", as_nodes(data)) - - -@nameable_op -def is_inf( - data: NodeInput, - attributes: Optional[dict] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs IsInf operation. - - :param data: The input tensor. - :param attributes: Optional dictionary containing IsInf attributes. - :param name: Optional name of the node. - - Available attributes: - - * detect_negative Specifies whether to map negative infinities to true in output map. - Range of values: true, false - Default value: true - Required: no - * detect_positive Specifies whether to map positive infinities to true in output map. - Range of values: true, false - Default value: true - Required: no - - :return: A new IsInf node. - """ - if not attributes: - attributes = {} - return _get_node_factory_opset10().create("IsInf", as_nodes(data), attributes) - - -@nameable_op -def is_nan(data: NodeInput, name: Optional[str] = None) -> Node: - """Performs element-wise mapping from NaN to True. Other values are mapped to False. - - :param data: A tensor of floating point numeric type and arbitrary shape. - :param name: Optional name for the output node. Default is None. - :return: Node representing is_nan operation. - """ - return _get_node_factory_opset10().create("IsNaN", as_nodes(data)) - - -@nameable_op -def unique( - data: NodeInput, - axis: Optional[NodeInput] = None, - sorted: Optional[bool] = True, - index_element_type: Optional[str] = "i64", - count_element_type: Optional[str] = "i64", - name: Optional[str] = None, -) -> Node: - """Operator which selects and returns unique elements or unique slices of the input tensor. - - :param data: Input data tensor. - :param axis: (Optional) An input tensor containing the axis value. - If not provided or None, data input is considered as a flattened tensor. - Default value: None. - :param sorted: (Optional) Controls the order of the returned unique values, - sorts ascendingly when true. - Default value: True. - :param index_element_type: (Optional) The data type set for outputs containing indices. - Default value: "i64". - :param count_element_type: (Optional) The data type set for the output with repetition count. - Default value: "i64". 
- :param name: (Optional) A name for the output node. Default value: None. - :return: Node representing Unique operation. - """ - if axis is None: - inputs = as_nodes(data) - else: - inputs = as_nodes(data, axis) - - attributes = { - "sorted": sorted, - "index_element_type": index_element_type, - "count_element_type": count_element_type, - } - return _get_node_factory_opset10().create("Unique", inputs, attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py deleted file mode 100644 index 047c93e4cc03d3..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset11/__init__.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution 
-from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset11.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset10.ops import is_finite -from ngraph.opset10.ops import is_inf -from ngraph.opset10.ops import is_nan -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign 
-from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice -from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset11.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset10.ops import unique -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset11/ops.py b/src/bindings/python/src/compatibility/ngraph/opset11/ops.py deleted file mode 100644 index 3a4b54059ca6fc..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset11/ops.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all openvino ops.""" -from functools import partial -from typing import List, Optional - -from ngraph.impl import Node -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import nameable_op -from ngraph.utils.types import ( - NodeInput, - as_nodes, -) - -_get_node_factory_opset11 = partial(_get_node_factory, "opset11") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def interpolate( - image: NodeInput, - scales_or_sizes: NodeInput, - mode: str, - shape_calculation_mode: str, - pads_begin: Optional[List[int]] = None, - pads_end: Optional[List[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - nearest_mode: str = "round_prefer_floor", - antialias: bool = False, - cube_coeff: float = -0.75, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Performs the interpolation of the input tensor. - - :param image: The node providing input tensor with data for interpolation. - :param scales_or_sizes: - 1D tensor providing information used to calculate the output shape - of the operation. It might contain floats (scales) or integers(sizes). - :param mode: Specifies type of interpolation. Possible values are: nearest, linear, - linear_onnx, cubic, bilinear_pillow, bicubic_pillow. - :param shape_calculation_mode: - Specifies how the scales_or_sizes input should be interpreted. - :param pads_begin: Specifies the number of pixels to add to the beginning of the image - being interpolated. Default is None. - :param pads_end: Specifies the number of pixels to add to the end of the image being - interpolated. Default is None. - :param coordinate_transformation_mode: - Specifies how to transform the coordinate in the resized tensor to the - coordinate in the original tensor. Default is "half_pixel". - :param nearest_mode: Specifies round mode when mode == nearest and is used only when - mode == nearest. Default is "round_prefer_floor". - :param antialias: Specifies whether to perform anti-aliasing. Default is False. - :param cube_coeff: Specifies the parameter a for cubic interpolation. 
Default is -0.75. - :param axes: 1D tensor specifying dimension indices where interpolation is applied. - The default is None. - :param name: Optional name for the output node. The default is None. - :return: Node representing the interpolation operation. - """ - attrs = { - "mode": mode, - "shape_calculation_mode": shape_calculation_mode, - "coordinate_transformation_mode": coordinate_transformation_mode, - "nearest_mode": nearest_mode, - "antialias": antialias, - "cube_coeff": cube_coeff, - } - - attrs["pads_begin"] = [] if pads_begin is None else pads_begin - attrs["pads_end"] = [] if pads_end is None else pads_end - - inputs = as_nodes(image, scales_or_sizes) if axes is None else as_nodes(image, scales_or_sizes, axes) - - return _get_node_factory_opset11().create("Interpolate", inputs, attrs) - - -@nameable_op -def topk( - data: NodeInput, - k: NodeInput, - axis: int, - mode: str, - sort: str, - index_element_type: str = "i32", - stable: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs TopK. - - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :param index_element_type: Type of output tensor with indices. - :param stable: Specifies whether the equivalent elements should maintain - their relative order from the input tensor during sorting. - :return: The new node which performs TopK - """ - return _get_node_factory_opset11().create( - "TopK", - as_nodes(data, k), - {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type, "stable": stable}, - ) diff --git a/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py deleted file mode 100644 index cd30551ec091b0..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset2/__init__.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset1.ops import broadcast -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import 
floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset1.ops import non_max_suppression -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset2.ops import roi_pooling -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset1.ops import shape_of -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset1.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset2/ops.py b/src/bindings/python/src/compatibility/ngraph/opset2/ops.py deleted file mode 100644 index 412e8a7bd894b3..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset2/ops.py +++ /dev/null @@ -1,175 +0,0 @@ -# 
Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset2 = partial(_get_node_factory, "opset2") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def batch_to_space( - data: NodeInput, - block_shape: NodeInput, - crops_begin: NodeInput, - crops_end: NodeInput, - name: Optional[str] = None, -) -> Node: - """Perform BatchToSpace operation on the input tensor. - - BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions. - - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. - :param crops_end: Specifies the amount to crop from the end along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a BatchToSpace operation. - """ - return _get_node_factory_opset2().create("BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end)) - - -@unary_op -def gelu(node: NodeInput, name: Optional[str] = None) -> Node: - r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node. - - Computes GELU function: - - .. math:: f(x) = 0.5\cdot x\cdot(1 + erf(\dfrac{x}{\sqrt{2}})) - - For more information refer to [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf) - - :param node: Input tensor. One of: input node, array or scalar. - :param name: Optional output node name. - :return: The new node performing a GELU operation on its input data element-wise. - """ - return _get_node_factory_opset2().create("Gelu", [node]) - - -@nameable_op -def mvn( - data: Node, - across_channels: bool = False, - normalize_variance: bool = False, - eps: float = 1e-9, - name: Optional[str] = None, -) -> Node: - r"""Perform Mean Variance Normalization operation on data from input node. - - Computes MVN on the input tensor `data` (called `X`) using formula: - - .. math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} - - :param data: The node with data tensor. - :param across_channels: Denotes if mean values are shared across channels. - :param normalize_variance: Denotes whether to perform variance normalization. - :param eps: The number added to the variance to avoid division by zero - when normalizing the value. Scalar value. - :param name: Optional output node name. - :return: The new node performing an MVN operation on input tensor.
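For context while reviewing, a minimal usage sketch of the opset2 bindings removed above (assuming the pre-removal `ngraph` package is installed; shapes and attribute values are illustrative, not taken from this patch):

    import numpy as np
    import ngraph as ng
    from ngraph.opset2 import gelu, mvn

    # GELU followed by MVN on a small NCHW tensor.
    x = ng.parameter([1, 3, 4, 4], name="x", dtype=np.float32)  # opset1 parameter
    y = gelu(x)                                                 # 0.5 * x * (1 + erf(x / sqrt(2)))
    z = mvn(y, across_channels=True, normalize_variance=True)   # zero mean / unit variance; eps guards the division

In API 2.0 the equivalent graph is built from the `openvino.runtime` opset modules (e.g. `from openvino.runtime import opset11 as ops`), which is the migration path this patch series targets.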
- """ - return _get_node_factory_opset2().create( - "MVN", - [data], - {"across_channels": across_channels, "normalize_variance": normalize_variance, "eps": eps}, - ) - - -@nameable_op -def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node: - """Return a node which produces the ReorgYolo operation. - - :param input: Input data - :param stride: Stride to reorganize input by - :param name: Optional name for output node. - :return: ReorgYolo node - """ - return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride}) - - -@nameable_op -def roi_pooling( - input: NodeInput, - coords: NodeInput, - output_size: TensorShape, - spatial_scale: NumericData, - method: str, - name: Optional[str] = None, -) -> Node: - """Return a node which produces an ROIPooling operation. - - :param input: Input feature map {N, C, ...} - :param coords: Coordinates of bounding boxes - :param output_size: Height/Width of ROI output features (shape) - :param spatial_scale: Ratio of input feature map over input image size (float) - :param method: Method of pooling - string: "max" or "bilinear" - :return: ROIPooling node - """ - method = method.lower() - return _get_node_factory_opset2().create( - "ROIPooling", - as_nodes(input, coords), - {"output_size": Shape(output_size), "spatial_scale": spatial_scale, "method": method}, - ) - - -@nameable_op -def space_to_batch( - data: NodeInput, - block_shape: NodeInput, - pads_begin: NodeInput, - pads_end: NodeInput, - name: Optional[str] = None, -) -> Node: - """Perform SpaceToBatch operation on the input tensor. - - SpaceToBatch permutes data tensor blocks of spatial data into batch dimension. - The operator returns a copy of the input tensor where values from spatial blocks dimensions - are moved in the batch dimension - - :param data: Node producing the data tensor. - :param block_shape: The sizes of the block of values to be moved. - :param pads_begin: Specifies the padding for the beginning along each axis of `data`. - :param pads_end: Specifies the padding for the ending along each axis of `data`. - :param name: Optional output node name. - :return: The new node performing a SpaceToBatch operation. 
- """ - return _get_node_factory_opset2().create("SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end)) diff --git a/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py deleted file mode 100644 index 06cd926cc4513c..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset3/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset1.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative 
-from ngraph.opset3.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset1.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py b/src/bindings/python/src/compatibility/ngraph/opset3/ops.py deleted file mode 100644 index 7d7c757d9cd5dc..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - 
TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset3 = partial(_get_node_factory, "opset3") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param new_value: Node producing a value to be assigned to a variable. - :param variable_id: Id of a variable to be updated. - :param name: Optional name for output node. - :return: Assign node - """ - return _get_node_factory_opset3().create("Assign", [as_node(new_value)], {"variable_id": variable_id}) - - -@nameable_op -def broadcast( - data: NodeInput, - target_shape: NodeInput, - axes_mapping: Optional[NodeInput] = None, - broadcast_spec: str = "NUMPY", - name: Optional[str] = None, -) -> Node: - """Create a node which broadcasts the input node's values along specified axes to a desired shape. - - :param data: The node with input tensor data. - :param target_shape: The node with a new shape we want to broadcast tensor to. - :param axes_mapping: The node with a axis positions (0-based) in the result - that are being broadcast. - :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes - to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL. - :param name: Optional new name for output node. - :return: New node with broadcast shape. - """ - inputs = as_nodes(data, target_shape) - if broadcast_spec.upper() == "EXPLICIT": - inputs.append(as_node(axes_mapping)) - return _get_node_factory_opset3().create("Broadcast", inputs, {"mode": broadcast_spec.upper()}) - - -@nameable_op -def bucketize( - data: Node, - buckets: NodeInput, - output_type: str = "i64", - with_right_bound: bool = True, - name: Optional[str] = None, -) -> Node: - """Return a node which produces the Bucketize operation. - - :param data: Input data to bucketize - :param buckets: 1-D of sorted unique boundaries for buckets - :param output_type: Output tensor type, "i64" or "i32", defaults to i64 - :param with_right_bound: indicates whether bucket includes the right or left - edge of interval. default true = includes right edge - :param name: Optional name for output node. - :return: Bucketize node - """ - return _get_node_factory_opset3().create( - "Bucketize", - [data, as_node(buckets)], - {"output_type": output_type, "with_right_bound": with_right_bound}, - ) - - -@nameable_op -def cum_sum( - arg: NodeInput, - axis: NodeInput, - exclusive: bool = False, - reverse: bool = False, - name: Optional[str] = None, -) -> Node: - """Construct a cumulative summation operation. - - :param arg: The tensor to be summed. - :param axis: zero dimension tensor specifying axis position along which sum will be performed. 
- :param exclusive: if set to true, the top element is not included in the sum - :param reverse: if set to true, the sum is performed in the reverse direction - :return: New node performing the operation - """ - return _get_node_factory_opset3().create("CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse}) - - -@nameable_op -def embedding_bag_offsets_sum( - emb_table: Node, - indices: NodeInput, - offsets: NodeInput, - default_index: Optional[NodeInput] = None, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs sums of bags of embeddings without the intermediate embeddings. - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param offsets: Tensor containing the starting index positions of each bag in indices. - :param per_sample_weights: Tensor with weights for each sample. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param name: Optional name for output node. - :return: The new node which performs EmbeddingBagOffsetsSum - """ - inputs = [emb_table, as_node(indices), as_node(offsets)] - if per_sample_weights is not None: - inputs.append(default_index) - inputs.append(per_sample_weights) - elif default_index is not None: - inputs.append(default_index) - - return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {}) - - -@nameable_op -def embedding_bag_packed_sum( - emb_table: NodeInput, - indices: NodeInput, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return an EmbeddingBagPackedSum node. - - EmbeddingBagPackedSum constructs an output tensor by replacing every index in a given - input tensor with a row (from the weights matrix) at that index. - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node. - :return: EmbeddingBagPackedSum node - """ - inputs = [as_node(emb_table), as_node(indices)] - if per_sample_weights is not None: - inputs.append(as_node(per_sample_weights)) - - return _get_node_factory_opset3().create("EmbeddingBagPackedSum", inputs, {}) - - -@nameable_op -def embedding_segments_sum( - emb_table: Node, - indices: NodeInput, - segment_ids: NodeInput, - num_segments: Optional[NodeInput] = None, - default_index: Optional[NodeInput] = None, - per_sample_weights: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return an EmbeddingSegmentsSum node. - - EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given - input tensor with a row (from the weights matrix) at that index. - - :param emb_table: Tensor containing the embedding lookup table. - :param indices: Tensor with indices. - :param segment_ids: Tensor with indices into the output Tensor. - :param num_segments: Tensor with number of segments. - :param default_index: Scalar containing default index in embedding table to fill empty bags. - :param per_sample_weights: Weights to be multiplied with embedding table. - :param name: Optional name for output node.
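A usage sketch of the removed embedding_bag_offsets_sum binding (assuming the pre-removal `ngraph` package; shapes and values are hypothetical). It shows how the optional `default_index` fills an empty bag, matching the input-ordering logic of the deleted body above:

    import numpy as np
    import ngraph as ng
    from ngraph.opset3 import embedding_bag_offsets_sum

    table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    indices = ng.constant(np.array([0, 2, 3, 4], dtype=np.int64))
    offsets = ng.constant(np.array([0, 2, 2], dtype=np.int64))  # bag 1 ([2:2]) is empty
    default_index = ng.constant(np.array(0, dtype=np.int64))    # row 0 fills the empty bag
    bags = embedding_bag_offsets_sum(table, indices, offsets, default_index)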
- :return: EmbeddingSegmentsSum node - """ - inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)] - if per_sample_weights is not None: - inputs.append(as_node(num_segments)) - inputs.append(as_node(default_index)) - inputs.append(as_node(per_sample_weights)) - elif default_index is not None: - inputs.append(as_node(num_segments)) - inputs.append(as_node(default_index)) - elif num_segments is not None: - inputs.append(as_node(num_segments)) - - return _get_node_factory_opset3().create("EmbeddingSegmentsSum", inputs, {}) - - -@nameable_op -def extract_image_patches( - image: NodeInput, - sizes: TensorShape, - strides: List[int], - rates: TensorShape, - auto_pad: str, - name: Optional[str] = None, -) -> Node: - """Return a node which produces the ExtractImagePatches operation. - - :param image: 4-D Input data to extract image patches. - :param sizes: Patch size in the format of [size_rows, size_cols]. - :param strides: Patch movement stride in the format of [stride_rows, stride_cols]. - :param rates: Element selection rate for creating a patch. - :param auto_pad: Padding type. - :param name: Optional name for output node. - :return: ExtractImagePatches node - """ - return _get_node_factory_opset3().create( - "ExtractImagePatches", - [as_node(image)], - {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad}, - ) - - -@nameable_op -def gru_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - linear_before_reset: bool = False, - name: Optional[str] = None, -) -> Node: - """Perform GRUCell operation on the tensor from input node. - - GRUCell represents a single GRU Cell that computes the output - using the formula described in the paper: https://arxiv.org/abs/1406.1078 - - Note: this class represents only a single *cell*, not a whole *layer*. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor at current time step with shape: - [batch_size, hidden_size]. - :param W: The weights for matrix multiplication, gate order: zrh. - Shape: [3*hidden_size, input_size]. - :param R: The recurrence weights for matrix multiplication. - Shape: [3*hidden_size, hidden_size]. - :param B: The sum of biases (weight and recurrence). - For linear_before_reset set True the shape is [4*hidden_size]. - Otherwise the shape is [3*hidden_size]. - :param hidden_size: The number of hidden units for recurrent cell. - Specifies hidden state size. - :param activations: The vector of activation functions used inside recurrent cell. - :param activations_alpha: The vector of alpha parameters for activation functions in - order respective to activation list. - :param activations_beta: The vector of beta parameters for activation functions in order - respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of - activation functions. - :param linear_before_reset: Flag denotes if the layer behaves according to the modification - of GRUCell described in the formula in the ONNX documentation. - :param name: Optional output node name. - :return: The new node performing a GRUCell operation on tensor from input node.
- """ - if activations is None: - activations = ["sigmoid", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - input_nodes = as_nodes(X, initial_hidden_state, W, R, B) - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "linear_before_reset": linear_before_reset, - "clip": clip, - } - return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. - :param sort_result_descending: Flag that specifies whenever it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset3().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def non_zero( - data: NodeInput, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return the indices of the elements that are non-zero. - - :param data: Input data. - :param output_type: Output tensor type. - - :return: The new node which performs NonZero - """ - return _get_node_factory_opset3().create("NonZero", [as_node(data)], {"output_type": output_type}) - - -@nameable_op -def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: - """Return a node which produces the Assign operation. - - :param init_value: Node producing a value to be returned instead of an unassigned variable. - :param variable_id: Id of a variable to be read. - :param name: Optional name for output node. - :return: ReadValue node - """ - return _get_node_factory_opset3().create("ReadValue", [as_node(init_value)], {"variable_id": variable_id}) - - -@nameable_op -def rnn_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: List[str], - activations_alpha: List[float], - activations_beta: List[float], - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Perform RNNCell operation on tensor from input node. 
- - It follows notation and equations defined as in ONNX standard: - https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN - - Note: this class represents only a single *cell*, not a whole RNN *layer*. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor at current time step with shape: - [batch_size, hidden_size]. - :param W: The weight tensor with shape: [hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [hidden_size, - hidden_size]. - :param B: The sum of biases (weight and recurrence) with shape: [hidden_size]. - :param hidden_size: The number of hidden units for recurrent cell. - Specifies hidden state size. - :param activations: The vector of activation functions used inside recurrent cell. - :param activations_alpha: The vector of alpha parameters for activation functions in - order respective to activation list. - :param activations_beta: The vector of beta parameters for activation functions in order - respective to activation list. - :param clip: The value defining clipping range [-clip, clip] on input of - activation functions. - :param name: Optional output node name. - :return: The new node performing an RNNCell operation on tensor from input node. - """ - if activations is None: - activations = ["tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - input_nodes = as_nodes(X, initial_hidden_state, W, R, B) - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes) - - -@nameable_op -def roi_align( - data: NodeInput, - rois: NodeInput, - batch_indices: NodeInput, - pooled_h: int, - pooled_w: int, - sampling_ratio: int, - spatial_scale: float, - mode: str, - name: Optional[str] = None, -) -> Node: - """Return a node which performs ROIAlign. - - :param data: Input data. - :param rois: RoIs (Regions of Interest) to pool over. - :param batch_indices: Tensor with each element denoting the index of - the corresponding image in the batch. - :param pooled_h: Height of the ROI output feature map. - :param pooled_w: Width of the ROI output feature map. - :param sampling_ratio: Number of bins over height and width to use to calculate - each output feature map element. - :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. - :param mode: Method to perform pooling to produce output feature map elements. - - :return: The new node which performs ROIAlign - """ - inputs = as_nodes(data, rois, batch_indices) - attributes = { - "pooled_h": pooled_h, - "pooled_w": pooled_w, - "sampling_ratio": sampling_ratio, - "spatial_scale": spatial_scale, - "mode": mode, - } - return _get_node_factory_opset3().create("ROIAlign", inputs, attributes) - - -@nameable_op -def scatter_elements_update( - data: NodeInput, - indices: NodeInput, - updates: NodeInput, - axis: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which produces a ScatterElementsUpdate operation. - - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis for scatter.
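A sketch of the removed rnn_cell binding (assuming the pre-removal `ngraph` package; the zero-filled weights are placeholders). Because the activation lists have no defaults in this signature, they are passed explicitly:

    import numpy as np
    import ngraph as ng
    from ngraph.opset3 import rnn_cell

    batch_size, input_size, hidden_size = 2, 3, 4
    X = ng.parameter([batch_size, input_size], name="X", dtype=np.float32)
    H_t = ng.parameter([batch_size, hidden_size], name="H_t", dtype=np.float32)
    W = ng.constant(np.zeros([hidden_size, input_size], dtype=np.float32))
    R = ng.constant(np.zeros([hidden_size, hidden_size], dtype=np.float32))
    B = ng.constant(np.zeros([hidden_size], dtype=np.float32))
    cell = rnn_cell(X, H_t, W, R, B, hidden_size, ["tanh"], [], [])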
- :return: ScatterElementsUpdate node - - ScatterElementsUpdate creates a copy of the first input tensor with updated elements - specified with second and third input tensors. - - For each entry in `updates`, the target index in `data` is obtained by combining - the corresponding entry in `indices` with the index of the entry itself: the - index-value for dimension equal to `axis` is obtained from the value of the - corresponding entry in `indices` and the index-value for dimension not equal - to `axis` is obtained from the index of the entry itself. - - """ - return _get_node_factory_opset3().create("ScatterElementsUpdate", as_nodes(data, indices, updates, axis)) - - -@nameable_op -def scatter_update(data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None) -> Node: - """Return a node which produces a ScatterUpdate operation. - - ScatterUpdate sets new values to slices from data addressed by indices. - - :param data: The input tensor to be updated. - :param indices: The tensor with indexes which will be updated. - :param updates: The tensor with update values. - :param axis: The axis at which elements will be updated. - :return: ScatterUpdate node - """ - return _get_node_factory_opset3().create("ScatterUpdate", as_nodes(data, indices, updates, axis)) - - -@nameable_op -def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node: - """Return a node which produces a tensor containing the shape of its input data. - - :param data: The tensor containing the input data. - :param output_type: Output element type. - :return: ShapeOf node - """ - return _get_node_factory_opset3().create("ShapeOf", [as_node(data)], {"output_type": output_type}) - - -@nameable_op -def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = None) -> Node: - """Perform permutation on data in the channel dimension of the input tensor. - - :param data: The node with input tensor. - :param axis: Channel dimension index in the data tensor. - A negative value means that the index should be calculated - from the back of the input data shape. - :param group: The channel dimension specified by the axis parameter - should be split into this number of groups. - :param name: Optional output node name. - :return: The new node performing a permutation on data in the channel dimension - of the input tensor. - - The operation is the equivalent with the following transformation of the input tensor - `data` of shape [N, C, H, W]: - - `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) - - `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) - - `output` = reshape(`data_transposed`, [N, C, H, W]) - - For example: - - .. code-block:: python - - Inputs: tensor of shape [1, 6, 2, 2] - - data = [[[[ 0., 1.], [ 2., 3.]], - [[ 4., 5.], [ 6., 7.]], - [[ 8., 9.], [10., 11.]], - [[12., 13.], [14., 15.]], - [[16., 17.], [18., 19.]], - [[20., 21.], [22., 23.]]]] - - axis = 1 - groups = 3 - - Output: tensor of shape [1, 6, 2, 2] - - output = [[[[ 0., 1.], [ 2., 3.]], - [[ 8., 9.], [10., 11.]], - [[16., 17.], [18., 19.]], - [[ 4., 5.], [ 6., 7.]], - [[12., 13.], [14., 15.]], - [[20., 21.], [22., 23.]]]] - """ - return _get_node_factory_opset3().create("ShuffleChannels", [as_node(data)], {"axis": axis, "group": group}) - - -@nameable_op -def topk( - data: NodeInput, - k: NodeInput, - axis: int, - mode: str, - sort: str, - index_element_type: str = "i32", - name: Optional[str] = None, -) -> Node: - """Return a node which performs TopK. 
- - :param data: Input data. - :param k: K. - :param axis: TopK Axis. - :param mode: Compute TopK largest ('max') or smallest ('min') - :param sort: Order of output elements (sort by: 'none', 'index' or 'value') - :param index_element_type: Type of output tensor with indices. - :return: The new node which performs TopK (both indices and values) - """ - return _get_node_factory_opset3().create( - "TopK", - as_nodes(data, k), - {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type}, - ) diff --git a/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py deleted file mode 100644 index 09406337a43966..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset4/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset1.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from ngraph.opset1.ops import gather_tree -from ngraph.opset2.ops import gelu -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset1.ops import hard_sigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset1.ops import interpolate -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from 
ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset1.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset1.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset1.ops import multiply -from ngraph.opset2.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset4.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset1.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset1.ops import range -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset3.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset1.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import space_to_depth -from ngraph.opset1.ops import split -from ngraph.opset1.ops import sqrt -from ngraph.opset1.ops import squared_difference -from ngraph.opset1.ops import squeeze -from ngraph.opset1.ops import strided_slice -from ngraph.opset1.ops import subtract -from ngraph.opset4.ops import swish -from ngraph.opset1.ops import tan -from ngraph.opset1.ops import tanh -from ngraph.opset1.ops import tensor_iterator -from ngraph.opset1.ops import tile -from ngraph.opset3.ops import topk -from ngraph.opset1.ops import transpose -from ngraph.opset1.ops import unsqueeze -from ngraph.opset1.ops import variadic_split diff --git a/src/bindings/python/src/compatibility/ngraph/opset4/ops.py b/src/bindings/python/src/compatibility/ngraph/opset4/ops.py deleted file mode 100644 index 4526725f2e982f..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset4/ops.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -"""Factory functions for all ngraph ops.""" -from typing import Callable, Iterable, List, Optional, Set, Union - -import numpy as np -from functools import partial - -from ngraph.impl import Node, Shape -from ngraph.impl.op import Constant, Parameter -from ngraph.opset_utils import _get_node_factory -from ngraph.utils.decorators import binary_op, nameable_op, unary_op -from ngraph.utils.input_validation import ( - assert_list_of_ints, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) -from ngraph.utils.node_factory import NodeFactory -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, -) -from ngraph.utils.types import ( - NodeInput, - NumericData, - NumericType, - ScalarData, - TensorShape, - as_node, - as_nodes, - get_dtype, - get_element_type, - get_element_type_str, - make_constant_node, -) - -_get_node_factory_opset4 = partial(_get_node_factory, "opset4") - -# -------------------------------------------- ops ------------------------------------------------ - - -@nameable_op -def ctc_loss( - logits: NodeInput, - logit_length: NodeInput, - labels: NodeInput, - label_length: NodeInput, - blank_index: Optional[NodeInput] = None, - preprocess_collapse_repeated: bool = False, - ctc_merge_repeated: bool = True, - unique: bool = False, - name: Optional[str] = None, -) -> Node: - """Return a node which performs CTCLoss. - - :param logits: 3-D tensor of logits. - :param logit_length: 1-D tensor of lengths for each object from a batch. - :param labels: 2-D tensor of labels for which likelihood is estimated using logits. - :param label_length: 1-D tensor of length for each label sequence. - :param blank_index: Scalar used to mark a blank index. - :param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. - :param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. - :param unique: Flag to find unique elements in a target. - :return: The new node which performs CTCLoss - """ - if blank_index is not None: - inputs = as_nodes(logits, logit_length, labels, label_length, blank_index) - else: - inputs = as_nodes(logits, logit_length, labels, label_length) - - attributes = { - "preprocess_collapse_repeated": preprocess_collapse_repeated, - "ctc_merge_repeated": ctc_merge_repeated, - "unique": unique, - } - - return _get_node_factory_opset4().create("CTCLoss", inputs, attributes) - - -@nameable_op -def non_max_suppression( - boxes: NodeInput, - scores: NodeInput, - max_output_boxes_per_class: Optional[NodeInput] = None, - iou_threshold: Optional[NodeInput] = None, - score_threshold: Optional[NodeInput] = None, - box_encoding: str = "corner", - sort_result_descending: bool = True, - output_type: str = "i64", - name: Optional[str] = None, -) -> Node: - """Return a node which performs NonMaxSuppression. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param max_output_boxes_per_class: Tensor Specifying maximum number of boxes - to be selected per class. - :param iou_threshold: Tensor specifying intersection over union threshold - :param score_threshold: Tensor specifying minimum score to consider box for the processing. - :param box_encoding: Format of boxes data encoding. 
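A sketch of the removed opset4 ctc_loss binding (assuming the pre-removal `ngraph` package; the shapes are hypothetical: 2 sequences, 8 time steps, 5 classes; the optional blank_index input is omitted here):

    import numpy as np
    import ngraph as ng
    from ngraph.opset4 import ctc_loss

    logits = ng.parameter([2, 8, 5], name="logits", dtype=np.float32)
    logit_length = ng.constant(np.array([8, 6], dtype=np.int64))
    labels = ng.constant(np.zeros([2, 8], dtype=np.int64))
    label_length = ng.constant(np.array([4, 3], dtype=np.int64))
    loss = ctc_loss(logits, logit_length, labels, label_length)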
- :param sort_result_descending: Flag that specifies whether it is necessary to sort selected - boxes across batches or not. - :param output_type: Output element type. - :return: The new node which performs NonMaxSuppression - """ - if max_output_boxes_per_class is None: - max_output_boxes_per_class = make_constant_node(0, np.int64) - if iou_threshold is None: - iou_threshold = make_constant_node(0, np.float32) - if score_threshold is None: - score_threshold = make_constant_node(0, np.float32) - - inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) - attributes = { - "box_encoding": box_encoding, - "sort_result_descending": sort_result_descending, - "output_type": output_type, - } - - return _get_node_factory_opset4().create("NonMaxSuppression", inputs, attributes) - - -@nameable_op -def softplus(data: NodeInput, name: Optional[str] = None) -> Node: - """Apply SoftPlus operation on each element of input tensor. - - :param data: The tensor providing input data. - :return: The new node with SoftPlus operation applied on each element. - """ - return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {}) - - -@nameable_op -def mish( - data: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which performs Mish. - - :param data: Tensor with input data of floating point type. - :return: The new node which performs Mish - """ - return _get_node_factory_opset4().create("Mish", as_nodes(data), {}) - - -@nameable_op -def hswish( - data: NodeInput, - name: Optional[str] = None, -) -> Node: - """Return a node which performs HSwish (hard version of Swish). - - :param data: Tensor with input data of floating point type. - :return: The new node which performs HSwish - """ - return _get_node_factory_opset4().create("HSwish", as_nodes(data), {}) - - -@nameable_op -def swish( - data: NodeInput, - beta: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs the Swish activation function: Swish(x, beta=1.0) = x * sigmoid(x * beta). - - :param data: Tensor with input data of floating point type. - :return: The new node which performs Swish - """ - if beta is None: - beta = make_constant_node(1.0, np.float32) - return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {}) - - -@nameable_op -def acosh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse cosine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arccosh operation applied on it. - """ - return _get_node_factory_opset4().create("Acosh", as_nodes(node)) - - -@nameable_op -def asinh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse sine function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arcsinh operation applied on it. - """ - return _get_node_factory_opset4().create("Asinh", as_nodes(node)) - - -@nameable_op -def atanh(node: NodeInput, name: Optional[str] = None) -> Node: - """Apply hyperbolic inverse tangent function on the input node element-wise. - - :param node: One of: input node, array or scalar. - :param name: Optional new name for output node. - :return: New node with arctanh operation applied on it.
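The removed opset4 activations above are all built the same way; a combined sketch (assuming the pre-removal `ngraph` package, with an illustrative input):

    import numpy as np
    import ngraph as ng
    from ngraph.opset4 import hswish, mish, softplus, swish

    x = ng.parameter([2, 2], name="x", dtype=np.float32)
    y0 = softplus(x)                             # ln(1 + exp(x))
    y1 = mish(x)                                 # x * tanh(softplus(x))
    y2 = hswish(x)                               # x * min(max(x + 3, 0), 6) / 6
    y3 = swish(x)                                # beta omitted: a 1.0 constant is supplied
    y4 = swish(x, ng.constant(np.float32(0.5)))  # explicit beta input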
- """ - return _get_node_factory_opset4().create("Atanh", as_nodes(node)) - - -@nameable_op -def proposal( - class_probs: Node, - bbox_deltas: Node, - image_shape: NodeInput, - attrs: dict, - name: Optional[str] = None, -) -> Node: - """Filter bounding boxes and outputs only those with the highest prediction confidence. - - :param class_probs: 4D input floating point tensor with class prediction scores. - :param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes - :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - * base_size The size of the anchor to which scale and ratio attributes are applied. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * pre_nms_topn The number of bounding boxes before the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * post_nms_topn The number of bounding boxes after the NMS operation. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * nms_thresh The minimum value of the proposal to be taken into consideration. - Range of values: a positive floating-point number - Default value: None - Required: yes - * feat_stride The step size to slide over boxes (in pixels). - Range of values: a positive unsigned integer - Default value: None - Required: yes - * min_size The minimum size of box to be taken into consideration. - Range of values: a positive unsigned integer number - Default value: None - Required: yes - * ratio The ratios for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - * scale The scales for anchor generation. - Range of values: a list of floating-point numbers - Default value: None - Required: yes - * clip_before_nms The flag that specifies whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: True or False - Default value: True - Required: no - * clip_after_nms The flag that specifies whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: True or False - Default value: False - Required: no - * normalize The flag that specifies whether to perform normalization of output boxes to - [0,1] interval or not. - Range of values: True or False - Default value: False - Required: no - * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates - before decoding. - Range of values: a positive floating-point number - Default value: 1.0 - Required: no - * framework Specifies how the box coordinates are calculated. - Range of values: "" (empty string) - calculate box coordinates like in Caffe* - tensorflow - calculate box coordinates like in the TensorFlow* - Object Detection API models - Default value: "" (empty string) - Required: no - - Example of attribute dictionary: - - .. 
code-block:: python - - # just required ones - attrs = { - 'base_size': 85, - 'pre_nms_topn': 10, - 'post_nms_topn': 20, - 'nms_thresh': 0.34, - 'feat_stride': 16, - 'min_size': 32, - 'ratio': [0.1, 1.5, 2.0, 2.5], - 'scale': [2, 3, 3, 4], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - :return: Node representing Proposal operation. - """ - requirements = [ - ("base_size", True, np.unsignedinteger, is_positive_value), - ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), - ("post_nms_topn", True, np.unsignedinteger, is_positive_value), - ("nms_thresh", True, np.floating, is_positive_value), - ("feat_stride", True, np.unsignedinteger, is_positive_value), - ("min_size", True, np.unsignedinteger, is_positive_value), - ("ratio", True, np.floating, None), - ("scale", True, np.floating, None), - ("clip_before_nms", False, np.bool_, None), - ("clip_after_nms", False, np.bool_, None), - ("normalize", False, np.bool_, None), - ("box_size_scale", False, np.floating, is_positive_value), - ("box_coordinate_scale", False, np.floating, is_positive_value), - ("framework", False, np.str_, None), - ] - - check_valid_attributes("Proposal", attrs, requirements) - - return _get_node_factory_opset4().create("Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs) - - -@nameable_op -def reduce_l1(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """L1-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. - """ - return _get_node_factory_opset4().create("ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def reduce_l2(node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None) -> Node: - """L2-reduction operation on input tensor, eliminating the specified reduction axes. - - :param node: The tensor we want to mean-reduce. - :param reduction_axes: The axes to eliminate through mean operation. - :param keep_dims: If set to True it holds axes that are used for reduction - :param name: Optional name for output node. - :return: The new node performing mean-reduction operation. - """ - return _get_node_factory_opset4().create("ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}) - - -@nameable_op -def lstm_cell( - X: NodeInput, - initial_hidden_state: NodeInput, - initial_cell_state: NodeInput, - W: NodeInput, - R: NodeInput, - B: NodeInput, - hidden_size: int, - activations: Optional[List[str]] = None, - activations_alpha: Optional[List[float]] = None, - activations_beta: Optional[List[float]] = None, - clip: float = 0.0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs LSTMCell operation. - - :param X: The input tensor with shape: [batch_size, input_size]. - :param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. - :param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. - :param W: The weight tensor with shape: [4*hidden_size, input_size]. - :param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. 
- :param B: The bias tensor for gates with shape: [4*hidden_size]. - :param hidden_size: Specifies hidden state size. - :param activations: The list of three activation functions for gates. - :param activations_alpha: The list of alpha parameters for activation functions. - :param activations_beta: The list of beta parameters for activation functions. - :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. - :param name: An optional name of the output node. - - :return: The new node represents LSTMCell. Node outputs count: 2. - """ - if activations is None: - activations = ["sigmoid", "tanh", "tanh"] - if activations_alpha is None: - activations_alpha = [] - if activations_beta is None: - activations_beta = [] - - node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) - - attributes = { - "hidden_size": hidden_size, - "activations": activations, - "activations_alpha": activations_alpha, - "activations_beta": activations_beta, - "clip": clip, - } - return _get_node_factory_opset4().create("LSTMCell", node_inputs, attributes) diff --git a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py deleted file mode 100644 index a7cdd05d652de9..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset1.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset1.ops import detection_output -from ngraph.opset1.ops import divide -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather -from 
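For readers tracking what this hunk removes: a minimal, hedged sketch of how these opset4 factories were typically driven through the legacy `ngraph` package (shapes and names below are hypothetical, not taken from this patch):

.. code-block:: python

    import numpy as np
    from ngraph.impl import Function
    from ngraph.opset4 import parameter, softplus, swish

    # Two element-wise activations built with the removed factory functions.
    data = parameter([2, 3], name="data", dtype=np.float32)
    out = softplus(swish(data))  # swish falls back to a beta constant of 1.0
    func = Function([out], [data], "legacy_opset4_example")
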
diff --git a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py
deleted file mode 100644
index a7cdd05d652de9..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset5/__init__.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from ngraph.opset1.ops import absolute
-from ngraph.opset1.ops import absolute as abs
-from ngraph.opset1.ops import acos
-from ngraph.opset4.ops import acosh
-from ngraph.opset1.ops import add
-from ngraph.opset1.ops import asin
-from ngraph.opset4.ops import asinh
-from ngraph.opset3.ops import assign
-from ngraph.opset1.ops import atan
-from ngraph.opset4.ops import atanh
-from ngraph.opset1.ops import avg_pool
-from ngraph.opset5.ops import batch_norm_inference
-from ngraph.opset2.ops import batch_to_space
-from ngraph.opset1.ops import binary_convolution
-from ngraph.opset3.ops import broadcast
-from ngraph.opset3.ops import bucketize
-from ngraph.opset1.ops import ceiling
-from ngraph.opset1.ops import ceiling as ceil
-from ngraph.opset1.ops import clamp
-from ngraph.opset1.ops import concat
-from ngraph.opset1.ops import constant
-from ngraph.opset1.ops import convert
-from ngraph.opset1.ops import convert_like
-from ngraph.opset1.ops import convolution
-from ngraph.opset1.ops import convolution_backprop_data
-from ngraph.opset1.ops import cos
-from ngraph.opset1.ops import cosh
-from ngraph.opset1.ops import ctc_greedy_decoder
-from ngraph.opset4.ops import ctc_loss
-from ngraph.opset3.ops import cum_sum
-from ngraph.opset3.ops import cum_sum as cumsum
-from ngraph.opset1.ops import deformable_convolution
-from ngraph.opset1.ops import deformable_psroi_pooling
-from ngraph.opset1.ops import depth_to_space
-from ngraph.opset1.ops import detection_output
-from ngraph.opset1.ops import divide
-from ngraph.opset1.ops import elu
-from ngraph.opset3.ops import embedding_bag_offsets_sum
-from ngraph.opset3.ops import embedding_bag_packed_sum
-from ngraph.opset3.ops import embedding_segments_sum
-from ngraph.opset3.ops import extract_image_patches
-from ngraph.opset1.ops import equal
-from ngraph.opset1.ops import erf
-from ngraph.opset1.ops import exp
-from ngraph.opset1.ops import fake_quantize
-from ngraph.opset1.ops import floor
-from ngraph.opset1.ops import floor_mod
-from ngraph.opset1.ops import gather
-from ngraph.opset5.ops import gather_nd
-from ngraph.opset1.ops import gather_tree
-from ngraph.opset2.ops import gelu
-from ngraph.opset1.ops import greater
-from ngraph.opset1.ops import greater_equal
-from ngraph.opset1.ops import grn
-from ngraph.opset1.ops import group_convolution
-from ngraph.opset1.ops import group_convolution_backprop_data
-from ngraph.opset3.ops import gru_cell
-from ngraph.opset5.ops import gru_sequence
-from ngraph.opset1.ops import hard_sigmoid
-from ngraph.opset5.ops import hsigmoid
-from ngraph.opset4.ops import hswish
-from ngraph.opset1.ops import interpolate
-from ngraph.opset1.ops import less
-from ngraph.opset1.ops import less_equal
-from ngraph.opset1.ops import log
-from ngraph.opset1.ops import logical_and
-from ngraph.opset1.ops import logical_not
-from ngraph.opset1.ops import logical_or
-from ngraph.opset1.ops import logical_xor
-from ngraph.opset5.ops import log_softmax
-from ngraph.opset5.ops import loop
-from ngraph.opset1.ops import lrn
-from ngraph.opset4.ops import lstm_cell
-from ngraph.opset5.ops import lstm_sequence
-from ngraph.opset1.ops import matmul
-from ngraph.opset1.ops import max_pool
-from ngraph.opset1.ops import maximum
-from ngraph.opset1.ops import minimum
-from ngraph.opset4.ops import mish
-from ngraph.opset1.ops import mod
-from ngraph.opset1.ops import multiply
-from ngraph.opset2.ops import mvn
-from ngraph.opset1.ops import negative
-from ngraph.opset5.ops import non_max_suppression
-from ngraph.opset3.ops import non_zero
-from ngraph.opset1.ops import normalize_l2
-from ngraph.opset1.ops import not_equal
-from ngraph.opset1.ops import one_hot
-from ngraph.opset1.ops import pad
-from ngraph.opset1.ops import parameter
-from ngraph.opset1.ops import power
-from ngraph.opset1.ops import prelu
-from ngraph.opset1.ops import prior_box
-from ngraph.opset1.ops import prior_box_clustered
-from ngraph.opset1.ops import psroi_pooling
-from ngraph.opset4.ops import proposal
-from ngraph.opset1.ops import range
-from ngraph.opset3.ops import read_value
-from ngraph.opset4.ops import reduce_l1
-from ngraph.opset4.ops import reduce_l2
-from ngraph.opset1.ops import reduce_logical_and
-from ngraph.opset1.ops import reduce_logical_or
-from ngraph.opset1.ops import reduce_max
-from ngraph.opset1.ops import reduce_mean
-from ngraph.opset1.ops import reduce_min
-from ngraph.opset1.ops import reduce_prod
-from ngraph.opset1.ops import reduce_sum
-from ngraph.opset1.ops import region_yolo
-from ngraph.opset2.ops import reorg_yolo
-from ngraph.opset1.ops import relu
-from ngraph.opset1.ops import reshape
-from ngraph.opset1.ops import result
-from ngraph.opset1.ops import reverse_sequence
-from ngraph.opset3.ops import rnn_cell
-from ngraph.opset5.ops import rnn_sequence
-from ngraph.opset3.ops import roi_align
-from ngraph.opset2.ops import roi_pooling
-from ngraph.opset5.ops import round
-from ngraph.opset3.ops import scatter_elements_update
-from ngraph.opset3.ops import scatter_update
-from ngraph.opset1.ops import select
-from ngraph.opset1.ops import selu
-from ngraph.opset3.ops import shape_of
-from ngraph.opset3.ops import shuffle_channels
-from ngraph.opset1.ops import sigmoid
-from ngraph.opset1.ops import sign
-from ngraph.opset1.ops import sin
-from ngraph.opset1.ops import sinh
-from ngraph.opset1.ops import softmax
-from ngraph.opset4.ops import softplus
-from ngraph.opset2.ops import space_to_batch
-from ngraph.opset1.ops import space_to_depth
-from ngraph.opset1.ops import split
-from ngraph.opset1.ops import sqrt
-from ngraph.opset1.ops import squared_difference
-from ngraph.opset1.ops import squeeze
-from ngraph.opset1.ops import strided_slice
-from ngraph.opset1.ops import subtract
-from ngraph.opset4.ops import swish
-from ngraph.opset1.ops import tan
-from ngraph.opset1.ops import tanh
-from ngraph.opset1.ops import tensor_iterator
-from ngraph.opset1.ops import tile
-from ngraph.opset3.ops import topk
-from ngraph.opset1.ops import transpose
-from ngraph.opset1.ops import unsqueeze
-from ngraph.opset1.ops import variadic_split
diff --git a/src/bindings/python/src/compatibility/ngraph/opset5/ops.py b/src/bindings/python/src/compatibility/ngraph/opset5/ops.py
deleted file mode 100644
index 0baf48becd26b3..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset5/ops.py
+++ /dev/null
@@ -1,426 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-"""Factory functions for all ngraph ops."""
-from typing import Callable, Iterable, List, Optional, Set, Union
-
-import numpy as np
-from functools import partial
-
-from ngraph.impl import Node, Shape
-from ngraph.impl.op import Constant, Parameter
-from ngraph.opset_utils import _get_node_factory
-from ngraph.utils.decorators import binary_op, nameable_op, unary_op
-from ngraph.utils.input_validation import (
-    assert_list_of_ints,
-    check_valid_attributes,
-    is_non_negative_value,
-    is_positive_value,
-)
-from ngraph.utils.node_factory import NodeFactory
-from ngraph.utils.tensor_iterator_types import (
-    GraphBody,
-    TensorIteratorSliceInputDesc,
-    TensorIteratorMergedInputDesc,
-    TensorIteratorInvariantInputDesc,
-    TensorIteratorBodyOutputDesc,
-    TensorIteratorConcatOutputDesc,
-)
-from ngraph.utils.types import (
-    NodeInput,
-    NumericData,
-    NumericType,
-    ScalarData,
-    TensorShape,
-    as_node,
-    as_nodes,
-    get_dtype,
-    get_element_type,
-    get_element_type_str,
-    make_constant_node,
-)
-
-_get_node_factory_opset5 = partial(_get_node_factory, "opset5")
-
-# -------------------------------------------- ops ------------------------------------------------
-
-
-@nameable_op
-def batch_norm_inference(
-    data: NodeInput,
-    gamma: NodeInput,
-    beta: NodeInput,
-    mean: NodeInput,
-    variance: NodeInput,
-    epsilon: float,
-    name: Optional[str] = None,
-) -> Node:
-    """Perform layer normalization of the input tensor by mean and variance, applying scale and offset.
-
-    :param data: The input tensor with data for normalization.
-    :param gamma: The scalar scaling for normalized value.
-    :param beta: The bias added to the scaled normalized value.
-    :param mean: The value for mean normalization.
-    :param variance: The value for variance normalization.
-    :param epsilon: The number to be added to the variance to avoid division
-        by zero when normalizing a value.
-    :param name: The optional name of the output node.
-    :return: The new node which performs BatchNormInference.
-    """
-    inputs = as_nodes(data, gamma, beta, mean, variance)
-    return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon})
-
-
-@nameable_op
-def gather_nd(
-    data: NodeInput,
-    indices: NodeInput,
-    batch_dims: Optional[int] = 0,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs GatherND.
-
-    :param data: N-D tensor with data for gathering
-    :param indices: K-D tensor of tuples with indices by which data is gathered
-    :param batch_dims: Scalar value of batch dimensions
-    :return: The new node which performs GatherND
-    """
-    inputs = as_nodes(data, indices)
-
-    attributes = {"batch_dims": batch_dims}
-
-    return _get_node_factory_opset5().create("GatherND", inputs, attributes)
-
-
-@nameable_op
-def log_softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
-    """Apply LogSoftmax operation on each element of input tensor.
-
-    :param data: The tensor providing input data.
-    :param axis: An axis along which LogSoftmax should be calculated.
-    :return: The new node with LogSoftmax operation applied on each element.
-    """
-    return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], {"axis": axis})
-
-
-@nameable_op
-def non_max_suppression(
-    boxes: NodeInput,
-    scores: NodeInput,
-    max_output_boxes_per_class: Optional[NodeInput] = None,
-    iou_threshold: Optional[NodeInput] = None,
-    score_threshold: Optional[NodeInput] = None,
-    soft_nms_sigma: Optional[NodeInput] = None,
-    box_encoding: str = "corner",
-    sort_result_descending: bool = True,
-    output_type: str = "i64",
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs NonMaxSuppression.
-
-    :param boxes: Tensor with box coordinates.
-    :param scores: Tensor with box scores.
-    :param max_output_boxes_per_class: Tensor specifying maximum number of boxes
-        to be selected per class.
-    :param iou_threshold: Tensor specifying intersection over union threshold.
-    :param score_threshold: Tensor specifying minimum score to consider box for the processing.
-    :param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS.
-    :param box_encoding: Format of boxes data encoding.
-    :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
-        boxes across batches or not.
-    :param output_type: Output element type.
-    :return: The new node which performs NonMaxSuppression
-    """
-    if max_output_boxes_per_class is None:
-        max_output_boxes_per_class = make_constant_node(0, np.int64)
-    if iou_threshold is None:
-        iou_threshold = make_constant_node(0, np.float32)
-    if score_threshold is None:
-        score_threshold = make_constant_node(0, np.float32)
-    if soft_nms_sigma is None:
-        inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
-    else:
-        inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, soft_nms_sigma)
-
-    attributes = {
-        "box_encoding": box_encoding,
-        "sort_result_descending": sort_result_descending,
-        "output_type": output_type,
-    }
-
-    return _get_node_factory_opset5().create("NonMaxSuppression", inputs, attributes)
-
-
-@nameable_op
-def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = None) -> Node:
-    """Apply Round operation on each element of input tensor.
-
-    :param data: The tensor providing input data.
-    :param mode: Rule to round halfway cases. If set to 'half_to_even', halves round to the
-        nearest even integer; if set to 'half_away_from_zero', the result heads away from zero.
-    :param name: An optional name of the output node.
-    :return: The new node with Round operation applied on each element.
-    """
-    return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()})
-
-
-@nameable_op
-def lstm_sequence(
-    X: NodeInput,
-    initial_hidden_state: NodeInput,
-    initial_cell_state: NodeInput,
-    sequence_lengths: NodeInput,
-    W: NodeInput,
-    R: NodeInput,
-    B: NodeInput,
-    hidden_size: int,
-    direction: str,
-    activations: Optional[List[str]] = None,
-    activations_alpha: Optional[List[float]] = None,
-    activations_beta: Optional[List[float]] = None,
-    clip: float = 0.0,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs LSTMSequence operation.
-
-    :param X: The input tensor. Shape: [batch_size, seq_length, input_size].
-    :param initial_hidden_state: The hidden state tensor.
-        Shape: [batch_size, num_directions, hidden_size].
-    :param initial_cell_state: The cell state tensor.
-        Shape: [batch_size, num_directions, hidden_size].
-    :param sequence_lengths: Specifies real sequence lengths for each batch element.
-        Shape: [batch_size]. Integer type.
-    :param W: Tensor with weights for matrix multiplication operation with input portion of data.
-        Expected format: fico
-        Shape: [num_directions, 4*hidden_size, input_size].
-    :param R: The tensor with weights for matrix multiplication operation with hidden state.
-        Expected format: fico
-        Shape: [num_directions, 4*hidden_size, hidden_size].
-    :param B: The sum of biases (weight and recurrence). Expected format: fico
-        Shape: [num_directions, 4*hidden_size].
-    :param hidden_size: Specifies hidden state size.
-    :param direction: Specifies if the RNN is forward, reverse, or bidirectional.
-    :param activations: The list of three activation functions for gates.
-    :param activations_alpha: The list of alpha parameters for activation functions.
-    :param activations_beta: The list of beta parameters for activation functions.
-    :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
-    :param name: An optional name of the output node.
-
-    :return: The new node representing LSTMSequence. Node outputs count: 3.
-    """
-    if activations is None:
-        activations = ["sigmoid", "tanh", "tanh"]
-    if activations_alpha is None:
-        activations_alpha = []
-    if activations_beta is None:
-        activations_beta = []
-
-    node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B)
-
-    attributes = {
-        "hidden_size": hidden_size,
-        "direction": direction.lower(),
-        "activations": activations,
-        "activations_alpha": activations_alpha,
-        "activations_beta": activations_beta,
-        "clip": clip,
-    }
-    return _get_node_factory_opset5().create("LSTMSequence", node_inputs, attributes)
-
-
-def hsigmoid(
-    data: NodeInput,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs HSigmoid.
-
-    :param data: Tensor with input data floating point type.
-    :return: The new node which performs HSigmoid
-    """
-    return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {})
-
-
-@nameable_op
-def gru_sequence(
-    X: NodeInput,
-    initial_hidden_state: NodeInput,
-    sequence_lengths: NodeInput,
-    W: NodeInput,
-    R: NodeInput,
-    B: NodeInput,
-    hidden_size: int,
-    direction: str,
-    activations: Optional[List[str]] = None,
-    activations_alpha: Optional[List[float]] = None,
-    activations_beta: Optional[List[float]] = None,
-    clip: float = 0.0,
-    linear_before_reset: bool = False,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs GRUSequence operation.
-
-    :param X: The input tensor. Shape: [batch_size, seq_length, input_size].
-    :param initial_hidden_state: The hidden state tensor.
-        Shape: [batch_size, num_directions, hidden_size].
-    :param sequence_lengths: Specifies real sequence lengths for each batch element.
-        Shape: [batch_size]. Integer type.
-    :param W: Tensor with weights for matrix multiplication operation with input portion of data.
-        Shape: [num_directions, 3*hidden_size, input_size].
-    :param R: The tensor with weights for matrix multiplication operation with hidden state.
-        Shape: [num_directions, 3*hidden_size, hidden_size].
-    :param B: The sum of biases (weight and recurrence).
-        For linear_before_reset set to True the shape is [num_directions, 4*hidden_size].
-        Otherwise the shape is [num_directions, 3*hidden_size].
-    :param hidden_size: Specifies hidden state size.
-    :param direction: Specifies if the RNN is forward, reverse, or bidirectional.
-    :param activations: The list of two activation functions for gates.
-    :param activations_alpha: The list of alpha parameters for activation functions.
-    :param activations_beta: The list of beta parameters for activation functions.
-    :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
-    :param linear_before_reset: Flag denoting if the layer behaves according to the modification
-        of GRU described in the formula in the ONNX documentation.
-    :param name: An optional name of the output node.
-
-    :return: The new node representing GRUSequence. Node outputs count: 2.
-    """
-    if activations is None:
-        activations = ["sigmoid", "tanh"]
-    if activations_alpha is None:
-        activations_alpha = []
-    if activations_beta is None:
-        activations_beta = []
-
-    node_inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B)
-
-    attributes = {
-        "hidden_size": hidden_size,
-        "direction": direction.lower(),
-        "activations": activations,
-        "activations_alpha": activations_alpha,
-        "activations_beta": activations_beta,
-        "linear_before_reset": linear_before_reset,
-        "clip": clip,
-    }
-    return _get_node_factory_opset5().create("GRUSequence", node_inputs, attributes)
-
-
-@nameable_op
-def rnn_sequence(
-    X: NodeInput,
-    initial_hidden_state: NodeInput,
-    sequence_lengths: NodeInput,
-    W: NodeInput,
-    R: NodeInput,
-    B: NodeInput,
-    hidden_size: int,
-    direction: str,
-    activations: Optional[List[str]] = None,
-    activations_alpha: Optional[List[float]] = None,
-    activations_beta: Optional[List[float]] = None,
-    clip: float = 0.0,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs RNNSequence operation.
-
-    :param X: The input tensor. Shape: [batch_size, seq_length, input_size].
-    :param initial_hidden_state: The hidden state tensor.
-        Shape: [batch_size, num_directions, hidden_size].
-    :param sequence_lengths: Specifies real sequence lengths for each batch element.
-        Shape: [batch_size]. Integer type.
-    :param W: Tensor with weights for matrix multiplication operation with input portion of data.
-        Shape: [num_directions, hidden_size, input_size].
-    :param R: The tensor with weights for matrix multiplication operation with hidden state.
-        Shape: [num_directions, hidden_size, hidden_size].
-    :param B: The sum of biases (weight and recurrence).
-        Shape: [num_directions, hidden_size].
-    :param hidden_size: Specifies hidden state size.
-    :param direction: Specifies if the RNN is forward, reverse, or bidirectional.
-    :param activations: The list of activation functions for the cell.
-    :param activations_alpha: The list of alpha parameters for activation functions.
-    :param activations_beta: The list of beta parameters for activation functions.
-    :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
-    :param name: An optional name of the output node.
-
-    :return: The new node representing RNNSequence. Node outputs count: 2.
-    """
-    if activations is None:
-        activations = ["tanh"]
-    if activations_alpha is None:
-        activations_alpha = []
-    if activations_beta is None:
-        activations_beta = []
-
-    inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B)
-
-    attributes = {
-        "hidden_size": hidden_size,
-        "direction": direction.lower(),
-        "activations": activations,
-        "activations_alpha": activations_alpha,
-        "activations_beta": activations_beta,
-        "clip": clip,
-    }
-
-    return _get_node_factory_opset5().create("RNNSequence", inputs, attributes)
-
-
-@nameable_op
-def loop(
-    trip_count: NodeInput,
-    execution_condition: NodeInput,
-    inputs: List[Node],
-    graph_body: GraphBody,
-    slice_input_desc: List[TensorIteratorSliceInputDesc],
-    merged_input_desc: List[TensorIteratorMergedInputDesc],
-    invariant_input_desc: List[TensorIteratorInvariantInputDesc],
-    body_output_desc: List[TensorIteratorBodyOutputDesc],
-    concat_output_desc: List[TensorIteratorConcatOutputDesc],
-    body_condition_output_idx: int,
-    current_iteration_input_idx: int = -1,
-    name: Optional[str] = None,
-) -> Node:
-    """Perform recurrent execution of the network described in the body, iterating through the data.
-
-    :param trip_count: A scalar or 1D tensor with 1 element specifying
-        maximum number of iterations.
-    :param execution_condition: A scalar or 1D tensor with 1 element
-        specifying whether to execute the first iteration or not.
-    :param inputs: The inputs provided to the Loop operator.
-    :param graph_body: The graph representing the body we execute.
-    :param slice_input_desc: The descriptors describing sliced inputs, that is nodes
-        representing tensors we iterate through, processing single
-        data slice in one iteration.
-    :param merged_input_desc: The descriptors describing merged inputs, that is nodes
-        representing variables with initial value at first iteration,
-        which may be changing through iterations.
-    :param invariant_input_desc: The descriptors describing invariant inputs, that is nodes
-        representing variable with persistent value through all
-        iterations.
-    :param body_output_desc: The descriptors describing body outputs from specified
-        iteration.
-    :param concat_output_desc: The descriptors describing specified output values through
-        all the iterations concatenated into one node.
-    :param body_condition_output_idx: Determines the purpose of the corresponding result in
-        the graph_body. This result will determine the dynamic
-        exit condition. If the value of this result is False,
-        then iterations stop.
-    :param current_iteration_input_idx: Determines the purpose of the corresponding parameter
-        in the graph_body. This parameter will be used as
-        an iteration counter. Optional.
-    :return: The new node which performs Loop.
-    """
-    attributes = {
-        "body": graph_body.serialize(),
-        "input_descriptions": {
-            "slice_input_desc": [desc.serialize() for desc in slice_input_desc],
-            "merged_input_desc": [desc.serialize() for desc in merged_input_desc],
-            "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc],
-        },
-        "output_descriptions": {
-            "body_output_desc": [desc.serialize() for desc in body_output_desc],
-            "concat_output_desc": [desc.serialize() for desc in concat_output_desc],
-        },
-        "special_body_ports": {"body_condition_output_idx": body_condition_output_idx, "current_iteration_input_idx": current_iteration_input_idx},
-    }
-    return _get_node_factory_opset5().create("Loop", as_nodes(trip_count, execution_condition, *inputs), attributes)
diff --git a/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py
deleted file mode 100644
index 5dca25e0a0576b..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset6/__init__.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from ngraph.opset1.ops import absolute
-from ngraph.opset1.ops import absolute as abs
-from ngraph.opset1.ops import acos
-from ngraph.opset4.ops import acosh
-from ngraph.opset1.ops import add
-from ngraph.opset1.ops import asin
-from ngraph.opset4.ops import asinh
-from ngraph.opset6.ops import assign
-from ngraph.opset1.ops import atan
-from ngraph.opset4.ops import atanh
-from ngraph.opset1.ops import avg_pool
-from ngraph.opset5.ops import batch_norm_inference
-from ngraph.opset2.ops import batch_to_space
-from ngraph.opset1.ops import binary_convolution
-from ngraph.opset3.ops import broadcast
-from ngraph.opset3.ops import bucketize
-from ngraph.opset1.ops import ceiling
-from ngraph.opset1.ops import ceiling as ceil
-from ngraph.opset1.ops import clamp
-from ngraph.opset1.ops import concat
-from ngraph.opset1.ops import constant
-from ngraph.opset1.ops import convert
-from ngraph.opset1.ops import convert_like
-from ngraph.opset1.ops import convolution
-from ngraph.opset1.ops import convolution_backprop_data
-from ngraph.opset1.ops import cos
-from ngraph.opset1.ops import cosh
-from ngraph.opset1.ops import ctc_greedy_decoder
-from ngraph.opset6.ops import ctc_greedy_decoder_seq_len
-from ngraph.opset4.ops import ctc_loss
-from ngraph.opset3.ops import cum_sum
-from ngraph.opset3.ops import cum_sum as cumsum
-from ngraph.opset1.ops import deformable_convolution
-from ngraph.opset1.ops import deformable_psroi_pooling
-from ngraph.opset1.ops import depth_to_space
-from ngraph.opset1.ops import detection_output
-from ngraph.opset1.ops import divide
-from ngraph.opset1.ops import elu
-from ngraph.opset3.ops import embedding_bag_offsets_sum
-from ngraph.opset3.ops import embedding_bag_packed_sum
-from ngraph.opset3.ops import embedding_segments_sum
-from ngraph.opset3.ops import extract_image_patches
-from ngraph.opset1.ops import equal
-from ngraph.opset1.ops import erf
-from ngraph.opset1.ops import exp
-from ngraph.opset1.ops import fake_quantize
-from ngraph.opset1.ops import floor
-from ngraph.opset1.ops import floor_mod
-from ngraph.opset1.ops import gather
-from ngraph.opset6.ops import gather_elements
-from ngraph.opset5.ops import gather_nd
-from ngraph.opset1.ops import gather_tree
-from ngraph.opset2.ops import gelu
-from ngraph.opset1.ops import greater
-from ngraph.opset1.ops import greater_equal
-from ngraph.opset1.ops import grn
-from ngraph.opset1.ops import group_convolution
-from ngraph.opset1.ops import group_convolution_backprop_data
-from ngraph.opset3.ops import gru_cell
-from ngraph.opset5.ops import gru_sequence
-from ngraph.opset1.ops import hard_sigmoid
-from ngraph.opset5.ops import hsigmoid
-from ngraph.opset4.ops import hswish
-from ngraph.opset1.ops import interpolate
-from ngraph.opset1.ops import less
-from ngraph.opset1.ops import less_equal
-from ngraph.opset1.ops import log
-from ngraph.opset1.ops import logical_and
-from ngraph.opset1.ops import logical_not
-from ngraph.opset1.ops import logical_or
-from ngraph.opset1.ops import logical_xor
-from ngraph.opset5.ops import log_softmax
-from ngraph.opset5.ops import loop
-from ngraph.opset1.ops import lrn
-from ngraph.opset4.ops import lstm_cell
-from ngraph.opset5.ops import lstm_sequence
-from ngraph.opset1.ops import matmul
-from ngraph.opset1.ops import max_pool
-from ngraph.opset1.ops import maximum
-from ngraph.opset1.ops import minimum
-from ngraph.opset4.ops import mish
-from ngraph.opset1.ops import mod
-from ngraph.opset1.ops import multiply
-from ngraph.opset6.ops import mvn
-from ngraph.opset1.ops import negative
-from ngraph.opset5.ops import non_max_suppression
-from ngraph.opset3.ops import non_zero
-from ngraph.opset1.ops import normalize_l2
-from ngraph.opset1.ops import not_equal
-from ngraph.opset1.ops import one_hot
-from ngraph.opset1.ops import pad
-from ngraph.opset1.ops import parameter
-from ngraph.opset1.ops import power
-from ngraph.opset1.ops import prelu
-from ngraph.opset1.ops import prior_box
-from ngraph.opset1.ops import prior_box_clustered
-from ngraph.opset1.ops import psroi_pooling
-from ngraph.opset4.ops import proposal
-from ngraph.opset1.ops import range
-from ngraph.opset6.ops import read_value
-from ngraph.opset4.ops import reduce_l1
-from ngraph.opset4.ops import reduce_l2
-from ngraph.opset1.ops import reduce_logical_and
-from ngraph.opset1.ops import reduce_logical_or
-from ngraph.opset1.ops import reduce_max
-from ngraph.opset1.ops import reduce_mean
-from ngraph.opset1.ops import reduce_min
-from ngraph.opset1.ops import reduce_prod
-from ngraph.opset1.ops import reduce_sum
-from ngraph.opset1.ops import region_yolo
-from ngraph.opset2.ops import reorg_yolo
-from ngraph.opset1.ops import relu
-from ngraph.opset1.ops import reshape
-from ngraph.opset1.ops import result
-from ngraph.opset1.ops import reverse_sequence
-from ngraph.opset3.ops import rnn_cell
-from ngraph.opset5.ops import rnn_sequence
-from ngraph.opset3.ops import roi_align
-from ngraph.opset2.ops import roi_pooling
-from ngraph.opset5.ops import round
-from ngraph.opset3.ops import scatter_elements_update
-from ngraph.opset3.ops import scatter_update
-from ngraph.opset1.ops import select
-from ngraph.opset1.ops import selu
-from ngraph.opset3.ops import shape_of
-from ngraph.opset3.ops import shuffle_channels
-from ngraph.opset1.ops import sigmoid
-from ngraph.opset1.ops import sign
-from ngraph.opset1.ops import sin
-from ngraph.opset1.ops import sinh
-from ngraph.opset1.ops import softmax
-from ngraph.opset4.ops import softplus
-from ngraph.opset2.ops import space_to_batch
-from ngraph.opset1.ops import space_to_depth
-from ngraph.opset1.ops import split
-from ngraph.opset1.ops import sqrt
-from ngraph.opset1.ops import squared_difference
-from ngraph.opset1.ops import squeeze
-from ngraph.opset1.ops import strided_slice
-from ngraph.opset1.ops import subtract
-from ngraph.opset4.ops import swish
-from ngraph.opset1.ops import tan
-from ngraph.opset1.ops import tanh
-from ngraph.opset1.ops import tensor_iterator
-from ngraph.opset1.ops import tile
-from ngraph.opset3.ops import topk
-from ngraph.opset1.ops import transpose
-from ngraph.opset1.ops import unsqueeze
-from ngraph.opset1.ops import variadic_split
diff --git a/src/bindings/python/src/compatibility/ngraph/opset6/ops.py b/src/bindings/python/src/compatibility/ngraph/opset6/ops.py
deleted file mode 100644
index bf8e1eb8dc797b..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset6/ops.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-"""Factory functions for all ngraph ops."""
-from typing import Callable, Iterable, List, Optional, Set, Union
-
-import numpy as np
-from functools import partial
-
-from ngraph.impl import Node, Shape
-from ngraph.impl.op import Constant, Parameter
-from ngraph.opset_utils import _get_node_factory
-from ngraph.utils.decorators import binary_op, nameable_op, unary_op
-from ngraph.utils.input_validation import (
-    assert_list_of_ints,
-    check_valid_attributes,
-    is_non_negative_value,
-    is_positive_value,
-)
-from ngraph.utils.node_factory import NodeFactory
-from ngraph.utils.tensor_iterator_types import (
-    GraphBody,
-    TensorIteratorSliceInputDesc,
-    TensorIteratorMergedInputDesc,
-    TensorIteratorInvariantInputDesc,
-    TensorIteratorBodyOutputDesc,
-    TensorIteratorConcatOutputDesc,
-)
-from ngraph.utils.types import (
-    NodeInput,
-    NumericData,
-    NumericType,
-    ScalarData,
-    TensorShape,
-    as_node,
-    as_nodes,
-    get_dtype,
-    get_element_type,
-    get_element_type_str,
-    make_constant_node,
-)
-
-_get_node_factory_opset6 = partial(_get_node_factory, "opset6")
-
-# -------------------------------------------- ops ------------------------------------------------
-
-
-@nameable_op
-def ctc_greedy_decoder_seq_len(
-    data: NodeInput,
-    sequence_length: NodeInput,
-    blank_index: Optional[NodeInput] = None,
-    merge_repeated: bool = True,
-    classes_index_type: str = "i32",
-    sequence_length_type: str = "i32",
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs CTCGreedyDecoderSeqLen.
-
-    :param data: The input 3D tensor. Shape: [batch_size, seq_length, num_classes]
-    :param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size]
-    :param blank_index: Scalar or 1D tensor that specifies the class index to use for the blank class.
-        Optional parameter. Default value is num_classes-1.
-    :return: The new node which performs CTCGreedyDecoderSeqLen.
-    """
-    if blank_index is not None:
-        inputs = as_nodes(data, sequence_length, blank_index)
-    else:
-        inputs = as_nodes(data, sequence_length)
-
-    attributes = {"merge_repeated": merge_repeated, "classes_index_type": classes_index_type, "sequence_length_type": sequence_length_type}
-
-    return _get_node_factory_opset6().create("CTCGreedyDecoderSeqLen", inputs, attributes)
-
-
-@nameable_op
-def gather_elements(
-    data: NodeInput,
-    indices: NodeInput,
-    axis: Optional[int] = 0,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs GatherElements.
-
-    :param data: N-D tensor with data for gathering
-    :param indices: N-D tensor with indices by which data is gathered
-    :param axis: axis along which elements are gathered
-    :return: The new node which performs GatherElements
-    """
-    inputs = as_nodes(data, indices)
-
-    attributes = {"axis": axis}
-
-    return _get_node_factory_opset6().create("GatherElements", inputs, attributes)
-
-
-@nameable_op
-def mvn(
-    data: Node,
-    axes: Node,
-    normalize_variance: bool,
-    eps: float,
-    eps_mode: str,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs MeanVarianceNormalization (MVN).
-
-    :param data: The node with data tensor.
-    :param axes: The node with axes to reduce on.
-    :param normalize_variance: Denotes whether to perform variance normalization.
-    :param eps: The number added to the variance to avoid division by zero
-        when normalizing the value. Scalar value.
-    :param eps_mode: How eps is applied (`inside_sqrt` or `outside_sqrt`).
-    :param name: Optional output node name.
-    :return: The new node performing a MVN operation on input tensor.
-    """
-    inputs = as_nodes(data, axes)
-
-    attributes = {"normalize_variance": normalize_variance, "eps": eps, "eps_mode": eps_mode}
-
-    return _get_node_factory_opset6().create("MVN", inputs, attributes)
-
-
-@nameable_op
-def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
-    """Return a node which produces the Assign operation.
-
-    :param new_value: Node producing a value to be assigned to a variable.
-    :param variable_id: Id of a variable to be updated.
-    :param name: Optional name for output node.
-    :return: Assign node
-    """
-    return _get_node_factory_opset6().create("Assign", [as_node(new_value)], {"variable_id": variable_id})
-
-
-@nameable_op
-def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
-    """Return a node which produces the ReadValue operation.
-
-    :param init_value: Node producing a value to be returned instead of an unassigned variable.
-    :param variable_id: Id of a variable to be read.
-    :param name: Optional name for output node.
-    :return: ReadValue node
-    """
-    return _get_node_factory_opset6().create("ReadValue", [as_node(init_value)], {"variable_id": variable_id})
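The opset6 state ops removed above were typically paired as in the following sketch (the variable id "var_id" is hypothetical):

.. code-block:: python

    import numpy as np
    from ngraph.opset6 import assign, parameter, read_value

    init = parameter([1, 3], name="init", dtype=np.float32)
    state = read_value(init, "var_id")  # yields the stored value, or init when unset
    update = assign(state, "var_id")    # writes the (here unchanged) value back
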
diff --git a/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py
deleted file mode 100644
index 2a7d139ba597ff..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset7/__init__.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from ngraph.opset1.ops import absolute
-from ngraph.opset1.ops import absolute as abs
-from ngraph.opset1.ops import acos
-from ngraph.opset4.ops import acosh
-from ngraph.opset1.ops import add
-from ngraph.opset1.ops import asin
-from ngraph.opset4.ops import asinh
-from ngraph.opset3.ops import assign
-from ngraph.opset1.ops import atan
-from ngraph.opset4.ops import atanh
-from ngraph.opset1.ops import avg_pool
-from ngraph.opset5.ops import batch_norm_inference
-from ngraph.opset2.ops import batch_to_space
-from ngraph.opset1.ops import binary_convolution
-from ngraph.opset3.ops import broadcast
-from ngraph.opset3.ops import bucketize
-from ngraph.opset1.ops import ceiling
-from ngraph.opset1.ops import ceiling as ceil
-from ngraph.opset1.ops import clamp
-from ngraph.opset1.ops import concat
-from ngraph.opset1.ops import constant
-from ngraph.opset1.ops import convert
-from ngraph.opset1.ops import convert_like
-from ngraph.opset1.ops import convolution
-from ngraph.opset1.ops import convolution_backprop_data
-from ngraph.opset1.ops import cos
-from ngraph.opset1.ops import cosh
-from ngraph.opset1.ops import ctc_greedy_decoder
-from ngraph.opset6.ops import ctc_greedy_decoder_seq_len
-from ngraph.opset4.ops import ctc_loss
-from ngraph.opset3.ops import cum_sum
-from ngraph.opset3.ops import cum_sum as cumsum
-from ngraph.opset1.ops import deformable_convolution
-from ngraph.opset1.ops import deformable_psroi_pooling
-from ngraph.opset1.ops import depth_to_space
-from ngraph.opset1.ops import detection_output
-from ngraph.opset7.ops import dft
-from ngraph.opset1.ops import divide
-from ngraph.opset7.ops import einsum
-from ngraph.opset1.ops import elu
-from ngraph.opset3.ops import embedding_bag_offsets_sum
-from ngraph.opset3.ops import embedding_bag_packed_sum
-from ngraph.opset3.ops import embedding_segments_sum
-from ngraph.opset3.ops import extract_image_patches
-from ngraph.opset1.ops import equal
-from ngraph.opset1.ops import erf
-from ngraph.opset1.ops import exp
-from ngraph.opset1.ops import fake_quantize
-from ngraph.opset1.ops import floor
-from ngraph.opset1.ops import floor_mod
-from ngraph.opset7.ops import gather
-from ngraph.opset6.ops import gather_elements
-from ngraph.opset5.ops import gather_nd
-from ngraph.opset1.ops import gather_tree
-from ngraph.opset7.ops import gelu
-from ngraph.opset1.ops import greater
-from ngraph.opset1.ops import greater_equal
-from ngraph.opset1.ops import grn
-from ngraph.opset1.ops import group_convolution
-from ngraph.opset1.ops import group_convolution_backprop_data
-from ngraph.opset3.ops import gru_cell
-from ngraph.opset5.ops import gru_sequence
-from ngraph.opset1.ops import hard_sigmoid
-from ngraph.opset5.ops import hsigmoid
-from ngraph.opset4.ops import hswish
-from ngraph.opset7.ops import idft
-from ngraph.opset1.ops import interpolate
-from ngraph.opset1.ops import less
-from ngraph.opset1.ops import less_equal
-from ngraph.opset1.ops import log
-from ngraph.opset1.ops import logical_and
-from ngraph.opset1.ops import logical_not
-from ngraph.opset1.ops import logical_or
-from ngraph.opset1.ops import logical_xor
-from ngraph.opset5.ops import log_softmax
-from ngraph.opset5.ops import loop
-from ngraph.opset1.ops import lrn
-from ngraph.opset4.ops import lstm_cell
-from ngraph.opset5.ops import lstm_sequence
-from ngraph.opset1.ops import matmul
-from ngraph.opset1.ops import max_pool
-from ngraph.opset1.ops import maximum
-from ngraph.opset1.ops import minimum
-from ngraph.opset4.ops import mish
-from ngraph.opset1.ops import mod
-from ngraph.opset1.ops import multiply
-from ngraph.opset6.ops import mvn
-from ngraph.opset1.ops import negative
-from ngraph.opset5.ops import non_max_suppression
-from ngraph.opset3.ops import non_zero
-from ngraph.opset1.ops import normalize_l2
-from ngraph.opset1.ops import not_equal
-from ngraph.opset1.ops import one_hot
-from ngraph.opset1.ops import pad
-from ngraph.opset1.ops import parameter
-from ngraph.opset1.ops import power
-from ngraph.opset1.ops import prelu
-from ngraph.opset1.ops import prior_box
-from ngraph.opset1.ops import prior_box_clustered
-from ngraph.opset1.ops import psroi_pooling
-from ngraph.opset4.ops import proposal
-from ngraph.opset1.ops import range
-from ngraph.opset3.ops import read_value
-from ngraph.opset4.ops import reduce_l1
-from ngraph.opset4.ops import reduce_l2
-from ngraph.opset1.ops import reduce_logical_and
-from ngraph.opset1.ops import reduce_logical_or
-from ngraph.opset1.ops import reduce_max
-from ngraph.opset1.ops import reduce_mean
-from ngraph.opset1.ops import reduce_min
-from ngraph.opset1.ops import reduce_prod
-from ngraph.opset1.ops import reduce_sum
-from ngraph.opset1.ops import region_yolo
-from ngraph.opset2.ops import reorg_yolo
-from ngraph.opset1.ops import relu
-from ngraph.opset1.ops import reshape
-from ngraph.opset1.ops import result
-from ngraph.opset1.ops import reverse_sequence
-from ngraph.opset3.ops import rnn_cell
-from ngraph.opset5.ops import rnn_sequence
-from ngraph.opset3.ops import roi_align
-from ngraph.opset2.ops import roi_pooling
-from ngraph.opset7.ops import roll
-from ngraph.opset5.ops import round
-from ngraph.opset3.ops import scatter_elements_update
-from ngraph.opset3.ops import scatter_update
-from ngraph.opset1.ops import select
-from ngraph.opset1.ops import selu
-from ngraph.opset3.ops import shape_of
-from ngraph.opset3.ops import shuffle_channels
-from ngraph.opset1.ops import sigmoid
-from ngraph.opset1.ops import sign
-from ngraph.opset1.ops import sin
-from ngraph.opset1.ops import sinh
-from ngraph.opset1.ops import softmax
-from ngraph.opset4.ops import softplus
-from ngraph.opset2.ops import space_to_batch
-from ngraph.opset1.ops import space_to_depth
-from ngraph.opset1.ops import split
-from ngraph.opset1.ops import sqrt
-from ngraph.opset1.ops import squared_difference
-from ngraph.opset1.ops import squeeze
-from ngraph.opset1.ops import strided_slice
-from ngraph.opset1.ops import subtract
-from ngraph.opset4.ops import swish
-from ngraph.opset1.ops import tan
-from ngraph.opset1.ops import tanh
-from ngraph.opset1.ops import tensor_iterator
-from ngraph.opset1.ops import tile
-from ngraph.opset3.ops import topk
-from ngraph.opset1.ops import transpose
-from ngraph.opset1.ops import unsqueeze
-from ngraph.opset1.ops import variadic_split
diff --git a/src/bindings/python/src/compatibility/ngraph/opset7/ops.py b/src/bindings/python/src/compatibility/ngraph/opset7/ops.py
deleted file mode 100644
index d66d8a57e4dec8..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset7/ops.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-"""Factory functions for all ngraph ops."""
-from functools import partial
-from typing import Callable, Iterable, List, Optional, Set, Union
-
-import numpy as np
-from ngraph.impl import Node, Shape
-from ngraph.impl.op import Constant, Parameter
-from ngraph.opset_utils import _get_node_factory
-from ngraph.utils.decorators import binary_op, nameable_op, unary_op
-from ngraph.utils.input_validation import (
-    assert_list_of_ints,
-    check_valid_attributes,
-    is_non_negative_value,
-    is_positive_value,
-)
-from ngraph.utils.node_factory import NodeFactory
-from ngraph.utils.tensor_iterator_types import (
-    GraphBody,
-    TensorIteratorSliceInputDesc,
-    TensorIteratorMergedInputDesc,
-    TensorIteratorInvariantInputDesc,
-    TensorIteratorBodyOutputDesc,
-    TensorIteratorConcatOutputDesc,
-)
-from ngraph.utils.types import (
-    NodeInput,
-    NumericData,
-    NumericType,
-    ScalarData,
-    TensorShape,
-    as_node,
-    as_nodes,
-    get_dtype,
-    get_element_type,
-    get_element_type_str,
-    make_constant_node,
-)
-
-_get_node_factory_opset7 = partial(_get_node_factory, "opset7")
-
-
-# -------------------------------------------- ops ------------------------------------------------
-
-
-@nameable_op
-def einsum(inputs: List[Node], equation: str) -> Node:
-    """Return a node which performs Einsum operation.
-
-    :param inputs: The list of input nodes
-    :param equation: Einsum equation
-    :return: The new node performing Einsum operation on the inputs
-    """
-    attributes = {"equation": equation}
-
-    return _get_node_factory_opset7().create("Einsum", as_nodes(*inputs), attributes)
-
-
-@nameable_op
-def gelu(
-    data: Node,
-    approximation_mode: str,
-    name: Optional[str] = None,
-) -> Node:
-    """Return a node which performs Gelu activation function.
-
-    :param data: The node with data tensor.
-    :param approximation_mode: defines which approximation to use ('tanh' or 'erf')
-    :param name: Optional output node name.
-    :return: The new node performing a Gelu activation with the input tensor.
-    """
-    inputs = as_nodes(data)
-
-    attributes = {"approximation_mode": approximation_mode}
-
-    return _get_node_factory_opset7().create("Gelu", inputs, attributes)
-
-
-@nameable_op
-def roll(
-    data: NodeInput,
-    shift: NodeInput,
-    axes: NodeInput,
-) -> Node:
-    """Return a node which performs Roll operation.
-
-    :param data: The node with data tensor.
-    :param shift: The node with the tensor with numbers of places by which elements are shifted.
-    :param axes: The node with the tensor with axes along which elements are shifted.
-    :return: The new node performing a Roll operation on the input tensor.
-    """
-    inputs = as_nodes(data, shift, axes)
-
-    return _get_node_factory_opset7().create("Roll", inputs)
-
-
-@nameable_op
-def gather(
-    data: NodeInput,
-    indices: NodeInput,
-    axis: NodeInput,
-    batch_dims: Optional[int] = 0,
-) -> Node:
-    """Return a node which performs Gather.
-
-    :param data: N-D tensor with data for gathering
-    :param indices: N-D tensor with indices by which data is gathered
-    :param axis: axis along which elements are gathered
-    :param batch_dims: number of batch dimensions
-    :return: The new node which performs Gather
-    """
-    inputs = as_nodes(data, indices, axis)
-    attributes = {"batch_dims": batch_dims}
-    return _get_node_factory_opset7().create("Gather", inputs, attributes)
-
-
-def dft(
-    data: NodeInput,
-    axes: NodeInput,
-    signal_size: Optional[NodeInput] = None,
-) -> Node:
-    """Return a node which performs DFT operation.
-
-    :param data: Tensor with transformed data.
-    :param axes: Tensor with axes to transform.
-    :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
-    :return: The new node which performs DFT operation on the input data tensor.
-    """
-    if signal_size is None:
-        inputs = as_nodes(data, axes)
-    else:
-        inputs = as_nodes(data, axes, signal_size)
-
-    return _get_node_factory_opset7().create("DFT", inputs)
-
-
-@nameable_op
-def idft(
-    data: NodeInput,
-    axes: NodeInput,
-    signal_size: Optional[NodeInput] = None,
-) -> Node:
-    """Return a node which performs IDFT operation.
-
-    :param data: Tensor with transformed data.
-    :param axes: Tensor with axes to transform.
-    :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
-    :return: The new node which performs IDFT operation on the input data tensor.
-    """
-    if signal_size is None:
-        inputs = as_nodes(data, axes)
-    else:
-        inputs = as_nodes(data, axes, signal_size)
-
-    return _get_node_factory_opset7().create("IDFT", inputs)
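And a sketch of the opset7 factories dropped in the hunk above (hypothetical shapes):

.. code-block:: python

    import numpy as np
    from ngraph.opset7 import einsum, parameter

    a = parameter([2, 3], name="a", dtype=np.float32)
    b = parameter([3, 4], name="b", dtype=np.float32)
    # Matrix multiplication phrased through the removed Einsum factory.
    product = einsum([a, b], "ij,jk->ik")
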
diff --git a/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py
deleted file mode 100644
index b4bd72cb4b1384..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset8/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from ngraph.opset1.ops import absolute
-from ngraph.opset1.ops import absolute as abs
-from ngraph.opset1.ops import acos
-from ngraph.opset4.ops import acosh
-from ngraph.opset8.ops import adaptive_avg_pool
-from ngraph.opset8.ops import adaptive_max_pool
-from ngraph.opset1.ops import add
-from ngraph.opset1.ops import asin
-from ngraph.opset4.ops import asinh
-from ngraph.opset3.ops import assign
-from ngraph.opset1.ops import atan
-from ngraph.opset4.ops import atanh
-from ngraph.opset1.ops import avg_pool
-from ngraph.opset5.ops import batch_norm_inference
-from ngraph.opset2.ops import batch_to_space
-from ngraph.opset1.ops import binary_convolution
-from ngraph.opset3.ops import broadcast
-from ngraph.opset3.ops import bucketize
-from ngraph.opset1.ops import ceiling
-from ngraph.opset1.ops import ceiling as ceil
-from ngraph.opset1.ops import clamp
-from ngraph.opset1.ops import concat
-from ngraph.opset1.ops import constant
-from ngraph.opset1.ops import convert
-from ngraph.opset1.ops import convert_like
-from ngraph.opset1.ops import convolution
-from ngraph.opset1.ops import convolution_backprop_data
-from ngraph.opset1.ops import cos
-from ngraph.opset1.ops import cosh
-from ngraph.opset1.ops import ctc_greedy_decoder
-from ngraph.opset6.ops import ctc_greedy_decoder_seq_len
-from ngraph.opset4.ops import ctc_loss
-from ngraph.opset3.ops import cum_sum
-from ngraph.opset3.ops import cum_sum as cumsum
-from ngraph.opset8.ops import deformable_convolution
-from ngraph.opset1.ops import deformable_psroi_pooling
-from ngraph.opset1.ops import depth_to_space
-from ngraph.opset8.ops import detection_output
-from ngraph.opset7.ops import dft
-from ngraph.opset1.ops import divide
-from ngraph.opset7.ops import einsum
-from ngraph.opset1.ops import elu
-from ngraph.opset3.ops import embedding_bag_offsets_sum
-from ngraph.opset3.ops import embedding_bag_packed_sum
-from ngraph.opset3.ops import embedding_segments_sum
-from ngraph.opset3.ops import extract_image_patches
-from ngraph.opset1.ops import equal
-from ngraph.opset1.ops import erf
-from ngraph.opset1.ops import exp
-from ngraph.opset1.ops import fake_quantize
-from ngraph.opset1.ops import floor
-from ngraph.opset1.ops import floor_mod
-from ngraph.opset8.ops import gather
-from ngraph.opset6.ops import gather_elements
-from ngraph.opset8.ops import gather_nd
-from ngraph.opset1.ops import gather_tree
-from ngraph.opset7.ops import gelu
-from ngraph.opset1.ops import greater
-from ngraph.opset1.ops import greater_equal
-from ngraph.opset1.ops import grn
-from ngraph.opset1.ops import group_convolution
-from ngraph.opset1.ops import group_convolution_backprop_data
-from ngraph.opset3.ops import gru_cell
-from ngraph.opset5.ops import gru_sequence
-from ngraph.opset1.ops import hard_sigmoid
-from ngraph.opset5.ops import hsigmoid
-from ngraph.opset4.ops import hswish
-from ngraph.opset7.ops import idft
-from ngraph.opset8.ops import if_op
-from ngraph.opset1.ops import interpolate
-from ngraph.opset8.ops import i420_to_bgr
-from ngraph.opset8.ops import i420_to_rgb
-from ngraph.opset1.ops import less
-from ngraph.opset1.ops import less_equal
-from ngraph.opset1.ops import log
-from ngraph.opset1.ops import logical_and
-from ngraph.opset1.ops import logical_not
-from ngraph.opset1.ops import logical_or
-from ngraph.opset1.ops import logical_xor
-from ngraph.opset5.ops import log_softmax
-from ngraph.opset5.ops import loop
-from ngraph.opset1.ops import lrn
-from ngraph.opset4.ops import lstm_cell
-from ngraph.opset5.ops import lstm_sequence
-from ngraph.opset1.ops import matmul
-from ngraph.opset8.ops import matrix_nms
-from ngraph.opset8.ops import max_pool
-from ngraph.opset1.ops import maximum
-from ngraph.opset1.ops import minimum
-from ngraph.opset4.ops import mish
-from ngraph.opset1.ops import mod
-from ngraph.opset8.ops import multiclass_nms
-from ngraph.opset1.ops import multiply
-from ngraph.opset6.ops import mvn
-from ngraph.opset1.ops import negative
-from ngraph.opset5.ops import non_max_suppression
-from ngraph.opset3.ops import non_zero
-from ngraph.opset1.ops import normalize_l2
-from ngraph.opset1.ops import not_equal
-from ngraph.opset8.ops import nv12_to_bgr
-from ngraph.opset8.ops import nv12_to_rgb
-from ngraph.opset1.ops import one_hot
-from ngraph.opset1.ops import pad
-from ngraph.opset1.ops import parameter
-from ngraph.opset1.ops import power
-from ngraph.opset1.ops import prelu
-from ngraph.opset8.ops import prior_box
-from ngraph.opset1.ops import prior_box_clustered
-from ngraph.opset1.ops import psroi_pooling
-from ngraph.opset4.ops import proposal
-from ngraph.opset8.ops import random_uniform
-from ngraph.opset1.ops import range
-from ngraph.opset3.ops import read_value
-from ngraph.opset4.ops import reduce_l1
-from ngraph.opset4.ops import reduce_l2
-from ngraph.opset1.ops import reduce_logical_and
-from ngraph.opset1.ops import reduce_logical_or
-from ngraph.opset1.ops import reduce_max
-from ngraph.opset1.ops import reduce_mean
-from ngraph.opset1.ops import reduce_min
-from ngraph.opset1.ops import reduce_prod
-from ngraph.opset1.ops import reduce_sum
-from ngraph.opset1.ops import region_yolo
-from ngraph.opset2.ops import reorg_yolo
-from ngraph.opset1.ops import relu
-from ngraph.opset1.ops import reshape
-from ngraph.opset1.ops import result
-from ngraph.opset1.ops import reverse_sequence
-from ngraph.opset3.ops import rnn_cell
-from ngraph.opset5.ops import rnn_sequence
-from ngraph.opset3.ops import roi_align
-from ngraph.opset2.ops import roi_pooling
-from ngraph.opset7.ops import roll
-from ngraph.opset5.ops import round
-from ngraph.opset3.ops import scatter_elements_update
-from ngraph.opset3.ops import scatter_update
-from ngraph.opset1.ops import select
-from ngraph.opset1.ops import selu
-from ngraph.opset3.ops import shape_of
-from ngraph.opset3.ops import shuffle_channels
-from ngraph.opset1.ops import sigmoid
-from ngraph.opset1.ops import sign
-from ngraph.opset1.ops import sin
-from ngraph.opset1.ops import sinh
-from ngraph.opset8.ops import slice
-from ngraph.opset8.ops import softmax
-from ngraph.opset4.ops import softplus
-from ngraph.opset2.ops import space_to_batch
-from ngraph.opset1.ops import space_to_depth
-from ngraph.opset1.ops import split
-from ngraph.opset1.ops import sqrt
-from ngraph.opset1.ops import squared_difference
-from ngraph.opset1.ops import squeeze
-from ngraph.opset1.ops import strided_slice
-from ngraph.opset1.ops import subtract
-from ngraph.opset4.ops import swish
-from ngraph.opset1.ops import tan
-from ngraph.opset1.ops import tanh
-from ngraph.opset1.ops import tensor_iterator
-from ngraph.opset1.ops import tile
-from ngraph.opset3.ops import topk
-from ngraph.opset1.ops import transpose
-from ngraph.opset1.ops import unsqueeze
-from ngraph.opset1.ops import variadic_split
- :param offsets: The node providing offset tensor.
- :param filters: The node providing filters tensor.
- :param strides: The distance (in pixels) to slide the filter on the feature map over the axes.
- :param pads_begin: The number of pixels to add to the beginning along each axis.
- :param pads_end: The number of pixels to add to the end along each axis.
- :param dilations: The distance in width and height between elements (weights) in the filter.
- :param mask: The node providing modulation scalar (mask) tensor.
- :param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
- :param group: The number of groups which both output and input should be split into.
- :param deformable_group: The number of groups which deformable values and output should be split
- into along the channel axis.
- :param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation
- execution.
- :param name: The optional new name for output node.
- :return: New node performing deformable convolution operation.
- """
- if mask is None:
- inputs = as_nodes(data, offsets, filters)
- else:
- inputs = as_nodes(data, offsets, filters, mask)
-
- return _get_node_factory_opset8().create(
- "DeformableConvolution",
- inputs,
- {
- "strides": strides,
- "pads_begin": pads_begin,
- "pads_end": pads_end,
- "dilations": dilations,
- "auto_pad": auto_pad,
- "group": group,
- "deformable_group": deformable_group,
- "bilinear_interpolation_pad": bilinear_interpolation_pad,
- },
- )
-
-
-@nameable_op
-def adaptive_avg_pool(data: NodeInput, output_shape: NodeInput) -> Node:
- """Return a node which performs AdaptiveAvgPool operation.
-
- :param data: The node providing input data
- :param output_shape: the shape of spatial dimensions after operation
- :return: The new node performing AdaptiveAvgPool operation on the data
- """
- inputs = as_nodes(data, output_shape)
- return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs)
-
-
-@nameable_op
-def adaptive_max_pool(data: NodeInput, output_shape: NodeInput, index_element_type: str = "i64") -> Node:
- """Return a node which performs AdaptiveMaxPool operation.
-
- :param data: The node providing input data
- :param output_shape: the shape of spatial dimensions after operation
- :param index_element_type: Type of indices output.
- :return: The new node performing AdaptiveMaxPool operation on the data
- """
- inputs = as_nodes(data, output_shape)
-
- attributes = {
- "index_element_type": index_element_type,
- }
-
- return _get_node_factory_opset8().create("AdaptiveMaxPool", inputs, attributes)
-
-
-@nameable_op
-def multiclass_nms(
- boxes: NodeInput,
- scores: NodeInput,
- sort_result_type: str = "none",
- sort_result_across_batch: bool = False,
- output_type: str = "i64",
- iou_threshold: float = 0.0,
- score_threshold: float = 0.0,
- nms_top_k: int = -1,
- keep_top_k: int = -1,
- background_class: int = -1,
- nms_eta: float = 1.0,
- normalized: bool = True,
-) -> Node:
- """Return a node which performs MulticlassNms.
-
- :param boxes: Tensor with box coordinates.
- :param scores: Tensor with box scores.
- :param sort_result_type: Specifies order of output elements, possible values:
- 'class': sort selected boxes by class id (ascending)
- 'score': sort selected boxes by score (descending)
- 'none': do not guarantee the order.
- :param sort_result_across_batch: Specifies whether it is necessary to sort selected boxes
- across batches or not
- :param output_type: Specifies the output tensor type, possible values:
- 'i64', 'i32'
- :param iou_threshold: Specifies intersection over union threshold
- :param score_threshold: Specifies minimum score to consider box for the processing
- :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning
- to keep all boxes
- :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1
- meaning to keep all boxes
- :param background_class: Specifies the background class id, -1 meaning to keep all classes
- :param nms_eta: Specifies eta parameter for adaptive NMS, in the closed range [0, 1.0]
- :param normalized: Specifies whether boxes are normalized or not
- :return: The new node which performs MulticlassNms
- """
- inputs = as_nodes(boxes, scores)
-
- attributes = {
- "sort_result_type": sort_result_type,
- "sort_result_across_batch": sort_result_across_batch,
- "output_type": output_type,
- "iou_threshold": iou_threshold,
- "score_threshold": score_threshold,
- "nms_top_k": nms_top_k,
- "keep_top_k": keep_top_k,
- "background_class": background_class,
- "nms_eta": nms_eta,
- "normalized": normalized,
- }
-
- return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes)
-
-
-@nameable_op
-def matrix_nms(
- boxes: NodeInput,
- scores: NodeInput,
- sort_result_type: str = "none",
- sort_result_across_batch: bool = False,
- output_type: str = "i64",
- score_threshold: float = 0.0,
- nms_top_k: int = -1,
- keep_top_k: int = -1,
- background_class: int = -1,
- decay_function: str = "linear",
- gaussian_sigma: float = 2.0,
- post_threshold: float = 0.0,
- normalized: bool = True,
-) -> Node:
- """Return a node which performs MatrixNms.
-
- :param boxes: Tensor with box coordinates.
- :param scores: Tensor with box scores.
- :param sort_result_type: Specifies order of output elements, possible values:
- 'class': sort selected boxes by class id (ascending)
- 'score': sort selected boxes by score (descending)
- 'none': do not guarantee the order.
- :param sort_result_across_batch: Specifies whether it is necessary to sort selected boxes
- across batches or not
- :param output_type: Specifies the output tensor type, possible values:
- 'i64', 'i32'
- :param score_threshold: Specifies minimum score to consider box for the processing
- :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning
- to keep all boxes
- :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1
- meaning to keep all boxes
- :param background_class: Specifies the background class id, -1 meaning to keep all classes
- :param decay_function: Specifies decay function used to decay scores, possible values:
- 'gaussian', 'linear'
- :param gaussian_sigma: Specifies gaussian_sigma parameter for gaussian decay_function
- :param post_threshold: Specifies threshold to filter out boxes with low confidence score
- after decaying
- :param normalized: Specifies whether boxes are normalized or not
- :return: The new node which performs MatrixNms
- """
- inputs = as_nodes(boxes, scores)
-
- attributes = {
- "sort_result_type": sort_result_type,
- "sort_result_across_batch": sort_result_across_batch,
- "output_type": output_type,
- "score_threshold": score_threshold,
- "nms_top_k": nms_top_k,
- "keep_top_k": keep_top_k,
- "background_class": background_class,
- "decay_function": decay_function,
- "gaussian_sigma": gaussian_sigma,
- "post_threshold": post_threshold,
- "normalized": normalized,
- }
-
- return _get_node_factory_opset8().create("MatrixNms", inputs, attributes)
-
-
-@nameable_op
-def gather(
- data: NodeInput,
- indices: NodeInput,
- axis: NodeInput,
- batch_dims: Optional[int] = 0,
-) -> Node:
- """Return a node which performs Gather with support of negative indices.
-
- :param data: N-D tensor with data for gathering
- :param indices: N-D tensor with indices by which data is gathered. Negative indices
- indicate reverse indexing from the end
- :param axis: axis along which elements are gathered
- :param batch_dims: number of batch dimensions
- :return: The new node which performs Gather
- """
- inputs = as_nodes(data, indices, axis)
- attributes = {"batch_dims": batch_dims}
- return _get_node_factory_opset8().create("Gather", inputs, attributes)
-
-
-@nameable_op
-def max_pool(
- data: NodeInput,
- strides: List[int],
- dilations: List[int],
- pads_begin: List[int],
- pads_end: List[int],
- kernel_shape: TensorShape,
- rounding_type: str = "floor",
- auto_pad: Optional[str] = None,
- index_element_type: Optional[str] = "i64",
- axis: Optional[int] = 0,
- name: Optional[str] = None,
-) -> Node:
- """Perform max pooling operation and return both values and indices of the selected elements.
-
- :param data: The node providing input data.
- :param strides: The distance (in pixels) to slide the filter on the feature map
- over the axes.
- :param dilations: The dilation of filter elements (distance between elements).
- :param pads_begin: The number of pixels to add at the beginning along each axis.
- :param pads_end: The number of pixels to add at the end along each axis.
- :param kernel_shape: The pooling operation kernel shape.
- :param rounding_type: Determines the rounding schema used when computing output shape.
- Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'.
- :param auto_pad: Determines how the padding is calculated. Acceptable values:
- [None, 'same_upper', 'same_lower', 'valid']. Defaults to None.
- :param index_element_type: The data type used for the indices output of this operator.
- Defaults to i64.
- :param axis: The first dimension in the data shape used to determine the maximum
- returned index value. The value is the product of all dimensions
- starting at the provided axis. Defaults to 0.
- :param name: The optional name for the created output node.
-
- :return: The new node performing max pooling operation.
- """
- if auto_pad is None:
- auto_pad = "explicit"
- return _get_node_factory_opset8().create(
- "MaxPool",
- [as_node(data)],
- {
- "strides": strides,
- "dilations": dilations,
- "pads_begin": pads_begin,
- "pads_end": pads_end,
- "kernel": kernel_shape,
- "rounding_type": rounding_type.upper(),
- "auto_pad": auto_pad.upper(),
- "index_element_type": index_element_type,
- "axis": axis,
- },
- )
-
-
-@nameable_op
-def random_uniform(output_shape: NodeInput, min_val: NodeInput, max_val: NodeInput, output_type: str, global_seed: int = 0, op_seed: int = 0) -> Node:
- """Return a node which generates a sequence of random values from a uniform distribution.
-
- :param output_shape: Tensor with shape of the output tensor.
- :param min_val: Tensor with the lower bound on the range of random values to generate.
- :param max_val: Tensor with the upper bound on the range of random values to generate.
- :param output_type: Specifies the output tensor type, possible values:
- 'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
- :param global_seed: Specifies global seed value. Required to be a positive integer or 0.
- :param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
- :return: The new node which performs generation of random values from uniform distribution.
- """
- inputs = as_nodes(output_shape, min_val, max_val)
-
- if global_seed < 0:
- raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed))
-
- if op_seed < 0:
- raise RuntimeError("op_seed should be positive or 0. Got: {}".format(op_seed))
-
- attributes = {
- "output_type": output_type,
- "global_seed": global_seed,
- "op_seed": op_seed,
- }
- return _get_node_factory_opset8().create("RandomUniform", inputs, attributes)
-
-
-@nameable_op
-def if_op(
- condition: NodeInput,
- inputs: List[Node],
- bodies: Tuple[GraphBody, GraphBody],
- input_desc: Tuple[List[TensorIteratorInvariantInputDesc], List[TensorIteratorInvariantInputDesc]],
- output_desc: Tuple[List[TensorIteratorBodyOutputDesc], List[TensorIteratorBodyOutputDesc]],
- name: Optional[str] = None,
-) -> Node:
- """Execute one of the bodies depending on condition value.
-
- :param condition: A scalar or 1D tensor with 1 element specifying which body will be executed.
- If condition is True, then_body will be executed; otherwise, else_body.
- :param inputs: The provided inputs to If operation.
- :param bodies: Two graphs (then_body, else_body) which will be executed depending on
- condition value.
- :param input_desc: Two lists (for then_body and else_body) which contain rules how If
- inputs are connected with body parameters.
- :param output_desc: Two lists (for then_body and else_body) which contain rules how If
- outputs are connected with body results.
- :param name: The optional name for the created output node.
-
- :return: The new node which performs If operation.
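-
- Example (an illustrative sketch only; `then_body`, `else_body` and the
- descriptor lists are assumed to be prepared beforehand with the helpers
- from ngraph.utils.tensor_iterator_types):
-
- .. code-block:: python
-
- if_node = if_op(
- condition,
- [x, y],
- (then_body, else_body),
- (then_input_desc, else_input_desc),
- (then_output_desc, else_output_desc),
- )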
- """ - attributes = { - "then_body": bodies[0].serialize(), - "else_body": bodies[1].serialize(), - "then_inputs": {"invariant_input_desc": [desc.serialize() for desc in input_desc[0]]}, - "else_inputs": {"invariant_input_desc": [desc.serialize() for desc in input_desc[1]]}, - "then_outputs": {"body_output_desc": [desc.serialize() for desc in output_desc[0]]}, - "else_outputs": {"body_output_desc": [desc.serialize() for desc in output_desc[1]]}, - } - return _get_node_factory_opset8().create("If", as_nodes(condition, *inputs), attributes) - - -@nameable_op -def slice( - data: NodeInput, - start: NodeInput, - stop: NodeInput, - step: NodeInput, - axes: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which generates Slice operation. - - :param data: The node providing input data. - :param start: The node providing start indices (inclusively). - :param stop: The node providing stop indices (exclusively). - :param step: The node providing step values. - :param axes: The optional node providing axes to slice, default [0, 1, ..., len(start)-1]. - :param name: The optional name for the created output node. - :return: The new node performing Slice operation. - """ - if axes is None: - inputs = as_nodes(data, start, stop, step) - else: - inputs = as_nodes(data, start, stop, step, axes) - - return _get_node_factory_opset8().create("Slice", inputs) - - -@nameable_op -def gather_nd( - data: NodeInput, - indices: NodeInput, - batch_dims: Optional[int] = 0, - name: Optional[str] = None, -) -> Node: - """Return a node which performs GatherND. - - :param data: N-D tensor with data for gathering - :param indices: K-D tensor of tuples with indices by which data is gathered - :param batch_dims: Scalar value of batch dimensions - :return: The new node which performs GatherND - """ - inputs = as_nodes(data, indices) - - attributes = {"batch_dims": batch_dims} - - return _get_node_factory_opset8().create("GatherND", inputs, attributes) - - -def prior_box(layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None) -> Node: - """Generate prior boxes of specified sizes and aspect ratios across all dimensions. - - Available attributes are: - * min_size The minimum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - * max_size The maximum box size (in pixels). - Range of values: positive floating point numbers - Default value: [] - Required: no - * aspect_ratio Aspect ratios of prior boxes. - Range of values: set of positive floating point numbers - Default value: [] - Required: no - * flip The flag that denotes that each aspect_ratio is duplicated and flipped. - Range of values: {True, False} - Default value: False - Required: no - * clip The flag that denotes if each value in the output tensor should be clipped - to [0,1] interval. - Range of values: {True, False} - Default value: False - Required: no - * step The distance between box centers. - Range of values: floating point non-negative number - Default value: 0 - Required: no - * offset This is a shift of box respectively to top left corner. - Range of values: floating point non-negative number - Default value: None - Required: yes - * variance The variance denotes a variance of adjusting bounding boxes. The attribute - could contain 0, 1 or 4 elements. - Range of values: floating point positive numbers - Default value: [] - Required: no - * scale_all_sizes The flag that denotes type of inference. 
- Range of values: False - max_size is ignored - True - max_size is used - Default value: True - Required: no - * fixed_ratio This is an aspect ratio of a box. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * fixed_size This is an initial box size (in pixels). - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * density This is the square root of the number of boxes of each type. - Range of values: a list of positive floating-point numbers - Default value: None - Required: no - * min_max_aspect_ratios_order The flag that denotes the order of output prior box. - Range of values: False - the output prior box is in order of - [min, aspect_ratios, max] - True - the output prior box is in order of - [min, max, aspect_ratios] - Default value: True - Required: no - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'offset': 85, - } - attrs = { - 'offset': 85, - 'flip': True, - 'clip': True, - 'fixed_size': [32, 64, 128] - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - - :param layer_shape: Shape of layer for which prior boxes are computed. - :param image_shape: Shape of image to which prior boxes are scaled. - :param attrs: The dictionary containing key, value pairs for attributes. - :param name: Optional name for the output node. - :return: Node representing prior box operation. - """ - requirements = [ - ("offset", True, np.floating, is_non_negative_value), - ("min_size", False, np.floating, is_positive_value), - ("max_size", False, np.floating, is_positive_value), - ("aspect_ratio", False, np.floating, is_positive_value), - ("flip", False, np.bool_, None), - ("clip", False, np.bool_, None), - ("step", False, np.floating, is_non_negative_value), - ("variance", False, np.floating, is_positive_value), - ("scale_all_sizes", False, np.bool_, None), - ("fixed_ratio", False, np.floating, is_positive_value), - ("fixed_size", False, np.floating, is_positive_value), - ("density", False, np.floating, is_positive_value), - ("min_max_aspect_ratios_order", False, np.bool_, None), - ] - - check_valid_attributes("PriorBox", attrs, requirements) - - return _get_node_factory_opset8().create("PriorBox", [layer_shape, as_node(image_shape)], attrs) - - -@nameable_op -def i420_to_bgr( - arg: NodeInput, - arg_u: Optional[NodeInput] = None, - arg_v: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs I420toBGR operation. - - :param arg: The node providing single or Y plane data. - :param arg_u: The node providing U plane data. Required for separate planes. - :param arg_v: The node providing V plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing I420toBGR operation. - """ - if arg_u is None and arg_v is None: - inputs = as_nodes(arg) - elif arg_u is not None and arg_v is not None: - inputs = as_nodes(arg, arg_u, arg_v) - else: - raise UserInputError("Operation I420toBGR must have one (single plane) or three (separate planes) inputs provided.") - - return _get_node_factory_opset8().create("I420toBGR", inputs) - - -@nameable_op -def i420_to_rgb( - arg: NodeInput, - arg_u: Optional[NodeInput] = None, - arg_v: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs I420toRGB operation. 
- - :param arg: The node providing single or Y plane data. - :param arg_u: The node providing U plane data. Required for separate planes. - :param arg_v: The node providing V plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing I420toRGB operation. - """ - if arg_u is None and arg_v is None: - inputs = as_nodes(arg) - elif arg_u is not None and arg_v is not None: - inputs = as_nodes(arg, arg_u, arg_v) - else: - raise UserInputError("Operation I420toRGB must have one (single plane) or three (separate planes) inputs provided.") - - return _get_node_factory_opset8().create("I420toRGB", inputs) - - -@nameable_op -def nv12_to_bgr( - arg: NodeInput, - arg_uv: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs NV12toBGR operation. - - :param arg: The node providing single or Y plane data. - :param arg_uv: The node providing UV plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing NV12toBGR operation. - """ - if arg_uv is None: - inputs = as_nodes(arg) - else: - inputs = as_nodes(arg, arg_uv) - - return _get_node_factory_opset8().create("NV12toBGR", inputs) - - -@nameable_op -def nv12_to_rgb( - arg: NodeInput, - arg_uv: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Return a node which performs NV12toRGB operation. - - :param arg: The node providing single or Y plane data. - :param arg_uv: The node providing UV plane data. Required for separate planes. - :param name: The optional name for the created output node. - :return: The new node performing NV12toRGB operation. - """ - if arg_uv is None: - inputs = as_nodes(arg) - else: - inputs = as_nodes(arg, arg_uv) - - return _get_node_factory_opset8().create("NV12toRGB", inputs) - - -@nameable_op -def detection_output( - box_logits: NodeInput, - class_preds: NodeInput, - proposals: NodeInput, - attrs: dict, - aux_class_preds: Optional[NodeInput] = None, - aux_box_preds: Optional[NodeInput] = None, - name: Optional[str] = None, -) -> Node: - """Generate the detection output using information on location and confidence predictions. - - Available attributes are: - * background_label_id The background label id. - Range of values: integer value - Default value: 0 - Required: no - * top_k Maximum number of results to be kept per batch after NMS step. - Range of values: integer value - Default value: -1 - Required: no - * variance_encoded_in_target The flag that denotes if variance is encoded in target. - Range of values: {False, True} - Default value: False - Required: no - * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step. - Range of values: integer values - Default value: None - Required: yes - * code_type The type of coding method for bounding boxes. - Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE', - 'caffe.PriorBoxParameter.CORNER'} - Default value: 'caffe.PriorBoxParameter.CORNER' - Required: no - * share_location The flag that denotes if bounding boxes are shared among different - classes. - Range of values: {True, False} - Default value: True - Required: no - * nms_threshold The threshold to be used in the NMS stage. - Range of values: floating point value - Default value: None - Required: yes - * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be - considered. 
- Range of values: floating point value - Default value: 0 - Required: no - * clip_after_nms The flag that denotes whether to perform clip bounding boxes after - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - * clip_before_nms The flag that denotes whether to perform clip bounding boxes before - non-maximum suppression or not. - Range of values: {True, False} - Default value: False - Required: no - * decrease_label_id The flag that denotes how to perform NMS. - Range of values: False - perform NMS like in Caffe*. - True - perform NMS like in MxNet*. - Default value: False - Required: no - * normalized The flag that denotes whether input tensors with boxes are normalized. - Range of values: {True, False} - Default value: False - Required: no - * input_height The input image height. - Range of values: positive integer number - Default value: 1 - Required: no - * input_width The input image width. - Range of values: positive integer number - Default value: 1 - Required: no - * objectness_score The threshold to sort out confidence predictions. - Range of values: non-negative float number - Default value: 0 - Required: no - Example of attribute dictionary: - - .. code-block:: python - - # just required ones - attrs = { - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - } - attrs = { - 'keep_top_k': [1, 2, 3], - 'nms_threshold': 0.645, - 'normalized': True, - 'clip_before_nms': True, - 'input_height': [32], - 'input_width': [32], - } - - Optional attributes which are absent from dictionary will be set with corresponding default. - - :param box_logits: The 2D input tensor with box logits. - :param class_preds: The 2D input tensor with class predictions. - :param proposals: The 3D input tensor with proposals. - :param attrs: The dictionary containing key, value pairs for attributes. - :param aux_class_preds: The 2D input tensor with additional class predictions information. - :param aux_box_preds: The 2D input tensor with additional box predictions information. - :param name: Optional name for the output node. - :return: Node representing DetectionOutput operation. - """ - requirements = [ - ("background_label_id", False, np.integer, None), - ("top_k", False, np.integer, None), - ("variance_encoded_in_target", False, np.bool_, None), - ("keep_top_k", True, np.integer, None), - ("code_type", False, np.str_, None), - ("share_location", False, np.bool_, None), - ("nms_threshold", True, np.floating, None), - ("confidence_threshold", False, np.floating, None), - ("clip_after_nms", False, np.bool_, None), - ("clip_before_nms", False, np.bool_, None), - ("decrease_label_id", False, np.bool_, None), - ("normalized", False, np.bool_, None), - ("input_height", False, np.integer, is_positive_value), - ("input_width", False, np.integer, is_positive_value), - ("objectness_score", False, np.floating, is_non_negative_value), - ] - - check_valid_attributes("DetectionOutput", attrs, requirements) - - inputs = [box_logits, class_preds, proposals] - if aux_class_preds is not None: - inputs.append(aux_class_preds) - if aux_box_preds is not None: - inputs.append(aux_box_preds) - inputs = as_nodes(*inputs) - - return _get_node_factory_opset8().create("DetectionOutput", inputs, attrs) - - -@nameable_op -def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: - """Apply softmax operation on each element of input tensor. - - :param data: The tensor providing input data. - :param axis: An axis along which Softmax should be calculated. 
Can be positive or negative. - :param name: Optional name for the node - :return: The new node with softmax operation applied on each element. - """ - return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis}) diff --git a/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py b/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py deleted file mode 100644 index b967e2c6d0d068..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/opset9/__init__.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from ngraph.opset1.ops import absolute -from ngraph.opset1.ops import absolute as abs -from ngraph.opset1.ops import acos -from ngraph.opset4.ops import acosh -from ngraph.opset8.ops import adaptive_avg_pool -from ngraph.opset8.ops import adaptive_max_pool -from ngraph.opset1.ops import add -from ngraph.opset1.ops import asin -from ngraph.opset4.ops import asinh -from ngraph.opset3.ops import assign -from ngraph.opset1.ops import atan -from ngraph.opset4.ops import atanh -from ngraph.opset1.ops import avg_pool -from ngraph.opset5.ops import batch_norm_inference -from ngraph.opset2.ops import batch_to_space -from ngraph.opset1.ops import binary_convolution -from ngraph.opset3.ops import broadcast -from ngraph.opset3.ops import bucketize -from ngraph.opset1.ops import ceiling -from ngraph.opset1.ops import ceiling as ceil -from ngraph.opset1.ops import clamp -from ngraph.opset1.ops import concat -from ngraph.opset1.ops import constant -from ngraph.opset1.ops import convert -from ngraph.opset1.ops import convert_like -from ngraph.opset1.ops import convolution -from ngraph.opset1.ops import convolution_backprop_data -from ngraph.opset1.ops import cos -from ngraph.opset1.ops import cosh -from ngraph.opset1.ops import ctc_greedy_decoder -from ngraph.opset6.ops import ctc_greedy_decoder_seq_len -from ngraph.opset4.ops import ctc_loss -from ngraph.opset3.ops import cum_sum -from ngraph.opset3.ops import cum_sum as cumsum -from ngraph.opset8.ops import deformable_convolution -from ngraph.opset1.ops import deformable_psroi_pooling -from ngraph.opset1.ops import depth_to_space -from ngraph.opset8.ops import detection_output -from ngraph.opset7.ops import dft -from ngraph.opset1.ops import divide -from ngraph.opset7.ops import einsum -from ngraph.opset1.ops import elu -from ngraph.opset3.ops import embedding_bag_offsets_sum -from ngraph.opset3.ops import embedding_bag_packed_sum -from ngraph.opset3.ops import embedding_segments_sum -from ngraph.opset3.ops import extract_image_patches -from ngraph.opset1.ops import equal -from ngraph.opset1.ops import erf -from ngraph.opset1.ops import exp -from ngraph.opset9.ops import eye -from ngraph.opset1.ops import fake_quantize -from ngraph.opset1.ops import floor -from ngraph.opset1.ops import floor_mod -from ngraph.opset8.ops import gather -from ngraph.opset6.ops import gather_elements -from ngraph.opset8.ops import gather_nd -from ngraph.opset1.ops import gather_tree -from ngraph.opset7.ops import gelu -from ngraph.opset9.ops import generate_proposals -from ngraph.opset1.ops import greater -from ngraph.opset1.ops import greater_equal -from ngraph.opset9.ops import grid_sample -from ngraph.opset1.ops import grn -from ngraph.opset1.ops import group_convolution -from ngraph.opset1.ops import group_convolution_backprop_data -from ngraph.opset3.ops import gru_cell -from ngraph.opset5.ops import gru_sequence -from ngraph.opset1.ops import 
hard_sigmoid -from ngraph.opset5.ops import hsigmoid -from ngraph.opset4.ops import hswish -from ngraph.opset7.ops import idft -from ngraph.opset8.ops import if_op -from ngraph.opset1.ops import interpolate -from ngraph.opset9.ops import irdft -from ngraph.opset8.ops import i420_to_bgr -from ngraph.opset8.ops import i420_to_rgb -from ngraph.opset1.ops import less -from ngraph.opset1.ops import less_equal -from ngraph.opset1.ops import log -from ngraph.opset1.ops import logical_and -from ngraph.opset1.ops import logical_not -from ngraph.opset1.ops import logical_or -from ngraph.opset1.ops import logical_xor -from ngraph.opset5.ops import log_softmax -from ngraph.opset5.ops import loop -from ngraph.opset1.ops import lrn -from ngraph.opset4.ops import lstm_cell -from ngraph.opset5.ops import lstm_sequence -from ngraph.opset1.ops import matmul -from ngraph.opset8.ops import matrix_nms -from ngraph.opset8.ops import max_pool -from ngraph.opset1.ops import maximum -from ngraph.opset1.ops import minimum -from ngraph.opset4.ops import mish -from ngraph.opset1.ops import mod -from ngraph.opset9.ops import multiclass_nms -from ngraph.opset1.ops import multiply -from ngraph.opset6.ops import mvn -from ngraph.opset1.ops import negative -from ngraph.opset9.ops import non_max_suppression -from ngraph.opset3.ops import non_zero -from ngraph.opset1.ops import normalize_l2 -from ngraph.opset1.ops import not_equal -from ngraph.opset8.ops import nv12_to_bgr -from ngraph.opset8.ops import nv12_to_rgb -from ngraph.opset1.ops import one_hot -from ngraph.opset1.ops import pad -from ngraph.opset1.ops import parameter -from ngraph.opset1.ops import power -from ngraph.opset1.ops import prelu -from ngraph.opset8.ops import prior_box -from ngraph.opset1.ops import prior_box_clustered -from ngraph.opset1.ops import psroi_pooling -from ngraph.opset4.ops import proposal -from ngraph.opset8.ops import random_uniform -from ngraph.opset1.ops import range -from ngraph.opset9.ops import rdft -from ngraph.opset3.ops import read_value -from ngraph.opset4.ops import reduce_l1 -from ngraph.opset4.ops import reduce_l2 -from ngraph.opset1.ops import reduce_logical_and -from ngraph.opset1.ops import reduce_logical_or -from ngraph.opset1.ops import reduce_max -from ngraph.opset1.ops import reduce_mean -from ngraph.opset1.ops import reduce_min -from ngraph.opset1.ops import reduce_prod -from ngraph.opset1.ops import reduce_sum -from ngraph.opset1.ops import region_yolo -from ngraph.opset2.ops import reorg_yolo -from ngraph.opset1.ops import relu -from ngraph.opset1.ops import reshape -from ngraph.opset1.ops import result -from ngraph.opset1.ops import reverse_sequence -from ngraph.opset3.ops import rnn_cell -from ngraph.opset5.ops import rnn_sequence -from ngraph.opset9.ops import roi_align -from ngraph.opset2.ops import roi_pooling -from ngraph.opset7.ops import roll -from ngraph.opset5.ops import round -from ngraph.opset3.ops import scatter_elements_update -from ngraph.opset3.ops import scatter_update -from ngraph.opset1.ops import select -from ngraph.opset1.ops import selu -from ngraph.opset3.ops import shape_of -from ngraph.opset3.ops import shuffle_channels -from ngraph.opset1.ops import sigmoid -from ngraph.opset1.ops import sign -from ngraph.opset1.ops import sin -from ngraph.opset1.ops import sinh -from ngraph.opset8.ops import slice -from ngraph.opset8.ops import softmax -from ngraph.opset4.ops import softplus -from ngraph.opset9.ops import softsign -from ngraph.opset2.ops import space_to_batch -from ngraph.opset1.ops import 
space_to_depth
-from ngraph.opset1.ops import split
-from ngraph.opset1.ops import sqrt
-from ngraph.opset1.ops import squared_difference
-from ngraph.opset1.ops import squeeze
-from ngraph.opset1.ops import strided_slice
-from ngraph.opset1.ops import subtract
-from ngraph.opset4.ops import swish
-from ngraph.opset1.ops import tan
-from ngraph.opset1.ops import tanh
-from ngraph.opset1.ops import tensor_iterator
-from ngraph.opset1.ops import tile
-from ngraph.opset3.ops import topk
-from ngraph.opset1.ops import transpose
-from ngraph.opset1.ops import unsqueeze
-from ngraph.opset1.ops import variadic_split
diff --git a/src/bindings/python/src/compatibility/ngraph/opset9/ops.py b/src/bindings/python/src/compatibility/ngraph/opset9/ops.py
deleted file mode 100644
index 1c744216e9dadb..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset9/ops.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-"""Factory functions for all ngraph ops."""
-from functools import partial
-from typing import Optional
-
-import numpy as np
-from ngraph.impl import Node
-from ngraph.opset_utils import _get_node_factory
-from ngraph.utils.decorators import nameable_op
-from ngraph.utils.types import (
- NodeInput,
- as_nodes,
- as_node,
- make_constant_node,
-)
-
-
-_get_node_factory_opset9 = partial(_get_node_factory, "opset9")
-
-
-# -------------------------------------------- ops ------------------------------------------------
-
-
-@nameable_op
-def eye(
- num_rows: NodeInput,
- num_columns: NodeInput,
- diagonal_index: NodeInput,
- output_type: str,
- batch_shape: Optional[NodeInput] = None,
- name: Optional[str] = None,
-) -> Node:
- """Return a node which performs Eye operation.
-
- :param num_rows: The node providing row number tensor.
- :param num_columns: The node providing column number tensor.
- :param diagonal_index: The node providing the index of the diagonal to be populated.
- :param output_type: Specifies the output tensor type, supports any numeric types.
- :param batch_shape: The node providing the leading batch dimensions of output shape. Optional.
- :param name: The optional new name for output node.
- :return: New node performing the Eye operation.
- """
- if batch_shape is not None:
- inputs = as_nodes(num_rows, num_columns, diagonal_index, batch_shape)
- else:
- inputs = as_nodes(num_rows, num_columns, diagonal_index)
-
- return _get_node_factory_opset9().create("Eye", inputs, {"output_type": output_type})
-
-
-def roi_align(
- data: NodeInput,
- rois: NodeInput,
- batch_indices: NodeInput,
- pooled_h: int,
- pooled_w: int,
- sampling_ratio: int,
- spatial_scale: float,
- mode: str,
- aligned_mode: Optional[str] = "asymmetric",
- name: Optional[str] = None,
-) -> Node:
- """Return a node which performs ROIAlign operation.
-
- :param data: Input data.
- :param rois: RoIs (Regions of Interest) to pool over.
- :param batch_indices: Tensor with each element denoting the index of
- the corresponding image in the batch.
- :param pooled_h: Height of the ROI output feature map.
- :param pooled_w: Width of the ROI output feature map.
- :param sampling_ratio: Number of bins over height and width to use to calculate
- each output feature map element.
- :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
- :param mode: Method to perform pooling to produce output feature map elements.
Available modes are:
- - 'max' - maximum pooling
- - 'avg' - average pooling
- :param aligned_mode: Specifies how to transform the coordinate in original tensor to the resized tensor.
- Mode 'asymmetric' is the default value. Optional. Available aligned modes are:
- - 'asymmetric'
- - 'half_pixel_for_nn'
- - 'half_pixel'
- :param name: The optional name for the output node
-
- :return: The new node which performs ROIAlign
- """
- inputs = as_nodes(data, rois, batch_indices)
- attributes = {
- "pooled_h": pooled_h,
- "pooled_w": pooled_w,
- "sampling_ratio": sampling_ratio,
- "spatial_scale": spatial_scale,
- "mode": mode,
- "aligned_mode": aligned_mode,
- }
- return _get_node_factory_opset9().create("ROIAlign", inputs, attributes)
-
-
-@nameable_op
-def non_max_suppression(
- boxes: NodeInput,
- scores: NodeInput,
- max_output_boxes_per_class: Optional[NodeInput] = None,
- iou_threshold: Optional[NodeInput] = None,
- score_threshold: Optional[NodeInput] = None,
- soft_nms_sigma: Optional[NodeInput] = None,
- box_encoding: str = "corner",
- sort_result_descending: bool = True,
- output_type: str = "i64",
- name: Optional[str] = None,
-) -> Node:
- """Return a node which performs NonMaxSuppression.
-
- :param boxes: Tensor with box coordinates.
- :param scores: Tensor with box scores.
- :param max_output_boxes_per_class: Tensor specifying maximum number of boxes
- to be selected per class.
- :param iou_threshold: Tensor specifying intersection over union threshold
- :param score_threshold: Tensor specifying minimum score to consider box for the processing.
- :param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS.
- :param box_encoding: Format of boxes data encoding.
- :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
- boxes across batches or not.
- :param output_type: Output element type.
- :return: The new node which performs NonMaxSuppression
- """
- max_output_boxes_per_class = max_output_boxes_per_class if max_output_boxes_per_class is not None else make_constant_node(0, np.int64)
- iou_threshold = iou_threshold if iou_threshold is not None else make_constant_node(0, np.float32)
- score_threshold = score_threshold if score_threshold is not None else make_constant_node(0, np.float32)
- soft_nms_sigma = soft_nms_sigma if soft_nms_sigma is not None else make_constant_node(0, np.float32)
-
- inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, soft_nms_sigma)
-
- attributes = {
- "box_encoding": box_encoding,
- "sort_result_descending": sort_result_descending,
- "output_type": output_type,
- }
-
- return _get_node_factory_opset9().create("NonMaxSuppression", inputs, attributes)
-
-
-def softsign(node: NodeInput, name: Optional[str] = None) -> Node:
- """Apply SoftSign operation on the input node element-wise.
-
- :param node: One of: input node, array or scalar.
- :param name: The optional name for the output node.
- :return: New node with SoftSign operation applied on each element of it.
- """
- return _get_node_factory_opset9().create("SoftSign", [as_node(node)], {})
-
-
-@nameable_op
-def rdft(
- data: NodeInput,
- axes: NodeInput,
- signal_size: Optional[NodeInput] = None,
-) -> Node:
- """Return a node which performs RDFT operation.
-
- :param data: Tensor with data.
- :param axes: Tensor with axes to transform.
- :param signal_size: Optional tensor specifying signal size with respect to axes from the input 'axes'.
- :return: The new node which performs RDFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset9().create("RDFT", inputs) - - -@nameable_op -def irdft( - data: NodeInput, - axes: NodeInput, - signal_size: Optional[NodeInput] = None, -) -> Node: - """Return a node which performs IRDFT operation. - - :param data: Tensor with data. - :param axes: Tensor with axes to transform. - :param signal_size: Optional tensor specifying signal size with respect to axes from the input 'axes'. - :return: The new node which performs IRDFT operation on the input data tensor. - """ - if signal_size is None: - inputs = as_nodes(data, axes) - else: - inputs = as_nodes(data, axes, signal_size) - - return _get_node_factory_opset9().create("IRDFT", inputs) - - -@nameable_op -def multiclass_nms( - boxes: NodeInput, - scores: NodeInput, - roisnum: Optional[NodeInput] = None, - sort_result_type: Optional[str] = "none", - sort_result_across_batch: Optional[bool] = False, - output_type: Optional[str] = "i64", - iou_threshold: Optional[float] = 0.0, - score_threshold: Optional[float] = 0.0, - nms_top_k: Optional[int] = -1, - keep_top_k: Optional[int] = -1, - background_class: Optional[int] = -1, - nms_eta: Optional[float] = 1.0, - normalized: Optional[bool] = True, -) -> Node: - """Return a node which performs MulticlassNms. - - :param boxes: Tensor with box coordinates. - :param scores: Tensor with box scores. - :param roisnum: Tensor with roisnum. Specifies the number of rois in each image. Required when - 'scores' is a 2-dimensional tensor. - :param sort_result_type: Specifies order of output elements, possible values: - 'class': sort selected boxes by class id (ascending) - 'score': sort selected boxes by score (descending) - 'none': do not guarantee the order. 
- :param sort_result_across_batch: Specifies whether it is necessary to sort selected boxes
- across batches or not
- :param output_type: Specifies the output tensor type, possible values:
- 'i64', 'i32'
- :param iou_threshold: Specifies intersection over union threshold
- :param score_threshold: Specifies minimum score to consider box for the processing
- :param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning
- to keep all boxes
- :param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1
- meaning to keep all boxes
- :param background_class: Specifies the background class id, -1 meaning to keep all classes
- :param nms_eta: Specifies eta parameter for adaptive NMS, in the closed range [0, 1.0]
- :param normalized: Specifies whether boxes are normalized or not
- :return: The new node which performs MulticlassNms
- """
- if roisnum is None:
- inputs = as_nodes(boxes, scores)
- else:
- inputs = as_nodes(boxes, scores, roisnum)
-
- attributes = {
- "sort_result_type": sort_result_type,
- "sort_result_across_batch": sort_result_across_batch,
- "output_type": output_type,
- "iou_threshold": iou_threshold,
- "score_threshold": score_threshold,
- "nms_top_k": nms_top_k,
- "keep_top_k": keep_top_k,
- "background_class": background_class,
- "nms_eta": nms_eta,
- "normalized": normalized,
- }
-
- return _get_node_factory_opset9().create("MulticlassNms", inputs, attributes)
-
-
-def generate_proposals(
- im_info: NodeInput,
- anchors: NodeInput,
- deltas: NodeInput,
- scores: NodeInput,
- min_size: float,
- nms_threshold: float,
- pre_nms_count: int,
- post_nms_count: int,
- normalized: bool = True,
- nms_eta: float = 1.0,
- roi_num_type: str = "i64",
- name: Optional[str] = None,
-) -> Node:
- """Return a node which performs GenerateProposals operation.
-
- :param im_info: Input with image info.
- :param anchors: Input anchors.
- :param deltas: Input deltas.
- :param scores: Input scores.
- :param min_size: Specifies minimum box width and height.
- :param nms_threshold: Specifies threshold to be used in the NMS stage.
- :param pre_nms_count: Specifies number of top-n proposals before NMS.
- :param post_nms_count: Specifies number of top-n proposals after NMS.
- :param normalized: Specifies whether proposal bboxes are normalized or not. Optional attribute, default value is `True`.
- :param nms_eta: Specifies eta parameter for adaptive NMS; must be in range `[0.0, 1.0]`. Optional attribute, default value is `1.0`.
- :param roi_num_type: Specifies the element type of the third output `rpnroisnum`. Optional attribute, range of values: `i64` (default) or `i32`.
- :param name: The optional name for the output node.
- :return: New node performing GenerateProposals operation.
- """
- inputs = as_nodes(im_info, anchors, deltas, scores)
-
- attributes = {
- "min_size": min_size,
- "nms_threshold": nms_threshold,
- "pre_nms_count": pre_nms_count,
- "post_nms_count": post_nms_count,
- "normalized": normalized,
- "nms_eta": nms_eta,
- "roi_num_type": roi_num_type,
- }
-
- return _get_node_factory_opset9().create("GenerateProposals", inputs, attributes)
-
-
-def grid_sample(data: NodeInput, grid: NodeInput, attributes: dict, name: Optional[str] = None) -> Node:
- """Return a node which performs GridSample operation.
-
- :param data: The input image.
- :param grid: Grid values (normalized input coordinates).
- :param attributes: A dictionary containing GridSample's attributes.
- :param name: Optional name of the node.
- Available attributes:
- * align_corners A flag which specifies whether to align the grid extrema values
- with the borders or center points of the input tensor's border pixels.
- Range of values: true, false
- Default value: false
- Required: no
- * mode Specifies the type of interpolation.
- Range of values: bilinear, bicubic, nearest
- Default value: bilinear
- Required: no
- * padding_mode Specifies how the out-of-bounds coordinates should be handled.
- Range of values: zeros, border, reflection
- Default value: zeros
- Required: no
- :return: A new GridSample node.
- """
- return _get_node_factory_opset9().create("GridSample", as_nodes(data, grid), attributes)
diff --git a/src/bindings/python/src/compatibility/ngraph/opset_utils.py b/src/bindings/python/src/compatibility/ngraph/opset_utils.py
deleted file mode 100644
index a639dcbe90abd2..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/opset_utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from typing import Optional
-import numpy as np
-
-from ngraph.impl import Node
-from ngraph.utils.decorators import nameable_op
-from ngraph.utils.node_factory import NodeFactory
-from ngraph.utils.types import (
- as_node,
- NodeInput,
-)
-
-
-def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory:
- """Return NodeFactory configured to create operators from specified opset version."""
- if opset_version:
- return NodeFactory(opset_version)
- else:
- return NodeFactory()
diff --git a/src/bindings/python/src/compatibility/ngraph/utils/__init__.py b/src/bindings/python/src/compatibility/ngraph/utils/__init__.py
deleted file mode 100644
index 0375e1394e7a63..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/utils/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-"""Generic utilities. Factor related functions out to separate files."""
diff --git a/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py b/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py
deleted file mode 100644
index 7d4bb114d00f0d..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/utils/broadcasting.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-import logging
-from typing import List, Optional
-
-import ngraph as ng
-from ngraph.impl import AxisSet, Node
-from ngraph.utils.types import NodeInput, TensorShape, get_dtype, make_constant_node
-
-log = logging.getLogger(__name__)
-
-
-def get_broadcast_axes(output_shape: TensorShape, input_shape: TensorShape, axis: Optional[int] = None) -> AxisSet:
- """Generate a list of broadcast axes for ngraph++ broadcast.
-
- Informally, a broadcast "adds" axes to the input tensor,
- replicating elements from the input tensor as needed to fill the new dimensions.
- The function calculates which of the output axes are added in this way.
-
- :param output_shape: The new shape for the output tensor.
- :param input_shape: The shape of input tensor.
- :param axis: The axis along which we want to replicate elements.
- :return: The indices of added axes.
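-
- Example (a minimal sketch; the shapes are illustrative):
-
- .. code-block:: python
-
- # input [4] is right-aligned against output [2, 3, 4],
- # so axes 0 and 1 are the broadcast (added) axes
- get_broadcast_axes([2, 3, 4], [4]) # -> AxisSet({0, 1})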
- """ - axes_indexes = list(range(0, len(output_shape))) - if axis is None: - output_begin = len(output_shape) - len(input_shape) - else: - output_begin = axis - right_axes_indexes = list(range(output_begin, output_begin + len(input_shape))) - for index in reversed(right_axes_indexes): - del axes_indexes[index] - return AxisSet(set(axes_indexes)) diff --git a/src/bindings/python/src/compatibility/ngraph/utils/decorators.py b/src/bindings/python/src/compatibility/ngraph/utils/decorators.py deleted file mode 100644 index a0b955714a0ba4..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/decorators.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from functools import wraps -from typing import Any, Callable - -from ngraph.impl import Node -from ngraph.utils.types import NodeInput, as_node, as_nodes - - -def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node: - if "name" in kwargs: - node.friendly_name = kwargs["name"] - return node - - -def nameable_op(node_factory_function: Callable) -> Callable: - """Set the name to the ngraph operator returned by the wrapped function.""" - - @wraps(node_factory_function) - def wrapper(*args: Any, **kwargs: Any) -> Node: - node = node_factory_function(*args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper - - -def unary_op(node_factory_function: Callable) -> Callable: - """Convert the first input value to a Constant Node if a numeric value is detected.""" - - @wraps(node_factory_function) - def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: - input_node = as_node(input_value) - node = node_factory_function(input_node, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper - - -def binary_op(node_factory_function: Callable) -> Callable: - """Convert the first two input values to Constant Nodes if numeric values are detected.""" - - @wraps(node_factory_function) - def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node: - left, right = as_nodes(left, right) - node = node_factory_function(left, right, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) - return node - - return wrapper diff --git a/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py b/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py deleted file mode 100644 index f0e5f61f52fb50..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/input_validation.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Helper functions for validating user input.""" - -import logging -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type - -import numpy as np - -from ngraph.exceptions import UserInputError - -log = logging.getLogger(__name__) - - -def assert_list_of_ints(value_list: Iterable[int], message: str) -> None: - """Verify that the provided value is an iterable of integers.""" - try: - for value in value_list: - if not isinstance(value, int): - raise TypeError - except TypeError: - log.warning(message) - raise UserInputError(message, value_list) - - -def _check_value(op_name, attr_key, value, val_type, cond=None): - # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool - """Check whether provided value satisfies specified criteria. 
-
- :param op_name: The operator name which attributes are checked.
- :param attr_key: The attribute name.
- :param value: The value to check.
- :param val_type: Required value type.
- :param cond: The optional function running additional checks.
-
- :raises UserInputError:
- :return: True if attribute satisfies all criteria. Otherwise False.
- """
- if not np.issubdtype(type(value), val_type):
- raise UserInputError('{} operator attribute "{}" value must be of type {}.'.format(op_name, attr_key, val_type))
- if cond is not None and not cond(value):
- raise UserInputError('{} operator attribute "{}" value does not satisfy provided condition.'.format(op_name, attr_key))
- return True
-
-
-def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False):
- # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool
- """Check whether specified attribute satisfies given criteria.
-
- :param op_name: The operator name which attributes are checked.
- :param attr_dict: Dictionary containing key-value attributes to check.
- :param attr_key: Key value for validated attribute.
- :param val_type: Value type for validated attribute.
- :param cond: Any callable which accepts an attribute value and returns True or False.
- :param required: Whether the provided attribute key is required. If not, it may be missing
- from the provided dictionary.
-
- :raises UserInputError:
-
- :return: True if attribute satisfies all criteria. Otherwise False.
- """
- result = True
-
- if required and attr_key not in attr_dict:
- raise UserInputError('Provided dictionary is missing {} operator required attribute "{}"'.format(op_name, attr_key))
-
- if attr_key not in attr_dict:
- return result
-
- attr_value = attr_dict[attr_key]
-
- if np.isscalar(attr_value):
- result = result and _check_value(op_name, attr_key, attr_value, val_type, cond)
- else:
- for v in attr_value:
- result = result and _check_value(op_name, attr_key, v, val_type, cond)
-
- return result
-
-
-def check_valid_attributes(
- op_name, # type: str
- attributes, # type: Dict[str, Any]
- requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]]
-):
- # type: (...) -> bool
- """Perform attributes validation according to specified type, value criteria.
-
- :param op_name: The operator name which attributes are checked.
- :param attributes: The dictionary with user provided attributes to check.
- :param requirements: The list of tuples describing attributes' requirements. The tuple should
- contain following values:
- (attr_name: str,
- is_required: bool,
- value_type: Type,
- value_condition: Callable)
-
- :raises UserInputError:
- :return: True if all attributes satisfy the criteria. Otherwise False.
- """
- for attr, required, val_type, cond in requirements:
- check_valid_attribute(op_name, attributes, attr, val_type, cond, required)
- return True
-
-
-def is_positive_value(x): # type: (Any) -> bool
- """Determine whether the specified x is a positive value.
-
- :param x: The value to check.
-
- :return: True if the specified x is a positive value, False otherwise.
- """
- return x > 0
-
-
-def is_non_negative_value(x): # type: (Any) -> bool
- """Determine whether the specified x is a non-negative value.
-
- :param x: The value to check.
-
- :return: True if the specified x is a non-negative value, False otherwise.
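-
- Example (sketch):
-
- .. code-block:: python
-
- is_non_negative_value(0) # True
- is_non_negative_value(-0.5) # False
- # typically passed as the `cond` callable of a requirement tuple:
- # ("offset", True, np.floating, is_non_negative_value)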
- """ - return x >= 0 diff --git a/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py b/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py deleted file mode 100644 index 0e3d2cc09cecc2..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/node_factory.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from functools import partial -from typing import Any, Dict, List, Optional, Union - -from _pyngraph import NodeFactory as _NodeFactory - -from ngraph.impl import Node, Output - -from ngraph.exceptions import UserInputError - -DEFAULT_OPSET = "opset11" - - -class NodeFactory(object): - """Factory front-end to create node objects.""" - - def __init__(self, opset_version: str = DEFAULT_OPSET) -> None: - """Create the NodeFactory object. - - :param opset_version: The opset version the factory will use to produce ops from. - """ - self.factory = _NodeFactory(opset_version) - - def create( - self, - op_type_name: str, - arguments: Optional[List[Union[Node, Output]]] = None, - attributes: Optional[Dict[str, Any]] = None, - ) -> Node: - """Create node object from provided description. - - The user does not have to provide all node's attributes, but only required ones. - - :param op_type_name: The operator type name. - :param arguments: The operator arguments. - :param attributes: The operator attributes. - - returns Node object representing requested operator with attributes set. - """ - if arguments is None and attributes is None: - node = self.factory.create(op_type_name) - node._attr_cache = {} - node._attr_cache_valid = False - return node - - if arguments is None and attributes is not None: - raise UserInputError('Error: cannot create "{}" op without arguments.'.format(op_type_name)) - - if attributes is None: - attributes = {} - - assert arguments is not None - - arguments = self._arguments_as_outputs(arguments) - node = self.factory.create(op_type_name, arguments, attributes) - - # Currently we don't support any attribute getters & setters for TensorIterator node. - if node.get_type_name() == "TensorIterator": - return node - - # Set getters and setters for each node's attribute. - # node.get_attribute_name() - # node.set_attribute_name() - # For compound (with more than one level of nesting) attributes of form ie.: - # node.class_member_name.some_metric.attr_name: - # node.get_some_metric_attr_name() - # node.set_some_metric_attr_name() - # Please see test_dyn_attributes.py for more usage examples. - all_attributes = node.get_attributes() - for attr_name in all_attributes.keys(): - setattr( - node, - self._normalize_attr_name_getter(attr_name), - partial(NodeFactory._get_node_attr_value, node, attr_name), - ) - setattr( - node, - self._normalize_attr_name_setter(attr_name), - partial(NodeFactory._set_node_attr_value, node, attr_name), - ) - - # Setup helper members for caching attribute values. - # The cache would be lazily populated at first access attempt. - node._attr_cache = {} - node._attr_cache_valid = False - - return node - - @staticmethod - def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]: - outputs = [] - for argument in arguments: - if issubclass(type(argument), Output): - outputs.append(argument) - else: - log.warning( - "Op arguments were passed as Node, please avoid passing arguments in " - "this manner, and pass Output(s) instead, because accepting Nodes will " - "be deprecated in a future release." 
-                )
-                outputs.extend(argument.outputs())
-        return outputs
-
-    @staticmethod
-    def _normalize_attr_name(attr_name: str, prefix: str) -> str:
-        """Normalize attribute name.
-
-        :param attr_name: The attribute name.
-        :param prefix: The prefix to attach to the attribute name.
-
-        returns The modified attribute name.
-        """
-        # Trim the first part of the name if there is only one level of attribute hierarchy.
-        if attr_name.count(".") == 1:
-            attr_name = attr_name[attr_name.find(".") + 1:]
-        return prefix + attr_name.replace(".", "_")
-
-    @classmethod
-    def _normalize_attr_name_getter(cls, attr_name: str) -> str:
-        """Normalize attribute name to be suitable for a getter function name.
-
-        :param attr_name: The attribute name to normalize
-
-        returns The appropriate getter function name.
-        """
-        return cls._normalize_attr_name(attr_name, "get_")
-
-    @classmethod
-    def _normalize_attr_name_setter(cls, attr_name: str) -> str:
-        """Normalize attribute name to be suitable for a setter function name.
-
-        :param attr_name: The attribute name to normalize
-
-        returns The appropriate setter function name.
-        """
-        return cls._normalize_attr_name(attr_name, "set_")
-
-    @staticmethod
-    def _get_node_attr_value(node: Node, attr_name: str) -> Any:
-        """Get provided node attribute value.
-
-        :param node: The node we retrieve attribute value from.
-        :param attr_name: The attribute name.
-
-        returns The node attribute value.
-        """
-        if not node._attr_cache_valid:
-            node._attr_cache = node.get_attributes()
-            node._attr_cache_valid = True
-        return node._attr_cache[attr_name]
-
-    @staticmethod
-    def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None:
-        """Set the node attribute value.
-
-        :param node: The node we change attribute value for.
-        :param attr_name: The attribute name.
-        :param value: The new attribute value.
-        """
-        node.set_attribute(attr_name, value)
-        node._attr_cache[attr_name] = value
diff --git a/src/bindings/python/src/compatibility/ngraph/utils/reduction.py b/src/bindings/python/src/compatibility/ngraph/utils/reduction.py
deleted file mode 100644
index 1c1779554c7f15..00000000000000
--- a/src/bindings/python/src/compatibility/ngraph/utils/reduction.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from typing import Iterable, Optional
-
-from ngraph.impl import Node
-
-
-def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]:
-    """Get reduction axes, defaulting to all of the node's axes when None, and normalize them to a set.
-
-    If reduction_axes is None, we default to reducing all axes.
-
-    :param node: The node we fill reduction axes for.
-    :param reduction_axes: The collection of indices of axes to reduce. May be None.
-    :return: Set filled with indices of axes we want to reduce.
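-
-    Example (illustrative only, assuming a node with a 3-D output shape):
-        get_reduction_axes(node, None)    # -> {0, 1, 2}
-        get_reduction_axes(node, [0, 2])  # -> {0, 2}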
- """ - if reduction_axes is None: - reduction_axes = set(range(len(node.shape))) - - if type(reduction_axes) is not set: - reduction_axes = set(reduction_axes) - return reduction_axes diff --git a/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py b/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py deleted file mode 100644 index 0f4650ea6f1279..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Helper classes for aggregating TensorIterator input/output desciptor attributes.""" - -from typing import List - -from ngraph.impl import Node -from ngraph.impl.op import Parameter - - -class GraphBody(object): - """Class containing graph parameters and results.""" - - def __init__( - self, - parameters: List[Parameter], - results: List[Node], - ) -> None: - self.parameters = parameters - self.results = results - - def serialize(self) -> dict: - """Serialize GraphBody as a dictionary.""" - return { - "parameters": self.parameters, - "results": self.results, - } - - -class TensorIteratorInputDesc(object): - """Represents a generic input descriptor for TensorIterator operator.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - ) -> None: - self.input_idx = input_idx - self.body_parameter_idx = body_parameter_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorInputDesc as a dictionary.""" - return { - "input_idx": self.input_idx, - "body_parameter_idx": self.body_parameter_idx, - } - - -class TensorIteratorSliceInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input formed from slices of TensorIterator input.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - start: int, - stride: int, - part_size: int, - end: int, - axis: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - self.start = start - self.stride = stride - self.part_size = part_size - self.end = end - self.axis = axis - - def serialize(self) -> dict: - """Serialize TensorIteratorSliceInputDesc as a dictionary.""" - output = super().serialize() - output["start"] = self.start - output["stride"] = self.stride - output["part_size"] = self.part_size - output["end"] = self.end - output["axis"] = self.axis - return output - - -class TensorIteratorMergedInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input with initial value in the first iteration. - - Later on, this input value is computed inside graph body. 
- """ - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - body_value_idx: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - self.body_value_idx = body_value_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorMergedInputDesc as a dictionary.""" - output = super().serialize() - output["body_value_idx"] = self.body_value_idx - return output - - -class TensorIteratorInvariantInputDesc(TensorIteratorInputDesc): - """Represents a TensorIterator graph body input that has invariant value during iteration.""" - - def __init__( - self, - input_idx: int, - body_parameter_idx: int, - ) -> None: - super().__init__(input_idx, body_parameter_idx) - - -class TensorIteratorOutputDesc(object): - """Represents a generic output descriptor for TensorIterator operator.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - ) -> None: - self.body_value_idx = body_value_idx - self.output_idx = output_idx - - def serialize(self) -> dict: - """Serialize TensorIteratorOutputDesc as a dictionary.""" - return { - "body_value_idx": self.body_value_idx, - "output_idx": self.output_idx, - } - - -class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc): - """Represents an output from a specific iteration.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - iteration: int = -1, - ) -> None: - super().__init__(body_value_idx, output_idx) - self.iteration = iteration - - def serialize(self) -> dict: - """Serialize TensorIteratorBodyOutputDesc as a dictionary.""" - output = super().serialize() - output["iteration"] = self.iteration - return output - - -class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc): - """Represents an output produced by concatenation of output from each iteration.""" - - def __init__( - self, - body_value_idx: int, - output_idx: int, - start: int, - stride: int, - part_size: int, - end: int, - axis: int, - ) -> None: - super().__init__(body_value_idx, output_idx) - self.start = start - self.stride = stride - self.part_size = part_size - self.end = end - self.axis = axis - - def serialize(self) -> dict: - """Serialize TensorIteratorConcatOutputDesc as a dictionary.""" - output = super().serialize() - output["start"] = self.start - output["stride"] = self.stride - output["part_size"] = self.part_size - output["end"] = self.end - output["axis"] = self.axis - return output diff --git a/src/bindings/python/src/compatibility/ngraph/utils/types.py b/src/bindings/python/src/compatibility/ngraph/utils/types.py deleted file mode 100644 index 9556fe2ccf04f2..00000000000000 --- a/src/bindings/python/src/compatibility/ngraph/utils/types.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Functions related to converting between Python and numpy types and ngraph types.""" - -import logging -from typing import List, Union, Optional - -import numpy as np - -from ngraph.exceptions import NgraphTypeError -from ngraph.impl import Node, Shape, Output -from ngraph.impl import Type as NgraphType -from ngraph.impl.op import Constant - -log = logging.getLogger(__name__) - -TensorShape = List[int] -NumericData = Union[int, float, np.ndarray] -NumericType = Union[type, np.dtype] -ScalarData = Union[int, float] -NodeInput = Union[Node, NumericData] - -ngraph_to_numpy_types_map = [ - (NgraphType.boolean, bool), - (NgraphType.f16, np.float16), - (NgraphType.f32, np.float32), - (NgraphType.f64, np.float64), - (NgraphType.i8, np.int8), - (NgraphType.i16, 
np.int16), - (NgraphType.i32, np.int32), - (NgraphType.i64, np.int64), - (NgraphType.u8, np.uint8), - (NgraphType.u16, np.uint16), - (NgraphType.u32, np.uint32), - (NgraphType.u64, np.uint64), - (NgraphType.bf16, np.uint16), -] - -ngraph_to_numpy_types_str_map = [ - ("boolean", bool), - ("f16", np.float16), - ("f32", np.float32), - ("f64", np.float64), - ("i8", np.int8), - ("i16", np.int16), - ("i32", np.int32), - ("i64", np.int64), - ("u8", np.uint8), - ("u16", np.uint16), - ("u32", np.uint32), - ("u64", np.uint64), -] - - -def get_element_type(data_type: NumericType) -> NgraphType: - """Return an ngraph element type for a Python type or numpy.dtype.""" - if data_type is int: - log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") - return NgraphType.i32 - - if data_type is float: - log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.") - return NgraphType.f32 - - ng_type = next((ng_type for (ng_type, np_type) in ngraph_to_numpy_types_map if np_type == data_type), None) - if ng_type: - return ng_type - - raise NgraphTypeError("Unidentified data type %s", data_type) - - -def get_element_type_str(data_type: NumericType) -> str: - """Return an ngraph element type string representation for a Python type or numpy dtype.""" - if data_type is int: - log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.") - return "i32" - - if data_type is float: - log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.") - return "f32" - - ng_type = next( - (ng_type for (ng_type, np_type) in ngraph_to_numpy_types_str_map if np_type == data_type), - None, - ) - if ng_type: - return ng_type - - raise NgraphTypeError("Unidentified data type %s", data_type) - - -def get_dtype(ngraph_type: NgraphType) -> np.dtype: - """Return a numpy.dtype for an ngraph element type.""" - np_type = next( - (np_type for (ng_type, np_type) in ngraph_to_numpy_types_map if ng_type == ngraph_type), - None, - ) - - if np_type: - return np.dtype(np_type) - - raise NgraphTypeError("Unidentified data type %s", ngraph_type) - - -def get_ndarray(data: NumericData) -> np.ndarray: - """Wrap data into a numpy ndarray.""" - if isinstance(data, np.ndarray): - return data - return np.array(data) - - -def get_shape(data: NumericData) -> TensorShape: - """Return a shape of NumericData.""" - if isinstance(data, np.ndarray): - return data.shape # type: ignore - if isinstance(data, list): - return [len(data)] # type: ignore - return [] - - -def make_constant_node(value: NumericData, dtype: Optional[NumericType] = None) -> Constant: - """Return an ngraph Constant node with the specified value.""" - ndarray = get_ndarray(value) - if dtype: - element_type = get_element_type(dtype) - else: - element_type = get_element_type(ndarray.dtype) - - return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist()) - - -def as_node(input_value: NodeInput) -> Node: - """Return input values as nodes. Scalars will be converted to Constant nodes.""" - if issubclass(type(input_value), Node): - return input_value - if issubclass(type(input_value), Output): - return input_value - return make_constant_node(input_value) - - -def as_nodes(*input_values: NodeInput) -> List[Node]: - """Return input values as nodes. 
Scalars will be converted to Constant nodes.""" - return [as_node(input_value) for input_value in input_values] diff --git a/src/bindings/python/src/compatibility/openvino/.bandit b/src/bindings/python/src/compatibility/openvino/.bandit deleted file mode 100644 index f7831187e35161..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/.bandit +++ /dev/null @@ -1,2 +0,0 @@ -[bandit] -skips: B101 diff --git a/src/bindings/python/src/compatibility/openvino/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/CMakeLists.txt deleted file mode 100644 index aa2e7093d41b1b..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/CMakeLists.txt +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# Defines the CMake commands/policies -cmake_minimum_required (VERSION 3.13) - -# Set the project name -project (ie_python_api) - -if(NOT DEFINED OpenVINO_SOURCE_DIR) - find_package(OpenVINODeveloperPackage REQUIRED - PATHS "${InferenceEngineDeveloperPackage_DIR}") -endif() - -# Python API 1.0 will be removed before 2024.0 -ov_disable_deprecated_warnings() - -if(UNIX) - # cython generated files requires public visibility. Force visibility required. - set(CMAKE_CXX_VISIBILITY_PRESET default) - set(CMAKE_C_VISIBILITY_PRESET default) -endif() - -include (cmake/UseCython.cmake) - -if(CYTHON_VERSION VERSION_LESS 0.29) - message(FATAL_ERROR "OpenVINO Python API needs at least Cython version 0.29, found version ${CYTHON_VERSION}") -else() - message(STATUS "Found Cython version ${CYTHON_VERSION}") -endif() - -# Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined in FindPython3 -set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) - -set(PYTHON_COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion}) -if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/openvino) -else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) -endif() - -function(ov_python_disable_intel_warnings target) - if(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - # 1292: unknown attribute "fallthrough" - target_compile_options(${target} PRIVATE -diag-disable=1292) - endif() -endfunction() - -set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) -add_subdirectory(inference_engine) - -if(TARGET _pyngraph) - add_dependencies(ie_api _pyngraph) -endif() - -# install - -ov_cpack_add_component(${PYTHON_COMPONENT} HIDDEN) diff --git a/src/bindings/python/src/compatibility/openvino/__init__.py b/src/bindings/python/src/compatibility/openvino/__init__.py deleted file mode 100644 index b7dc434f3148cc..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -__path__ = __import__("pkgutil").extend_path(__path__, __name__) - -# Required for Windows OS platforms -# Note: always top-level -try: - from openvino.utils import _add_openvino_libs_to_search_path - _add_openvino_libs_to_search_path() -except ImportError: - pass - -# # -# # API 2.0 -# # This __init__.py forces checking of runtime modules to propagate errors. -# # It is not compared with init files from openvino-dev package. 
-# # -# Import all public modules -from openvino import runtime as runtime -from openvino import frontend as frontend -from openvino import helpers as helpers -from openvino import preprocess as preprocess -from openvino import utils as utils -from openvino.runtime import properties as properties - -# Import most important classes and functions from openvino.runtime -from openvino.runtime import Model -from openvino.runtime import Core -from openvino.runtime import CompiledModel -from openvino.runtime import InferRequest -from openvino.runtime import AsyncInferQueue - -from openvino.runtime import Dimension -from openvino.runtime import Strides -from openvino.runtime import PartialShape -from openvino.runtime import Shape -from openvino.runtime import Layout -from openvino.runtime import Type -from openvino.runtime import Tensor -from openvino.runtime import OVAny - -from openvino.runtime import compile_model -from openvino.runtime import get_batch -from openvino.runtime import set_batch -from openvino.runtime import serialize -from openvino.runtime import shutdown -from openvino.runtime import tensor_from_file -from openvino.runtime import save_model -from openvino.runtime import layout_helpers - -# Set version for openvino package -from openvino.runtime import get_version -__version__ = get_version() - -# Tools -try: - # Model Conversion API - ovc should reside in the main namespace - from openvino.tools.ovc import convert_model -except ImportError: - pass diff --git a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake b/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake deleted file mode 100644 index 8eeabf849f49c5..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/cmake/CythonConfig.cmake +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -# -# Following changes were done on top of original file: -# Add CYTHON_EXECUTABLE searching hints at lines 50 and 51 - -#============================================================================= -# Copyright 2011 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= -# Find the Cython compiler. -# -# This code sets the following variables: -# -# CYTHON_EXECUTABLE -# -# See also UseCython.cmake -# Use the Cython executable that lives next to the Python executable -# if it is a local installation. 
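-#
-# Illustrative usage sketch only (the message text is an example, not part of this module):
-#
-#   find_package( Cython REQUIRED )
-#   message( STATUS "Found cython: ${CYTHON_EXECUTABLE} (version ${CYTHON_VERSION})" )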
-
-function( _find_cython_executable )
-    find_host_package(Python3 QUIET COMPONENTS Interpreter)
-    if( Python3_Interpreter_FOUND )
-        get_filename_component( _python_path ${Python3_EXECUTABLE} PATH )
-        file(TO_CMAKE_PATH "$ENV{HOME}" ENV_HOME)
-        find_host_program( CYTHON_EXECUTABLE
-                           NAMES cython cython.exe cython.bat cython3
-                           HINTS ${_python_path}
-                                 ${ENV_HOME}/.local/bin
-                                 $ENV{HOMEBREW_OPT}/cython/bin
-                                 ${ENV_HOME}/Library/Python/${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}/bin
-                                 ${_python_path}/Scripts
-                           )
-    else()
-        find_host_program( CYTHON_EXECUTABLE
-                           NAMES cython cython.bat cython3
-                           )
-    endif()
-
-    set(CYTHON_EXECUTABLE "${CYTHON_EXECUTABLE}" PARENT_SCOPE)
-endfunction()
-
-_find_cython_executable()
-
-include( FindPackageHandleStandardArgs )
-FIND_PACKAGE_HANDLE_STANDARD_ARGS( Cython REQUIRED_VARS CYTHON_EXECUTABLE )
-
-# Find Cython version
-execute_process(COMMAND ${CYTHON_EXECUTABLE} -V
-                ERROR_VARIABLE CYTHON_OUTPUT
-                OUTPUT_VARIABLE CYTHON_ERROR_MESSAGE
-                RESULT_VARIABLE CYTHON_EXIT_CODE
-                OUTPUT_STRIP_TRAILING_WHITESPACE)
-
-if(CYTHON_EXIT_CODE EQUAL 0)
-    if(NOT CYTHON_OUTPUT)
-        set(CYTHON_OUTPUT "${CYTHON_ERROR_MESSAGE}")
-    endif()
-    string(REGEX REPLACE "^Cython version ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" CYTHON_VERSION "${CYTHON_OUTPUT}")
-else()
-    if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY)
-        if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.15)
-            set(CYTHON_MESSAGE_MODE TRACE)
-        else()
-            set(CYTHON_MESSAGE_MODE WARNING)
-        endif()
-    endif()
-    if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED)
-        set(CYTHON_MESSAGE_MODE FATAL_ERROR)
-    endif()
-    message(${CYTHON_MESSAGE_MODE} "Failed to detect cython version: ${CYTHON_ERROR_MESSAGE}")
-    unset(CYTHON_MESSAGE_MODE)
-endif()
-
-unset(CYTHON_OUTPUT)
-unset(CYTHON_EXIT_CODE)
-unset(CYTHON_ERROR_MESSAGE)
-
-mark_as_advanced( CYTHON_EXECUTABLE CYTHON_VERSION )
diff --git a/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake b/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake
deleted file mode 100644
index 03a208f03c233f..00000000000000
--- a/src/bindings/python/src/compatibility/openvino/cmake/UseCython.cmake
+++ /dev/null
@@ -1,298 +0,0 @@
-# Define a function to create Cython modules.
-#
-# For more information on the Cython project, see http://cython.org/.
-# "Cython is a language that makes writing C extensions for the Python language
-# as easy as Python itself."
-#
-# This file defines a CMake function to build a Cython Python module.
-# To use it, first include this file.
-#
-#   include( UseCython )
-#
-# Then call cython_add_module to create a module.
-#
-#   cython_add_module( <module_name> <src1> <src2> ... <srcN> )
-#
-# To avoid dependence on Python, set the Python3_LIBRARY cache variable to point
-# to a static library. If a MAIN_MODULE source is specified,
-# the "if __name__ == '__main__':" from that module is used as the C main() method
-# for the executable. If MAIN_MODULE is not specified, the source with the same basename as
-# <executable_name> is assumed to be the MAIN_MODULE.
-#
-# Where <module_name> is the name of the resulting Python module and
-# <src1> <src2> ... are source files to be compiled into the module, e.g. *.pyx,
-# *.py, *.c, *.cxx, etc. A CMake target is created with name <module_name>. This can
-# be used for target_link_libraries(), etc.
-#
-# The sample paths set with the CMake include_directories() command will be used
-# for include directories to search for *.pxd when running the Cython compiler.
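-#
-# A minimal illustrative sketch (the module and source names are hypothetical):
-#
-#   include( UseCython )
-#   include_directories( ${CMAKE_CURRENT_SOURCE_DIR} )
-#   cython_add_module( my_module my_module.pyx my_helpers.cpp )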
-#
-# Cache variables that affect the behavior include:
-#
-#  CYTHON_ANNOTATE
-#  CYTHON_NO_DOCSTRINGS
-#  CYTHON_FLAGS
-#
-# Source file properties that affect the build process are
-#
-#  CYTHON_IS_CXX
-#
-# If this is set on a *.pyx file with the CMake set_source_files_properties()
-# command, the file will be compiled as a C++ file.
-#
-# See also FindCython.cmake

-# Copyright (C) 2018-2023 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Following changes were done on top of the original file:
-# added PRIVATE linking mode for target_link_libraries call at lines 298 and 336
-
-#=============================================================================
-# Copyright 2011 Kitware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#=============================================================================
-
-# Configuration options.
-set( CYTHON_ANNOTATE OFF
-  CACHE BOOL "Create an annotated .html file when compiling *.pyx." )
-set( CYTHON_NO_DOCSTRINGS OFF
-  CACHE BOOL "Strip docstrings from the compiled module." )
-set( CYTHON_FLAGS "" CACHE STRING
-  "Extra flags to the cython compiler." )
-mark_as_advanced( CYTHON_ANNOTATE CYTHON_NO_DOCSTRINGS CYTHON_FLAGS )
-
-find_package( Cython REQUIRED
-              PATHS "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
-              NO_CMAKE_FIND_ROOT_PATH
-              NO_DEFAULT_PATH )
-
-find_package(Python3 REQUIRED COMPONENTS Interpreter ${python3_development_component})
-
-set( CYTHON_CXX_EXTENSION "cxx" )
-set( CYTHON_C_EXTENSION "c" )
-
-# Create a *.c or *.cxx file from a *.pyx file.
-# Takes the generated file basename as input; the generated file name is returned in the
-# variable named by the "generated_file" argument. The remaining arguments are the *.py
-# and *.pyx source files.
-function( compile_pyx _name generated_file )
-  # Default to assuming all files are C.
-  set( cxx_arg "" )
-  set( extension ${CYTHON_C_EXTENSION} )
-  set( pyx_lang "C" )
-  set( comment "Compiling Cython C source for ${_name}..." )
-
-  set( cython_include_directories "" )
-  set( pxd_dependencies "" )
-  set( pxi_dependencies "" )
-  set( c_header_dependencies "" )
-  set( pyx_locations "" )
-
-  foreach( pyx_file ${ARGN} )
-    get_filename_component( pyx_file_basename "${pyx_file}" NAME_WE )
-
-    # Determine if it is a C or C++ file.
-    get_source_file_property( property_is_cxx ${pyx_file} CYTHON_IS_CXX )
-    if( ${property_is_cxx} )
-      set( cxx_arg "--cplus" )
-      set( extension ${CYTHON_CXX_EXTENSION} )
-      set( pyx_lang "CXX" )
-      set( comment "Compiling Cython CXX source for ${_name}..." )
-    endif()
-
-    # Get the include directories.
-    get_source_file_property( pyx_location ${pyx_file} LOCATION )
-    get_filename_component( pyx_path ${pyx_location} PATH )
-    get_directory_property( cmake_include_directories DIRECTORY ${pyx_path} INCLUDE_DIRECTORIES )
-    list( APPEND cython_include_directories ${cmake_include_directories} )
-    list( APPEND pyx_locations "${pyx_location}" )
-
-    # Determine dependencies.
-    # Add the pxd file with the same name as the given pyx file.
-    unset( corresponding_pxd_file CACHE )
-    find_file( corresponding_pxd_file ${pyx_file_basename}.pxd
-               PATHS "${pyx_path}" ${cmake_include_directories}
-               NO_DEFAULT_PATH )
-    if( corresponding_pxd_file )
-      list( APPEND pxd_dependencies "${corresponding_pxd_file}" )
-    endif()
-
-    # Look for included pxi files
-    file(STRINGS "${pyx_file}" include_statements REGEX "include +['\"]([^'\"]+).*")
-    foreach(statement ${include_statements})
-      string(REGEX REPLACE "include +['\"]([^'\"]+).*" "\\1" pxi_file "${statement}")
-      unset(pxi_location CACHE)
-      find_file(pxi_location ${pxi_file}
-                PATHS "${pyx_path}" ${cmake_include_directories} NO_DEFAULT_PATH)
-      if (pxi_location)
-        list(APPEND pxi_dependencies ${pxi_location})
-        get_filename_component( found_pyi_file_basename "${pxi_file}" NAME_WE )
-        get_filename_component( found_pyi_path ${pxi_location} PATH )
-        unset( found_pyi_pxd_file CACHE )
-        find_file( found_pyi_pxd_file ${found_pyi_file_basename}.pxd
-                   PATHS "${found_pyi_path}" ${cmake_include_directories} NO_DEFAULT_PATH )
-        if (found_pyi_pxd_file)
-          list( APPEND pxd_dependencies "${found_pyi_pxd_file}" )
-        endif()
-      endif()
-    endforeach() # for each include statement found
-
-    # pxd files to check for additional dependencies.
-    set( pxds_to_check "${pyx_file}" "${pxd_dependencies}" )
-    set( pxds_checked "" )
-    set( number_pxds_to_check 1 )
-    while( ${number_pxds_to_check} GREATER 0 )
-      foreach( pxd ${pxds_to_check} )
-        list( APPEND pxds_checked "${pxd}" )
-        list( REMOVE_ITEM pxds_to_check "${pxd}" )
-
-        # check for C header dependencies
-        file( STRINGS "${pxd}" extern_from_statements
-              REGEX "cdef[ ]+extern[ ]+from.*$" )
-        foreach( statement ${extern_from_statements} )
-          # Had trouble getting the quote in the regex
-          string( REGEX REPLACE "cdef[ ]+extern[ ]+from[ ]+[\"]([^\"]+)[\"].*" "\\1" header "${statement}" )
-          unset( header_location CACHE )
-          find_file( header_location ${header} PATHS ${cmake_include_directories} )
-          if( header_location )
-            list( FIND c_header_dependencies "${header_location}" header_idx )
-            if( ${header_idx} LESS 0 )
-              list( APPEND c_header_dependencies "${header_location}" )
-            endif()
-          endif()
-        endforeach()
-
-        # check for pxd dependencies
-
-        # Look for cimport statements.
-        set( module_dependencies "" )
-        file( STRINGS "${pxd}" cimport_statements REGEX cimport )
-        foreach( statement ${cimport_statements} )
-          if( ${statement} MATCHES from )
-            string( REGEX REPLACE "from[ ]+([^ ]+).*" "\\1" module "${statement}" )
-          else()
-            string( REGEX REPLACE "cimport[ ]+([^ ]+).*" "\\1" module "${statement}" )
-          endif()
-          list( APPEND module_dependencies ${module} )
-        endforeach()
-        list( REMOVE_DUPLICATES module_dependencies )
-        # Add the module to the files to check, if appropriate.
-        foreach( module ${module_dependencies} )
-          unset( pxd_location CACHE )
-          find_file( pxd_location ${module}.pxd
-                     PATHS "${pyx_path}" ${cmake_include_directories} NO_DEFAULT_PATH )
-          if( pxd_location )
-            list( FIND pxds_checked ${pxd_location} pxd_idx )
-            if( ${pxd_idx} LESS 0 )
-              list( FIND pxds_to_check ${pxd_location} pxd_idx )
-              if( ${pxd_idx} LESS 0 )
-                list( APPEND pxds_to_check ${pxd_location} )
-                list( APPEND pxd_dependencies ${pxd_location} )
-              endif() # if it is not already going to be checked
-            endif() # if it has not already been checked
-          endif() # if pxd file can be found
-        endforeach() # for each module dependency discovered
-      endforeach() # for each pxd file to check
-      list( LENGTH pxds_to_check number_pxds_to_check )
-    endwhile()
-
-
-
-  endforeach() # pyx_file
-
-  # Set additional flags.
-  if( CYTHON_ANNOTATE )
-    set( annotate_arg "--annotate" )
-  endif()
-
-  if( CYTHON_NO_DOCSTRINGS )
-    set( no_docstrings_arg "--no-docstrings" )
-  endif()
-
-  set( cython_debug_arg "$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:--gdb>" )
-
-  if( Python3_VERSION_MAJOR EQUAL 3 )
-    set( version_arg "-3" )
-  else()
-    set( version_arg )
-  endif()
-
-  # Include directory arguments.
-  list( REMOVE_DUPLICATES cython_include_directories )
-  set( include_directory_arg "" )
-  foreach( _include_dir ${cython_include_directories} )
-    set( include_directory_arg ${include_directory_arg} "-I" "${_include_dir}" )
-  endforeach()
-
-  # Determining generated file name.
-  set( _generated_file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.${extension}" )
-  set_source_files_properties( ${_generated_file} PROPERTIES GENERATED TRUE )
-  set( ${generated_file} ${_generated_file} PARENT_SCOPE )
-
-  list( REMOVE_DUPLICATES pxd_dependencies )
-  list( REMOVE_DUPLICATES c_header_dependencies )
-
-  # Add the command to run the compiler.
-  add_custom_command( OUTPUT ${_generated_file}
-                      COMMAND ${CYTHON_EXECUTABLE}
-                      ARGS ${cxx_arg} ${include_directory_arg} ${version_arg}
-                           ${annotate_arg} ${no_docstrings_arg} ${cython_debug_arg} ${CYTHON_FLAGS}
-                           --output-file ${_generated_file} ${pyx_locations}
-                      DEPENDS ${pyx_locations} ${pxd_dependencies} ${pxi_dependencies}
-                      IMPLICIT_DEPENDS ${pyx_lang} ${c_header_dependencies}
-                      COMMENT ${comment}
-                      )
-
-  # Remove their visibility to the user.
-  set( corresponding_pxd_file "" CACHE INTERNAL "" )
-  set( header_location "" CACHE INTERNAL "" )
-  set( pxd_location "" CACHE INTERNAL "" )
-endfunction()
-
-# cython_add_module( <name> src1 src2 ... srcN )
-# Build the Cython Python module.
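-#
-# For example (illustrative only; this mirrors how inference_engine/CMakeLists.txt below uses it):
-#
-#   set_source_files_properties( ie_api.pyx PROPERTIES CYTHON_IS_CXX ON )
-#   cython_add_module( ie_api ie_api.pyx ie_api_impl.cpp )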
-function( cython_add_module _name ) - set( pyx_module_sources "" ) - set( other_module_sources "" ) - foreach( _file ${ARGN} ) - if( ${_file} MATCHES ".*\\.py[x]?$" ) - list( APPEND pyx_module_sources ${_file} ) - else() - list( APPEND other_module_sources ${_file} ) - endif() - endforeach() - compile_pyx( ${_name} generated_file ${pyx_module_sources} ) - python3_add_library ( ${_name} MODULE ${generated_file} ${other_module_sources} ) - # Python3_SOABI is not defined during cross-compilation - if (Python3_SOABI AND NOT PYTHON_MODULE_EXTENSION MATCHES "^\.${Python3_SOABI}.+$") - message(FATAL_ERROR "Python3_SOABI (${Python3_SOABI}) and PYTHON_MODULE_EXTENSION (${PYTHON_MODULE_EXTENSION}) are not matching") - endif() - pybind11_extension( ${_name} ) - if( APPLE ) - set_target_properties( ${_name} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup" ) - else() - target_link_libraries( ${_name} PRIVATE ${Python3_LIBRARIES} ) - endif() -endfunction() diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt deleted file mode 100644 index dd83bad5f367a0..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(TARGET_NAME "ie_api") - -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine) - -file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx - ${CMAKE_CURRENT_SOURCE_DIR}/*.pxd - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) - -file(GLOB PYX_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx) -set_source_files_properties(${PYX_SOURCES} PROPERTIES CYTHON_IS_CXX ON) - -if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # 'argument': conversion from 'size_t' to 'int', possible loss of data - ov_add_compiler_flags(/wd4267) - ov_add_compiler_flags(/wd4244) - ov_add_compiler_flags(/wd4551) -endif() -if(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) - ov_add_compiler_flags(-Wno-undef) - if(OV_COMPILER_IS_CLANG) - ov_add_compiler_flags(-Wno-parentheses-equality) - endif() -endif() -if(UNUSED_BUT_SET_VARIABLE_SUPPORTED) - ov_add_compiler_flags(-Wno-unused-but-set-variable) -endif() - -# create target - -cython_add_module(${TARGET_NAME} ${SOURCES}) -ov_python_disable_intel_warnings(${TARGET_NAME}) -set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) - -set(INSTALLED_TARGETS ${TARGET_NAME}) -list(REMOVE_ITEM PYX_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx") - -foreach(PYX_FILE IN LISTS PYX_SOURCES) - get_filename_component(PYX_NAME "${PYX_FILE}" NAME_WE) - cython_add_module(${PYX_NAME} ${PYX_FILE}) - ov_python_disable_intel_warnings(${PYX_NAME}) - add_dependencies(${TARGET_NAME} ${PYX_NAME}) - target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") - target_link_libraries(${PYX_NAME} PRIVATE openvino::runtime) - list(APPEND INSTALLED_TARGETS ${PYX_NAME}) - ov_python_minimal_api(${PYX_NAME}) - set_target_properties(${PYX_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) -endforeach() - -if(COMMAND ov_add_vs_version_file) - 
foreach(target IN LISTS INSTALLED_TARGETS) - ov_add_vs_version_file(NAME ${target} - FILEDESCRIPTION "Inference Engine Python library") - endforeach() -endif() - -function(python_ov_disable_deprecated_warnings) - ov_disable_deprecated_warnings() - set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx" "${CMAKE_CURRENT_BINARY_DIR}/constants.cxx") - set_source_files_properties(${pyx_file} PROPERTIES COMPILE_OPTIONS ${ov_c_cxx_deprecated}) -endfunction() - -python_ov_disable_deprecated_warnings() -ov_python_minimal_api(${TARGET_NAME}) - -target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) - -# Compatibility with python 2.7 which has deprecated "register" specifier -if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - target_compile_options(${TARGET_NAME} PRIVATE "-Wno-error=register") -endif() - -# perform copy -add_custom_command(TARGET ${TARGET_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py -) - -foreach(target IN LISTS INSTALLED_TARGETS) - ov_set_install_rpath(${target} ${OV_CPACK_PYTHONDIR}/openvino/inference_engine ${OV_CPACK_RUNTIMEDIR}) -endforeach() - -# install - -install(TARGETS ${INSTALLED_TARGETS} - RUNTIME DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - LIBRARY DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -install(PROGRAMS __init__.py - DESTINATION ${OV_CPACK_PYTHONDIR}/openvino/inference_engine - COMPONENT ${PYTHON_COMPONENT} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} - EXCLUDE_PATTERNS ".*\\.cxx;.*\\.pxd;.*\\.pyx") diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py b/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py deleted file mode 100644 index b7ece2fcbbd817..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys -import warnings - - -if sys.platform == "win32": - # Installer, yum, pip installs openvino dlls to the different directories - # and those paths need to be visible to the openvino modules - # - # If you're using a custom installation of openvino, - # add the location of openvino dlls to your system PATH. - # - # looking for the libs in the pip installation path by default. - openvino_libs = [os.path.join(os.path.dirname(__file__), "..", "..", "openvino", "libs")] - # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. - openvino_libs_installer = os.getenv("OPENVINO_LIB_PATHS") - if openvino_libs_installer: - openvino_libs.extend(openvino_libs_installer.split(";")) - for lib in openvino_libs: - lib_path = os.path.join(os.path.dirname(__file__), lib) - if os.path.isdir(lib_path): - # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. 
- if (3, 8) <= sys.version_info: - os.add_dll_directory(os.path.abspath(lib_path)) - else: - os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] - -from .ie_api import * - -warnings.warn( - message="OpenVINO Inference Engine Python API is deprecated and will be removed in 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html", - category=FutureWarning, - stacklevel=2, -) - -__all__ = ["IENetwork", "TensorDesc", "IECore", "Blob", "PreProcessInfo", "get_version"] -__version__ = get_version() # type: ignore diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx b/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx deleted file mode 100644 index d1ef004e86f853..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/constants.pyx +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#cython: language_level=3 - -from .cimport ie_api_impl_defs as C - -import numpy as np -from enum import Enum - -supported_precisions = ['FP32', 'FP64', 'FP16', 'I64', 'U64', 'I32', 'U32', - 'I16', 'I4', 'I8', 'U16', 'U4', 'U8', 'BOOL', 'BIN', 'BF16'] - -known_plugins = ['CPU', 'GPU', 'HETERO', 'MULTI'] - -layout_int_to_str_map = {0: 'ANY', 1: 'NCHW', 2: 'NHWC', 3: 'NCDHW', 4: 'NDHWC', 64: 'OIHW', 95: 'SCALAR', 96: 'C', - 128: 'CHW', 192: 'HW', 193: 'NC', 194: 'CN', 200: 'BLOCKED'} - -format_map = {'FP32' : np.float32, - 'FP64' : np.float64, - 'FP16' : np.float16, - 'I64' : np.int64, - 'U64' : np.uint64, - 'I32' : np.int32, - 'U32' : np.uint32, - 'I16' : np.int16, - 'U16' : np.uint16, - 'I4' : np.int8, - 'I8' : np.int8, - 'U4' : np.int8, - 'U8' : np.uint8, - 'BOOL' : np.uint8, - 'BIN' : np.int8, - 'BF16' : np.float16, - } - -layout_str_to_enum = {'ANY': C.Layout.ANY, - 'NHWC': C.Layout.NHWC, - 'NCHW': C.Layout.NCHW, - 'NCDHW': C.Layout.NCDHW, - 'NDHWC': C.Layout.NDHWC, - 'OIHW': C.Layout.OIHW, - 'GOIHW': C.Layout.GOIHW, - 'OIDHW': C.Layout.OIDHW, - 'GOIDHW': C.Layout.GOIDHW, - 'SCALAR': C.Layout.SCALAR, - 'C': C.Layout.C, - 'CHW': C.Layout.CHW, - 'HW': C.Layout.HW, - 'NC': C.Layout.NC, - 'CN': C.Layout.CN, - 'BLOCKED': C.Layout.BLOCKED - } - - -class MeanVariant(Enum): - MEAN_IMAGE = 0 - MEAN_VALUE = 1 - NONE = 2 - - -class ResizeAlgorithm(Enum): - NO_RESIZE = 0 - RESIZE_BILINEAR = 1 - RESIZE_AREA = 2 - - -class ColorFormat(Enum): - RAW = 0 - RGB = 1 - BGR = 2 - RGBX = 3 - BGRX = 4 - - -cpdef enum StatusCode: - OK = 0 - GENERAL_ERROR = -1 - NOT_IMPLEMENTED = -2 - NETWORK_NOT_LOADED = -3 - PARAMETER_MISMATCH = -4 - NOT_FOUND = -5 - OUT_OF_BOUNDS = -6 - UNEXPECTED = -7 - REQUEST_BUSY = -8 - RESULT_NOT_READY = -9 - NOT_ALLOCATED = -10 - INFER_NOT_STARTED = -11 - NETWORK_NOT_READ = -12 - - -cpdef enum WaitMode: - RESULT_READY = -1 - STATUS_ONLY = 0 diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd deleted file mode 100644 index fd884b701800c1..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pxd +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#cython: language_level=3 - -from .cimport ie_api_impl_defs as C -from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo, 
CExecutableNetwork, CVariableState - -import os - -from libcpp.string cimport string -from libcpp.vector cimport vector -from libcpp cimport bool -from libcpp.memory cimport unique_ptr, shared_ptr - -cdef class Blob: - cdef CBlob.Ptr _ptr - cdef object _is_const - cdef public object _array_data - cdef public object _initial_shape - -cdef class BlobBuffer: - cdef CBlob.Ptr ptr - cdef char*format - cdef vector[Py_ssize_t] shape - cdef vector[Py_ssize_t] strides - cdef reset(self, CBlob.Ptr &, vector[size_t] representation_shape = ?) - cdef char*_get_blob_format(self, const CTensorDesc & desc) - - cdef public: - total_stride, item_size - -cdef class InferRequest: - cdef C.InferRequestWrap *impl - - cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name) - - cpdef infer(self, inputs = ?) - cpdef async_infer(self, inputs = ?) - cpdef wait(self, timeout = ?) - cpdef get_perf_counts(self) - cdef void user_callback(self, int status) with gil - cdef public: - _inputs_list, _outputs_list, _py_callback, _py_data, _user_blobs - -cdef class IENetwork: - cdef C.IENetwork impl - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class ExecutableNetwork: - cdef unique_ptr[C.IEExecNetwork] impl - cpdef wait(self, num_requests = ?, timeout = ?) - cpdef get_idle_request_id(self) - cdef public: - _requests, _infer_requests - -cdef class IECore: - cdef C.IECore impl - cpdef IENetwork read_network(self, model : [str, bytes, os.PathLike], - weights : [str, bytes, os.PathLike] = ?, bool init_from_buffer = ?) - cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], - device_name = ?, config = ?, int num_requests = ?) - cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config = ?, int num_requests = ?) - - -cdef class DataPtr: - cdef C.DataPtr _ptr - cdef C.IENetwork * _ptr_network - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class CDataPtr: - cdef C.CDataPtr _ptr - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class TensorDesc: - cdef C.CTensorDesc impl - -cdef class InputInfoPtr: - cdef InputInfo.Ptr _ptr - cdef C.IENetwork * _ptr_network - -cdef class InputInfoCPtr: - cdef InputInfo.CPtr _ptr - cdef shared_ptr[CExecutableNetwork] _ptr_plugin - -cdef class PreProcessInfo: - cdef CPreProcessInfo* _ptr - cdef const CPreProcessInfo* _cptr - cdef object _user_data - -cdef class PreProcessChannel: - cdef CPreProcessChannel.Ptr _ptr - -cdef class VariableState: - cdef C.CVariableState impl diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx deleted file mode 100644 index 7dade4aa4d871c..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api.pyx +++ /dev/null @@ -1,1854 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -#distutils: language=c++ -#cython: embedsignature=True -#cython: language_level=3 - -from cython.operator cimport dereference as deref -from libcpp.string cimport string -from libcpp.vector cimport vector -from libcpp cimport bool -from libcpp.map cimport map -from libcpp.memory cimport unique_ptr -from libc.stdlib cimport malloc, free -from libc.stdint cimport int64_t, uint8_t, int8_t, int32_t, uint16_t, int16_t, uint32_t, uint64_t -from libc.stddef cimport size_t -from libc.string cimport memcpy - -import os -from fnmatch import fnmatch -import threading -import warnings -from copy import deepcopy -from collections import namedtuple 
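-
-# Illustrative sketch of the legacy flow implemented below (the model file and
-# input names are hypothetical):
-#
-#   ie = IECore()
-#   net = ie.read_network(model="model.xml", weights="model.bin")
-#   exec_net = ie.load_network(net, "CPU")
-#   results = exec_net.infer({"data": input_array})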
-
-from .cimport ie_api_impl_defs as C
-from .ie_api_impl_defs cimport SizeVector, Precision
-from .constants import WaitMode, StatusCode, MeanVariant, layout_str_to_enum, format_map, layout_int_to_str_map,\
-    known_plugins, supported_precisions, ResizeAlgorithm, ColorFormat
-
-import numpy as np
-
-warnings.filterwarnings(action="module", category=DeprecationWarning)
-
-cdef extern from "<utility>" namespace "std" nogil:
-    cdef unique_ptr[C.IEExecNetwork] move(unique_ptr[C.IEExecNetwork])
-
-cdef to_py_string(const string & std_string):
-    return bytes(std_string).decode()
-
-cdef dict_to_c_map(py_dict):
-    cdef map[string, string] c_map
-    for k, v in py_dict.items():
-        if type(k) != str or type(v) != str:
-            raise TypeError("Only string keys and values are allowed!")
-        c_map[k.encode()] = v.encode()
-    return c_map
-
-cdef c_map_to_dict(map[string, string] c_map):
-    py_dict = {}
-    for v in c_map:
-        py_dict[v.first.decode()] = v.second.decode()
-    return py_dict
-
-
-def get_version():
-    return C.get_version().decode()
-
-
-def read_network(path_to_xml : str, path_to_bin : str):
-    cdef IENetwork net = IENetwork()
-    net.impl = C.read_network(path_to_xml.encode(), path_to_bin.encode())
-    return net
-
-
-cdef class VariableState:
-    """
-    OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on
-    transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html
-
-    This class manages data for reset operations
-    """
-
-    def reset(self):
-        """
-        Reset the internal variable state of the relevant infer request
-        to the value specified as default for the corresponding ReadValue node
-        """
-        self.impl.reset()
-
-
-    @property
-    def state(self):
-        """
-        Returns the value of the variable state.
-        """
-        blob = Blob()
-        blob._ptr = self.impl.getState()
-        blob._is_const = True
-        return blob
-
-    @state.setter
-    def state(self, blob : Blob):
-        self.impl.setState(blob._ptr)
-
-    @property
-    def name(self):
-        """
-        A string representing the state name
-        """
-        return to_py_string(self.impl.getName())
-
-
-cdef class TensorDesc:
-    """
-    OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on
-    transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html
-
-    This class defines a Tensor description
-    """
-
-    def __eq__(self, other : TensorDesc):
-        return self.layout == other.layout and self.precision == other.precision and self.dims == other.dims
-
-    def __ne__(self, other : TensorDesc):
-        return self.layout != other.layout or self.precision != other.precision or self.dims != other.dims
-
-    def __deepcopy__(self, memodict={}):
-        return TensorDesc(deepcopy(self.precision, memodict), deepcopy(self.dims, memodict), deepcopy(self.layout, memodict))
-
-
-    def __cinit__(self, precision : str, dims : [list, tuple], layout : str):
-        """Class constructor
-
-        :param precision: target memory precision
-        :param dims: target memory dimensions
-        :param layout: target memory layout
-        :return: Instance of TensorDesc class
-        """
-        if precision not in supported_precisions:
-            raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}")
-        self.impl = C.CTensorDesc(C.Precision.FromStr(precision.encode()), dims, layout_str_to_enum[layout])
-
-
-    @property
-    def dims(self):
-        """
-        Shape (dimensions) of the :class:`TensorDesc` object
-        """
-        return self.impl.getDims()
-
-    @dims.setter
-    def dims(self, dims_array : [list, tuple]):
-        self.impl.setDims(dims_array)
-
-
-    @property
-    def precision(self):
-        """
-        Precision of the :class:`TensorDesc` object
-        """
-        return self.impl.getPrecision().name().decode()
-
-    @precision.setter
-    def precision(self, precision : str):
-        if precision not in supported_precisions:
-            raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}")
-        self.impl.setPrecision(C.Precision.FromStr(precision.encode()))
-
-
-    @property
-    def layout(self):
-        """
-        Layout of the :class:`TensorDesc` object
-        """
-        return layout_int_to_str_map[self.impl.getLayout()]
-
-    @layout.setter
-    def layout(self, layout : str):
-        if layout not in layout_str_to_enum.keys():
-            raise ValueError(f"Unsupported layout {layout}! "
-                             f"List of supported layouts: {list(layout_str_to_enum.keys())}")
-        self.impl.setLayout(layout_str_to_enum[layout])
-
-
-cdef class Blob:
-    """
-    OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on
-    transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html
-
-    This class represents a Blob
-    """
-
-    def __cinit__(self, TensorDesc tensor_desc = None, array : np.ndarray = None):
-        """Class constructor
-
-        :param tensor_desc: :class:`TensorDesc` object describing the created Blob object.
-        :param array: numpy.ndarray with data to fill the blob memory. The array has to have the same
-                      number of elements as specified in the tensor_desc.dims attribute, and an element
-                      precision corresponding to tensor_desc.precision. If an array is not provided,
-                      an empty numpy.ndarray will be created according to the parameters of tensor_desc.
-        :return: Instance of Blob class
-        """
-        cdef CTensorDesc c_tensor_desc
-        cdef float[::1] fp32_array_memview
-        cdef double[::1] fp64_array_memview
-        cdef int16_t[::1] I16_array_memview
-        cdef uint16_t[::1] U16_array_memview
-        cdef uint8_t[::1] U8_array_memview
-        cdef int8_t[::1] I8_array_memview
-        cdef int32_t[::1] I32_array_memview
-        cdef int64_t[::1] I64_array_memview
-        cdef uint32_t[::1] U32_array_memview
-        cdef uint64_t[::1] U64_array_memview
-
-        self._is_const = False
-        self._array_data = array
-        self._initial_shape = array.shape if array is not None else None
-
-        if self._array_data is not None:
-            if np.isfortran(self._array_data):
-                self._array_data = self._array_data.ravel(order="F")
-            else:
-                self._array_data = self._array_data.ravel(order="C")
-        if self._array_data is None and tensor_desc is not None:
-            c_tensor_desc = tensor_desc.impl
-            precision = tensor_desc.precision
-            if precision == "FP32":
-                self._ptr = C.make_shared_blob[float](c_tensor_desc)
-            elif precision == "FP64":
-                self._ptr = C.make_shared_blob[double](c_tensor_desc)
-            elif precision == "FP16" or precision == "I16" or precision == "BF16":
-                self._ptr = C.make_shared_blob[int16_t](c_tensor_desc)
-            elif precision == "Q78" or precision == "U16":
-                self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc)
-            elif precision == "U8" or precision == "BOOL":
-                self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc)
-            elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4":
-                self._ptr = C.make_shared_blob[int8_t](c_tensor_desc)
-            elif precision == "I32":
-                self._ptr = C.make_shared_blob[int32_t](c_tensor_desc)
-            elif precision == "U32":
-                self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc)
-            elif precision == "I64":
-                self._ptr = C.make_shared_blob[int64_t](c_tensor_desc)
-            elif precision == "U64":
-                self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc)
-            else:
-                raise AttributeError(f"Unsupported precision {precision} for blob")
-            deref(self._ptr).allocate()
-        elif tensor_desc is not None and self._array_data is not None:
-            c_tensor_desc = tensor_desc.impl
-            precision = tensor_desc.precision
-            size_td = C.product(c_tensor_desc.getDims())
-            if array.size != size_td:
-                raise AttributeError(f"Number of elements in provided numpy array {array.size} and "
-                                     f"required by TensorDesc {size_td} are not equal")
-            if self._array_data.dtype != format_map[precision]:
-                raise ValueError(f"Data type {self._array_data.dtype} of provided numpy array "
-                                 f"doesn't match to TensorDesc precision {precision}")
-            if not self._array_data.flags['C_CONTIGUOUS']:
-                self._array_data = np.ascontiguousarray(self._array_data)
-            if precision == "FP32":
-                fp32_array_memview = self._array_data
-                self._ptr = C.make_shared_blob[float](c_tensor_desc, &fp32_array_memview[0], fp32_array_memview.shape[0])
-            elif precision == "FP64":
-                fp64_array_memview = self._array_data
-                self._ptr = C.make_shared_blob[double](c_tensor_desc, &fp64_array_memview[0], fp64_array_memview.shape[0])
-            elif precision == "FP16" or precision == "BF16":
-                I16_array_memview = self._array_data.view(dtype=np.int16)
-                self._ptr = C.make_shared_blob[int16_t](c_tensor_desc, &I16_array_memview[0], I16_array_memview.shape[0])
-            elif precision == "I16":
-                I16_array_memview = self._array_data
-                self._ptr = C.make_shared_blob[int16_t](c_tensor_desc, &I16_array_memview[0], I16_array_memview.shape[0])
-            elif precision == "Q78" or precision == "U16":
-
U16_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc, &U16_array_memview[0], U16_array_memview.shape[0]) - elif precision == "U8" or precision == "BOOL": - U8_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc, &U8_array_memview[0], U8_array_memview.shape[0]) - elif precision == "I8" or precision == "BIN" or precision == "I4" or precision == "U4": - I8_array_memview = self._array_data - self._ptr = C.make_shared_blob[int8_t](c_tensor_desc, &I8_array_memview[0], I8_array_memview.shape[0]) - elif precision == "I32": - I32_array_memview = self._array_data - self._ptr = C.make_shared_blob[int32_t](c_tensor_desc, &I32_array_memview[0], I32_array_memview.shape[0]) - elif precision == "U32": - U32_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint32_t](c_tensor_desc, &U32_array_memview[0], U32_array_memview.shape[0]) - elif precision == "I64": - I64_array_memview = self._array_data - self._ptr = C.make_shared_blob[int64_t](c_tensor_desc, &I64_array_memview[0], I64_array_memview.shape[0]) - elif precision == "U64": - U64_array_memview = self._array_data - self._ptr = C.make_shared_blob[uint64_t](c_tensor_desc, &U64_array_memview[0], U64_array_memview.shape[0]) - else: - raise AttributeError(f"Unsupported precision {precision} for blob") - - def __deepcopy__(self, memodict): - res = Blob(deepcopy(self.tensor_desc, memodict), deepcopy(self._array_data, memodict)) - res.buffer[:] = deepcopy(self.buffer[:], memodict) - return res - - @property - def buffer(self): - """ - Blob's memory as :class:`numpy.ndarray` representation - """ - representation_shape = self._initial_shape if self._initial_shape is not None else [] - cdef BlobBuffer buffer = BlobBuffer() - buffer.reset(self._ptr, representation_shape) - return buffer.to_numpy(self._is_const) - - - @property - def tensor_desc(self): - """ - :class:`TensorDesc` of created Blob - """ - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - return tensor_desc - - def set_shape(self, new_shape): - self._initial_shape = new_shape - deref(self._ptr).setShape(new_shape) - - -## This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. -cdef class IECore: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. - """ - - def __cinit__(self, xml_config_file: str = ""): - """Class constructor - - :param xml_config_file: A full path to `.xml` file containing plugins configuration. - If the parameter is not specified, the default configuration is handled automatically. 
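For comparison, the API 2.0 replacement for this class is `openvino.runtime.Core`; a minimal sketch, assuming the 2023.x `openvino` package that this PR keeps:

.. code-block:: python

    from openvino.runtime import Core

    core = Core()                  # replaces IECore()
    print(core.available_devices)  # replaces IECore.available_devices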
-        :return: Instance of IECore class
-        """
-        cdef string c_xml_config_file = xml_config_file.encode()
-        with nogil:
-            self.impl = C.IECore(c_xml_config_file)
-
-
-    def get_versions(self, device_name: str):
-        """Get a :class:`collections.namedtuple` object with versions of the plugin specified
-
-        :param device_name: Name of the registered plugin
-        :return: Dictionary mapping a plugin name and `Versions` :class:`collections.namedtuple` object with the following fields:
-
-                 * `major` - major plugin integer version
-                 * `minor` - minor plugin integer version
-                 * `build_number` - plugin build number string
-                 * `description` - plugin description string
-        """
-        cdef map[string, C.Version] versions_
-        versions_ = self.impl.getVersions(device_name.encode())
-        versions = {}
-        for v in versions_:
-            device = v.first.decode()
-            ver = v.second
-            versions[device] = namedtuple("Versions", ["major", "minor", "build_number", "description"])
-            versions[device].build_number = ver.buildNumber.decode()
-            versions[device].description = ver.description.decode()
-            versions[device].minor = ver.apiVersion.minor
-            versions[device].major = ver.apiVersion.major
-        return versions
-
-    cpdef IENetwork read_network(self, model: [str, bytes, os.PathLike], weights: [str, bytes, os.PathLike] = "", init_from_buffer: bool = False):
-        """Reads a network from Intermediate Representation (IR) or ONNX formats and creates an :class:`IENetwork`.
-
-        :param model: A `.xml`, `.onnx`, or `.prototxt` model file or string with IR.
-        :param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or
-                        bytes with file content.
-        :param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted.
-                                 If `False`, attributes are interpreted as strings with paths to `.xml` and `.bin` files
-                                 of IR. If `True`, they are interpreted as Python `bytes` object with `.xml` and `.bin` files content.
-
-        :return: An :class:`IENetwork` object
-
-        Usage example:
-
-        ..
code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - """ - cdef uint8_t*bin_buffer - cdef string weights_ - cdef string model_ - cdef IENetwork net = IENetwork() - cdef size_t bin_size - if init_from_buffer: - model_ = bytes(model) - bin_buffer = weights - bin_size = len(weights) - with nogil: - net.impl = self.impl.readNetwork(model_, bin_buffer, bin_size) - else: - weights_ = "".encode() - model = os.fspath(model) - if not os.path.isfile(model): - raise Exception(f"Path to the model {model} doesn't exist or it's a directory") - model_ = model.encode() - - if not (fnmatch(model, "*.onnx") or fnmatch(model, "*.prototxt")) and weights: - weights = os.fspath(weights) - if not os.path.isfile(weights): - raise Exception(f"Path to the weights {weights} doesn't exist or it's a directory") - weights_ = weights.encode() - with nogil: - net.impl = self.impl.readNetwork(model_, weights_) - return net - - cpdef ExecutableNetwork load_network(self, network: [IENetwork, str], device_name=None, config=None, int num_requests=1): - """Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name - and creates an :class:`ExecutableNetwork` object of the :class:`IENetwork` class. - You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware - resources). - - :param network: A valid :class:`IENetwork` instance. Model file name .xml, .onnx can also be passed as argument - :param device_name: A device name of a target plugin, if no device_name is set then it will use AUTO device as default. - :param config: A dictionary of plugin configuration keys and their values - :param num_requests: A positive integer value of infer requests to be created. - Number of infer requests is limited by device capabilities. Value `0` indicates that optimal number of infer requests will be created. - - :return: An :class:`ExecutableNetwork` object - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - """ - cdef ExecutableNetwork exec_net = ExecutableNetwork() - cdef map[string, string] c_config - cdef string c_device_name - cdef string c_network_path - if num_requests < 0: - raise ValueError(f"Incorrect number of requests specified: {num_requests}. 
Expected positive integer number " - "or zero for auto detection") - if config: - c_config = dict_to_c_map(config) - if device_name: - c_device_name = device_name.encode() - if isinstance(network, str): - c_network_path = network.encode() - if device_name: - with nogil: - exec_net.impl = move(self.impl.loadNetworkFromFile(c_network_path, c_device_name, c_config, num_requests)) - else: - with nogil: - exec_net.impl = move(self.impl.loadNetworkFromFile(c_network_path, c_config, num_requests)) - else: - if device_name: - with nogil: - exec_net.impl = move(self.impl.loadNetwork((network).impl, c_device_name, c_config, num_requests)) - else: - with nogil: - exec_net.impl = move(self.impl.loadNetwork((network).impl, c_config, num_requests)) - return exec_net - - cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config=None, int num_requests=1): - """Creates an executable network from a previously exported network - - :param device_name: Name of device load executable network on - :param model_file: Full path to the location of the exported file - :param config: A dictionary of plugin configuration keys and their values - :param num_requests: A positive integer value of infer requests to be created. Number of infer requests is limited - by device capabilities. - Value `0` indicates that optimal number of infer requests will be created. - - :return: An :class:`ExecutableNetwork` object - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2) - # export executable network - exec_net.export(path_to_file_to_save) - # import previously exported executable network - exec_net_imported = ie.import_network(model_file=path_to_file_to_save, device_name="CPU") - """ - cdef ExecutableNetwork exec_net = ExecutableNetwork() - cdef map[string, string] c_config - if num_requests < 0: - raise ValueError(f"Incorrect number of requests specified: {num_requests}. Expected positive integer number " - "or zero for auto detection") - if config: - c_config = dict_to_c_map(config) - exec_net.impl = move(self.impl.importNetwork(model_file.encode(), device_name.encode(), c_config, num_requests)) - return exec_net - - - def query_network(self, IENetwork network, str device_name, config=None): - """Queries the plugin with specified device name what network layers are supported in the current configuration. - Please note that layers support depends on plugin configuration and loaded extensions. - - :param network: A valid :class:`IENetwork` instance - :param device_name: A device name of a target plugin - :param config: A dictionary of plugin configuration keys and their values - :return: A dictionary mapping layers and device names on which they are supported - - Usage example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU") - """ - cdef map[string, string] c_config - if config: - c_config = dict_to_c_map(config) - res = self.impl.queryNetwork(network.impl, device_name.encode(), c_config) - return c_map_to_dict(res) - - - def set_config(self, config: dict, device_name: str): - """Sets a configuration for a plugin - - .. note:: When specifying a key value of a config, the "KEY_" prefix is omitted. 
- - :param config: a dictionary of configuration parameters as keys and their values - :param device_name: a device name of a target plugin - :return: None - - Usage examples:\n - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - ie.set_config(config={"PERF_COUNT": "YES"}, device_name="CPU") - """ - cdef map[string, string] c_config = dict_to_c_map(config) - self.impl.setConfig(c_config, device_name.encode()) - - - def register_plugin(self, plugin_name: str, device_name: str = ""): - """Register a new device and plugin that enables this device inside OpenVINO Runtime. - - :param plugin_name: A path (absolute or relative) or name of a plugin. Depending on platform, - `plugin` is wrapped with shared library suffix and prefix to identify library full name - :param device_name: A target device name for the plugin. If not specified, the method registers - a plugin with the default name. - - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.register_plugin(plugin_name="openvino_intel_cpu_plugin", device_name="MY_NEW_PLUGIN") - """ - self.impl.registerPlugin(plugin_name.encode(), device_name.encode()) - - - def register_plugins(self, xml_config_file: str): - """Registers plugins specified in an `.xml` configuration file - - :param xml_config_file: A full path to `.xml` file containing plugins configuration - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.register_plugins("/localdisk/plugins/my_custom_cfg.xml") - """ - self.impl.registerPlugins(xml_config_file.encode()) - - - def unregister_plugin(self, device_name: str): - """Unregisters a plugin with a specified device name - - :param device_name: A device name of the plugin to unregister - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.unregister_plugin(device_name="GPU") - """ - self.impl.unregisterPlugin(device_name.encode()) - - - def add_extension(self, extension_path: str, device_name: str): - """Loads extension library to the plugin with a specified device name - - :param extension_path: Path to the extensions library file to load to a plugin - :param device_name: A device name of a plugin to load the extensions to - :return: None - - Usage example:\n - - .. code-block:: python - - ie = IECore() - ie.add_extension(extension_path="/some_dir/libcpu_extension_avx2.so", device_name="CPU") - """ - self.impl.addExtension(extension_path.encode(), device_name.encode()) - - def get_metric(self, device_name: str, metric_name: str): - """ - Gets a general runtime metric for dedicated hardware. Enables to request common device properties, - which are :class:`ExecutableNetwork` agnostic, such as device name, temperature, and other devices-specific values. - - :param device_name: A name of a device to get a metric value. - :param metric_name: A metric name to request. - :return: A metric value corresponding to a metric key. - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.get_metric(metric_name="SUPPORTED_METRICS", device_name="CPU") - """ - return self.impl.getMetric(device_name.encode(), metric_name.encode()) - - - def get_config(self, device_name: str, config_name: str): - """Gets a configuration dedicated to device behavior. The method targets to extract information - which can be set via set_config method. - - .. note:: When specifying a key value of a config, the "KEY_" prefix is omitted. - - :param device_name: A name of a device to get a config value. 
- :param config_name: A config name to request. - :return: A config value corresponding to a config key. - - Usage example: - - .. code-block:: python - - ie = IECore() - ie.get_config(device_name="CPU", config_name="CPU_BIND_THREAD") - """ - return self.impl.getConfig(device_name.encode(), config_name.encode()) - - ## A list of devices. The devices are returned as \[CPU, GPU.0, GPU.1\]. - # If there are more than one device of a specific type, they all are listed followed by a dot and a number. - @property - def available_devices(self): - """ - A list of devices. The devices are returned as \[CPU, GPU.0, GPU.1\]. - If there are more than one device of a specific type, they all are listed followed by a dot and a number. - """ - cdef vector[string] c_devices = self.impl.getAvailableDevices() - return [d.decode() for d in c_devices] - -cdef class PreProcessChannel: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This structure stores info about pre-processing of network inputs (scale, mean image, ...) - """ - - property mean_value: - def __get__(self): - return deref(self._ptr).meanValue - - def __set__(self, float mean_value): - deref(self._ptr).meanValue = mean_value - property std_scale: - def __get__(self): - return deref(self._ptr).stdScale - - def __set__(self, float std_scale): - deref(self._ptr).stdScale = std_scale - property mean_data: - def __get__(self): - blob = Blob() - blob._ptr = deref(self._ptr).meanData - return blob - - def __set__(self, Blob mean_data): - deref(self._ptr).meanData = mean_data._ptr - - -cdef class PreProcessInfo: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class stores pre-process information for the input - """ - - def __cinit__(self): - self._ptr = new CPreProcessInfo() - self._cptr = self._ptr - self._user_data = True - - def __dealloc__(self): - if self._user_data: - del self._ptr - - def __getitem__(self, size_t index): - cdef CPreProcessChannel.Ptr c_channel = deref(self._cptr)[index] - channel = PreProcessChannel() - channel._ptr = c_channel - return channel - - - def get_number_of_channels(self): - """ - Returns a number of channels to preprocess - """ - return deref(self._cptr).getNumberOfChannels() - - - def init(self, const size_t number_of_channels): - """ - Initializes with given number of channels - """ - if not self._ptr: - raise TypeError("Cannot initialized when created from constant") - deref(self._ptr).init(number_of_channels) - - - def set_mean_image(self, Blob mean_image): - """ - Sets mean image values if operation is applicable. - Also sets the mean type to MEAN_IMAGE for all channels - """ - if not self._ptr: - raise TypeError("Cannot set mean image when called from constant") - deref(self._ptr).setMeanImage(mean_image._ptr) - - - def set_mean_image_for_channel(self, Blob mean_image, size_t channel): - """ - Sets mean image values if operation is applicable. 
-        Also sets the mean type to MEAN_IMAGE for a particular channel
-        """
-        if not self._ptr:
-            raise TypeError("Cannot set mean image for channel when called from constant")
-        deref(self._ptr).setMeanImageForChannel(mean_image._ptr, channel)
-
-    @property
-    def mean_variant(self):
-        """Mean Variant to be applied for input before inference if needed.
-
-        Usage example:
-
-        .. code-block:: python
-
-            net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            net.input_info['data'].preprocess_info.mean_variant = MeanVariant.MEAN_IMAGE
-        """
-        return MeanVariant(deref(self._cptr).getMeanVariant())
-
-    @mean_variant.setter
-    def mean_variant(self, variant : MeanVariant):
-        if not self._ptr:
-            raise TypeError("Cannot set mean variant when called from constant")
-        deref(self._ptr).setVariant(variant.value)
-
-
-    @property
-    def resize_algorithm(self):
-        """
-        Resize Algorithm to be applied for input before inference if needed.
-
-        .. note:: The input also needs to be set via the set_blob method.
-
-        Usage example:
-
-        .. code-block:: python
-
-            net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            net.input_info['data'].preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR
-            exec_net = ie_core.load_network(net, 'CPU')
-            tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
-            img_blob = ie.Blob(tensor_desc, image)
-            request = exec_net.requests[0]
-            request.set_blob('data', img_blob)
-            request.infer()
-        """
-        return ResizeAlgorithm(deref(self._cptr).getResizeAlgorithm())
-
-    @resize_algorithm.setter
-    def resize_algorithm(self, alg : ResizeAlgorithm):
-        if not self._ptr:
-            raise TypeError("Cannot set resize algorithm when called from constant")
-        deref(self._ptr).setResizeAlgorithm(alg.value)
-
-
-    @property
-    def color_format(self):
-        """
-        Color format to be used in on-demand color conversions applied to input before inference
-
-        Usage example:
-
-        .. code-block:: python
-
-            net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            net.input_info['data'].preprocess_info.color_format = ColorFormat.BGR
-        """
-        return ColorFormat(deref(self._cptr).getColorFormat())
-
-    @color_format.setter
-    def color_format(self, fmt : ColorFormat):
-        if not self._ptr:
-            raise TypeError("Cannot set color format when called from constant")
-        deref(self._ptr).setColorFormat(fmt.value)
-
-
-cdef class InputInfoPtr:
-    """
-    OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on
-    transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html
-
-    This class contains information about each input of the network
-    """
-
-    @property
-    def name(self):
-        """
-        Name of this input
-        """
-        return deref(self._ptr).name().decode()
-
-    @property
-    def precision(self):
-        """
-        Precision of this input
-        """
-        return deref(self._ptr).getPrecision().name().decode()
-
-    @precision.setter
-    def precision(self, precision : str):
-        if precision not in supported_precisions:
-            raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}")
-        deref(self._ptr).setPrecision(C.Precision.FromStr(precision.encode()))
-
-    @property
-    def layout(self):
-        """
-        Layout of this input
-        """
-        return layout_int_to_str_map[deref(self._ptr).getLayout()]
-
-    @layout.setter
-    def layout(self, layout : str):
-        if layout not in layout_str_to_enum.keys():
-            raise ValueError(f"Unsupported layout {layout}!
" - f"List of supported layouts: {list(layout_str_to_enum.keys())}") - deref(self._ptr).setLayout(layout_str_to_enum[layout]) - - - @property - def preprocess_info(self): - """Gets pre-process info for the input - - Usage example: - - .. code-block:: python - - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - net.input_info['data'].preprocess_info.color_format = ColorFormat.BGR - """ - cdef CPreProcessInfo* c_preprocess_info = &deref(self._ptr).getPreProcess() - preprocess_info = PreProcessInfo() - del preprocess_info._ptr - preprocess_info._user_data = False - preprocess_info._ptr = c_preprocess_info - preprocess_info._cptr = c_preprocess_info - return preprocess_info - - @property - def tensor_desc(self): - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - tensor_desc.impl = c_tensor_desc - return tensor_desc - - @property - def input_data(self): - """ - Get access to DataPtr object - """ - cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData() - data_ptr = DataPtr() - data_ptr._ptr_network = self._ptr_network - data_ptr._ptr = c_data_ptr - return data_ptr - - @input_data.setter - def input_data(self, input_ptr : DataPtr): - deref(self._ptr).setInputData(input_ptr._ptr) - - -cdef class InputInfoCPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class contains const information about each input of the network. - Provides same interface as InputInfoPtr object except properties setters - """ - - @property - def name(self): - """ - Name of this input - """ - return deref(self._ptr).name().decode() - - @property - def precision(self): - """ - Precision of this input - """ - return deref(self._ptr).getPrecision().name().decode() - - @property - def input_data(self): - """ - Get access to DataPtr object - """ - cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData() - data_ptr = DataPtr() - data_ptr._ptr = c_data_ptr - data_ptr._ptr_plugin = self._ptr_plugin - return data_ptr - - @property - def tensor_desc(self): - """ - tensor_desc of this input - """ - cdef CTensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc() - precision = c_tensor_desc.getPrecision().name().decode() - layout = c_tensor_desc.getLayout() - dims = c_tensor_desc.getDims() - tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout]) - tensor_desc.impl = c_tensor_desc - return tensor_desc - - -cdef class DataPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class is the layer data representation. 
- """ - - def __init__(self): - """ - Default constructor - """ - self._ptr_network = NULL - - @property - def name(self): - """ - Name of the data object - """ - return deref(self._ptr).getName().decode() - - @property - def precision(self): - """ - Precision of the data object - """ - return deref(self._ptr).getPrecision().name().decode() - - @precision.setter - def precision(self, precision): - if precision not in supported_precisions: - raise ValueError(f"Unsupported precision {precision}! List of supported precisions: {supported_precisions}") - deref(self._ptr).setPrecision(C.Precision.FromStr(precision.encode())) - - @property - def shape(self): - """ - Shape (dimensions) of the data object - """ - return deref(self._ptr).getDims() - - @property - def layout(self): - """ - Layout of the data object - """ - return layout_int_to_str_map[deref(self._ptr).getLayout()] - - @layout.setter - def layout(self, layout): - if layout not in layout_str_to_enum.keys(): - raise ValueError(f"Unsupported layout {layout}! " - f"List of supported layouts: {list(layout_str_to_enum.keys())}") - deref(self._ptr).setLayout(layout_str_to_enum[layout]) - - @property - def initialized(self): - """ - Checks if the current data object is resolved - """ - return deref(self._ptr).isInitialized() - - -cdef class CDataPtr: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class is the layer constant data representation. Provides same interface as DataPtr object except properties setters - """ - - @property - def name(self): - """ - Name of the data object - """ - return deref(self._ptr).getName().decode() - - @property - def precision(self): - """ - Precision of the data object - """ - return deref(self._ptr).getPrecision().name().decode() - - @property - def shape(self): - """ - Shape (dimensions) of the data object - """ - return deref(self._ptr).getDims() - - @property - def layout(self): - """ - Layout of the data object - """ - return layout_int_to_str_map[deref(self._ptr).getLayout()] - - @property - def initialized(self): - """ - Checks if the current data object is resolved - """ - return deref(self._ptr).isInitialized() - - -cdef class ExecutableNetwork: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class represents a network instance loaded to plugin and ready for inference. - """ - - def __init__(self): - """ - There is no explicit class constructor. To make a valid instance of :class:`ExecutableNetwork`, - use :func:`IECore.load_network` method of the :class:`IECore` class. - """ - self._infer_requests = [] - - def infer(self, inputs=None): - """Starts synchronous inference for the first infer request of the executable network and returns output data. - Wraps :func:`InferRequest.infer` method of the :class:`InferRequest` class - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper shape with - input data for the layer - :return: A dictionary that maps output layer names to :class:`numpy.ndarray` objects with output data of the layer - - Usage example: - - .. 
code-block:: python - - ie_core = IECore() - net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file) - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - res = exec_net.infer({'data': img}) - res - {'prob': array([[[[2.83426580e-08]], - [[2.40166020e-08]], - [[1.29469613e-09]], - [[2.95946148e-08]] - ...... - ]])} - """ - current_request = self.requests[0] - current_request.infer(inputs) - res = {} - for name, value in current_request.output_blobs.items(): - res[name] = deepcopy(value.buffer) - return res - - def start_async(self, request_id, inputs=None): - """ - Starts asynchronous inference for specified infer request. - Wraps :func:`InferRequest.async_infer` method of the :class:`InferRequest` class. - - :param request_id: Index of infer request to start inference - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper - shape with input data for the layer - :return: A handler of specified infer request, which is an instance of the :class:`InferRequest` class. - - Usage example: - - .. code-block:: python - - infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image}) - infer_status = infer_request_handle.wait() - res = infer_request_handle.output_blobs[out_blob_name] - """ - if request_id not in list(range(len(self.requests))): - raise ValueError("Incorrect request_id specified!") - current_request = self.requests[request_id] - current_request.async_infer(inputs) - return current_request - - - @property - def requests(self): - """ - A tuple of :class:`InferRequest` instances - """ - cdef int c_infer_requests_size = deref(self.impl).infer_requests.size() - if len(self._infer_requests) == 0: - for i in range(c_infer_requests_size): - infer_request = InferRequest() - infer_request.impl = &(deref(self.impl).infer_requests[i]) - infer_request._inputs_list = list(self.input_info.keys()) - infer_request._outputs_list = list(self.outputs.keys()) - self._infer_requests.append(infer_request) - - if len(self._infer_requests) != c_infer_requests_size: - raise Exception("Mismatch of infer requests number!") - - return self._infer_requests - - @property - def input_info(self): - """ - A dictionary that maps input layer names to InputInfoCPtr objects - """ - cdef map[string, C.InputInfo.CPtr] c_inputs = deref(self.impl).getInputsInfo() - inputs = {} - cdef InputInfoCPtr input_info_ptr - for in_ in c_inputs: - input_info_ptr = InputInfoCPtr() - input_info_ptr._ptr = in_.second - input_info_ptr._ptr_plugin = deref(self.impl).getPluginLink() - inputs[in_.first.decode()] = input_info_ptr - return inputs - - ## A dictionary that maps output layer names to CDataPtr objects - @property - def outputs(self): - """ - A dictionary that maps output layer names to CDataPtr objects - """ - cdef map[string, C.CDataPtr] c_outputs = deref(self.impl).getOutputs() - outputs = {} - cdef CDataPtr data_ptr - for in_ in c_outputs: - data_ptr = CDataPtr() - data_ptr._ptr = in_.second - data_ptr._ptr_plugin = deref(self.impl).getPluginLink() - outputs[in_.first.decode()] = data_ptr - return outputs - - - def get_exec_graph_info(self): - """Gets executable graph information from a device - - :return: An instance of :class:`IENetwork` - - Usage example: - - .. 
code-block:: python
-
-            ie_core = IECore()
-            net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            exec_net = ie_core.load_network(net, device, num_requests=2)
-            exec_graph = exec_net.get_exec_graph_info()
-        """
-        ie_network = IENetwork()
-        ie_network.impl = deref(self.impl).GetExecGraphInfo()
-        ie_network._ptr_plugin = deref(self.impl).getPluginLink()
-        return ie_network
-
-
-    def get_metric(self, metric_name: str):
-        """Gets general runtime metric for an executable network. It can be network name, actual device ID on
-        which executable network is running or all other properties which cannot be changed dynamically.
-
-        :param metric_name: A metric name to request.
-        :return: A metric value corresponding to a metric key.
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            exec_net = ie.load_network(net, "CPU")
-            exec_net.get_metric("NETWORK_NAME")
-        """
-        return deref(self.impl).getMetric(metric_name.encode())
-
-
-    def get_config(self, config_name: str):
-        """Gets configuration for current executable network. The method is responsible to extract information
-        which affects executable network execution
-
-        :param config_name: A configuration parameter name to request.
-        :return: A configuration value corresponding to a configuration key.
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            exec_net = ie.load_network(net, "CPU")
-            config = exec_net.get_config("CPU_BIND_THREAD")
-        """
-        return deref(self.impl).getConfig(config_name.encode())
-
-    def set_config(self, config: dict):
-        """Sets configuration for current executable network.
-
-        :param config: a dictionary of configuration parameters as keys and their values
-        :return: None
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            exec_net = ie.load_network(net, "GNA")
-            config = exec_net.set_config({"DEVICE_MODE" : "GNA_SW_EXACT"})
-        """
-        cdef map[string, string] c_config = dict_to_c_map(config)
-        deref(self.impl).setConfig(c_config)
-
-    def export(self, model_file: str):
-        """Exports the current executable network.
-
-        :param model_file: Full path to the target exported file location
-        :return: None
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
-            exec_net.export(path_to_file_to_save)
-        """
-        deref(self.impl).exportNetwork(model_file.encode())
-
-    cpdef wait(self, num_requests=None, timeout=None):
-        """Waits until the result from any request becomes available or the specified timeout elapses,
-        whichever comes first.
-
-        :param num_requests: Number of idle requests for which to wait.
-                             If not specified, `num_requests` value is set to number of requests by default.
-        :param timeout: Time to wait in milliseconds or special (0, -1) cases described above.
-                        If not specified, `timeout` value is set to -1 by default.
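In the API 2.0, waiting on a pool of requests is typically handled by `AsyncInferQueue` rather than `ExecutableNetwork.wait`; a minimal sketch, assuming an IR model on disk and an iterable `images` of numpy arrays (both placeholders):

.. code-block:: python

    from openvino.runtime import AsyncInferQueue, Core

    core = Core()
    compiled = core.compile_model("model.xml", "CPU")
    queue = AsyncInferQueue(compiled, 2)  # pool of two infer requests
    queue.set_callback(lambda request, userdata: print(userdata, request.results))
    for i, img in enumerate(images):
        queue.start_async({0: img}, userdata=i)
    queue.wait_all()  # replaces the ExecutableNetwork.wait() loop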
- :return: Request status code: `OK` or `RESULT_NOT_READY` - """ - cdef int status_code - cdef int64_t c_timeout - cdef int c_num_requests - if num_requests is None: - num_requests = len(self.requests) - c_num_requests = num_requests - if timeout is None: - timeout = WaitMode.RESULT_READY - c_timeout = timeout - with nogil: - status_code = deref(self.impl).wait(c_num_requests, c_timeout) - return status_code - - - cpdef get_idle_request_id(self): - """ - Get idle request ID - - :return: Request index - """ - return deref(self.impl).getIdleRequestId() - -ctypedef extern void (*cb_type)(void*, int) with gil - - -cdef class InferRequest: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - This class provides an interface to infer requests of :class:`ExecutableNetwork` and serves - to handle infer requests execution and to set and get output data. - """ - - def __init__(self): - """ - There is no explicit class constructor. To make a valid :class:`InferRequest` instance, use :func:`IECore.load_network` - method of the :class:`IECore` class with specified number of requests to get :class:`ExecutableNetwork` instance - which stores infer requests. - """ - self._user_blobs = {} - self._inputs_list = [] - self._outputs_list = [] - self._py_callback = lambda *args, **kwargs: None - self._py_data = None - - cdef void user_callback(self, int status) with gil: - if self._py_callback: - self._py_callback(status, self._py_data) - - def set_completion_callback(self, py_callback, py_data = None): - """Description: Sets a callback function that is called on success or failure of an asynchronous request - - :param py_callback: Any defined or lambda function - :param py_data: Data that is passed to the callback function - :return: None - - Usage example: - - .. 
code-block:: python - - callback = lambda status, py_data: print(f"Request with id {py_data} finished with status {status}") - ie = IECore() - net = ie.read_network(model="./model.xml", weights="./model.bin") - exec_net = ie.load_network(net, "CPU", num_requests=4) - for id, req in enumerate(exec_net.requests): - req.set_completion_callback(py_callback=callback, py_data=id) - - for req in exec_net.requests: - req.async_infer({"data": img}) - """ - self._py_callback = py_callback - self._py_data = py_data - deref(self.impl).setCyCallback( self.user_callback, self) - - cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name): - cdef BlobBuffer buffer = BlobBuffer() - cdef CBlob.Ptr blob_ptr - blob_ptr = deref(self.impl).getBlobPtr(blob_name) - buffer.reset(blob_ptr) - return buffer - - - @property - def input_blobs(self): - """ - Dictionary that maps input layer names to corresponding Blobs - """ - input_blobs = {} - for input in self._inputs_list: - # TODO: will not work for setting data via .inputs['data'][:] - if input in self._user_blobs: - input_blobs[input] = self._user_blobs[input] - else: - blob = Blob() - blob._ptr = deref(self.impl).getBlobPtr(input.encode()) - input_blobs[input] = blob - return input_blobs - - @property - def output_blobs(self): - """ - Dictionary that maps output layer names to corresponding Blobs - """ - output_blobs = {} - for output in self._outputs_list: - blob = Blob() - blob._ptr = deref(self.impl).getBlobPtr(output.encode()) - output_blobs[output] = deepcopy(blob) - return output_blobs - - @property - def preprocess_info(self): - """ - Dictionary that maps input layer names to corresponding preprocessing information - """ - preprocess_info = {} - for input_blob in self.input_blobs.keys(): - preprocess = PreProcessInfo() - del preprocess._ptr - preprocess._user_data = False - preprocess._ptr = NULL - preprocess._cptr = &deref(self.impl).getPreProcess(input_blob.encode()) - preprocess_info[input_blob] = preprocess - return preprocess_info - - def query_state(self): - """Gets state control interface for given infer request - State control essential for recurrent networks - :return: A vector of Memory State objects - """ - cdef vector[C.CVariableState] c_mem_state_vec = deref(self.impl).queryState() - mem_state_vec = [] - for ms in c_mem_state_vec: - state = VariableState() - state.impl = ms - mem_state_vec.append(state) - return mem_state_vec - - def set_blob(self, blob_name : str, blob : Blob): - """Sets user defined Blob for the infer request - - :param blob_name: A name of input blob - :param blob: Blob object to set for the infer request - :param preprocess_info: PreProcessInfo object to set for the infer request. - :return: None - - Usage example: - - .. code-block:: python - - ie = IECore() - net = IENetwork("./model.xml", "./model.bin") - exec_net = ie.load_network(net, "CPU", num_requests=2) - td = TensorDesc("FP32", (1, 3, 224, 224), "NCHW") - blob_data = np.ones(shape=(1, 3, 224, 224), dtype=np.float32) - blob = Blob(td, blob_data) - exec_net.requests[0].set_blob(blob_name="input_blob_name", blob=blob), - """ - deref(self.impl).setBlob(blob_name.encode(), blob._ptr) - self._user_blobs[blob_name] = blob - - cpdef infer(self, inputs=None): - """Starts synchronous inference of the infer request and fill outputs array - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects of proper shape with - input data for the layer - :return: None - - Usage example: - - .. 
code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].infer({input_blob: image}) - res = exec_net.requests[0].output_blobs['prob'] - np.flip(np.sort(np.squeeze(res)),0) - - # array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01, - # 5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03, - # 2.26027006e-03, 2.12283316e-03 ...]) - """ - if inputs is not None: - self._fill_inputs(inputs) - deref(self.impl).infer() - - cpdef async_infer(self, inputs=None): - """Starts asynchronous inference of the infer request and fill outputs array - - :param inputs: A dictionary that maps input layer names to :class:`numpy.ndarray` objects - of proper shape with input data for the layer - :return: None - - Usage example: - - .. code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].async_infer({input_blob: image}) - request_status = exec_net.requests[0].wait() - res = exec_net.requests[0].output_blobs['prob'] - """ - if inputs is not None: - self._fill_inputs(inputs) - deref(self.impl).infer_async() - - cpdef wait(self, timeout=None): - """Waits for the result to become available. Blocks until specified timeout elapses or the result - becomes available, whichever comes first. - - :param timeout: Time to wait in milliseconds or special (0, -1) cases described above. - If not specified, `timeout` value is set to -1 by default. - :return: Request status code. - - .. note:: - - There are special values of the timeout parameter: - - * 0 - Immediately returns the inference status. It does not block or interrupt execution. - To find statuses meaning, please refer to :ref:`enum_InferenceEngine_StatusCode` in Inference Engine C++ documentation - * -1 - Waits until inference result becomes available (default value) - - Usage example: See :func:`InferRequest.async_infer` method of the the :class:`InferRequest` class. - """ - cdef int status - cdef int64_t c_timeout - if timeout is None: - timeout = WaitMode.RESULT_READY - c_timeout = timeout - with nogil: - status = deref(self.impl).wait(c_timeout) - return status - - - cpdef get_perf_counts(self): - """Queries performance measures per layer to get feedback of what is the most time consuming layer. - - .. note:: Performance counters data and format depends on the plugin - - :return: Dictionary containing per-layer execution information. - - Usage example: - - .. code-block:: python - - exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2) - exec_net.requests[0].infer({input_blob: image}) - exec_net.requests[0].get_perf_counts() - # {'Conv2D': {'exec_type': 'jit_avx2_1x1', - # 'real_time': 154, - # 'cpu_time': 154, - # 'status': 'EXECUTED', - # 'layer_type': 'Convolution'}, - # 'Relu6': {'exec_type': 'undef', - # 'real_time': 0, - # 'cpu_time': 0, - # 'status': 'NOT_RUN', - # 'layer_type': 'Clamp'} - # ... - # } - """ - cdef map[string, C.ProfileInfo] c_profile = deref(self.impl).getPerformanceCounts() - profile = {} - for line in c_profile: - info = line.second - # TODO: add execution index. Check if unsigned int is properly converted to int in python. 
- profile[line.first.decode()] = {"status": info.status.decode(), "exec_type": info.exec_type.decode(), - "layer_type": info.layer_type.decode(), "real_time": info.real_time, - "cpu_time": info.cpu_time, "execution_index": info.execution_index} - return profile - - ## Current infer request inference time in milliseconds - @property - def latency(self): - """ - Current infer request inference time in milliseconds - """ - return self.impl.exec_time - - - def _fill_inputs(self, inputs): - for k, v in inputs.items(): - assert k in self._inputs_list, f"No input with name {k} found in network" - if self.input_blobs[k].tensor_desc.precision == "FP16": - self.input_blobs[k].buffer[:] = v.view(dtype=np.int16) - else: - self.input_blobs[k].buffer[:] = v - - -cdef class IENetwork: - """ - OpenVINO Inference Engine Python API is deprecated and will be removed in the 2024.0 release. For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - """ - ## Class constructor - # - # @param model: A PyCapsule containing smart pointer to nGraph function. - # - # @return Instance of IENetwork class - # - # Usage example:\n - # Initializing `IENetwork` object from IR files: - # ```python - # func = Function([relu], [param], 'test') - # caps = Function.to_capsule(func) - # net = IENetwork(caps) - # ``` - def __cinit__(self, model = None): - # Try to create Inference Engine network from capsule - if model is not None: - self.impl = C.IENetwork(model) - else: - with nogil: - self.impl = C.IENetwork() - - @property - def name(self): - """ - Name of the loaded network - """ - name = bytes(self.impl.name) - return name.decode() - - @property - def input_info(self): - """ - A dictionary that maps input layer names to InputInfoPtr objects. - """ - cdef map[string, C.InputInfo.Ptr] c_inputs = self.impl.getInputsInfo() - inputs = {} - cdef InputInfoPtr input_info_ptr - for input in c_inputs: - input_info_ptr = InputInfoPtr() - input_info_ptr._ptr = input.second - input_info_ptr._ptr_network = &self.impl - inputs[input.first.decode()] = input_info_ptr - return inputs - - ## A dictionary that maps output layer names to DataPtr objects - @property - def outputs(self): - """ - A dictionary that maps output layer names to DataPtr objects - """ - cdef map[string, C.DataPtr] c_outputs = self.impl.getOutputs() - outputs = {} - cdef DataPtr data_ptr - for output in c_outputs: - data_ptr = DataPtr() - data_ptr._ptr_network = &self.impl - data_ptr._ptr = output.second - outputs[output.first.decode()] = data_ptr - return outputs - - - @property - def batch_size(self): - """Batch size of the network. Provides getter and setter interfaces to get and modify the - network batch size. For example: - - .. code-block:: python - - ie = IECore() - net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file) - print(net.batch_size) - net.batch_size = 4 - print(net.batch_size) - print(net.input_info['data'].input_data.shape) - """ - return self.impl.getBatch() - - @batch_size.setter - def batch_size(self, batch: int): - if batch <= 0: - raise AttributeError(f"Invalid batch size {batch}! Batch size should be positive integer value") - self.impl.setBatch(batch) - - def add_outputs(self, outputs): - """Marks any intermediate layer as output layer to retrieve the inference results from the specified layers. - - :param outputs: List of layers to be set as model outputs. 
The list can contain strings with layer names to be set
-                        as outputs or tuples with layer name as first element and output port id as second element.
-                        If a single layer is to be set as an output, a string or a tuple with one layer can be provided.
-
-        :return: None
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            net.add_outputs(["conv5_1", "conv2_1", ("split_2", 1)])
-        """
-        if not isinstance(outputs, list):
-            outputs = [outputs]
-        for i, line in enumerate(outputs):
-            if isinstance(line, str):
-                self.impl.addOutput(line.encode(), 0)
-            elif isinstance(line, tuple) and len(line) == 2:
-                self.impl.addOutput(line[0].encode(), line[1])
-            else:
-                raise TypeError(f"Incorrect type {type(line)} for layer to add at index {i}. "
-                                "Expected string with layer name or tuple with two elements: layer name as "
-                                "first element and port id as second")
-
-    def serialize(self, path_to_xml, path_to_bin: str = ""):
-        """Serializes the network and stores it in files.
-
-        :param path_to_xml: Path to a file, where a serialized model will be stored
-        :param path_to_bin: Path to a file, where serialized weights will be stored
-        :return: None
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml, weights=path_to_bin)
-            net.serialize(path_to_xml, path_to_bin)
-        """
-        self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
-
-    def reshape(self, input_shapes: dict):
-        """Reshapes the network to change spatial dimensions, batch size, or any dimension.
-
-        :param input_shapes: A dictionary that maps input layer names to tuples with the target shape
-        :return: None
-
-        .. note::
-
-            Before using this method, make sure that the target shape is applicable for the network.
-            Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
-
-        Usage example:
-
-        .. code-block:: python
-
-            ie = IECore()
-            net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-            input_layer = next(iter(net.input_info))
-            n, c, h, w = net.input_info[input_layer].input_data.shape
-            net.reshape({input_layer: (n, c, h*2, w*2)})
-        """
-        cdef map[string, vector[size_t]] c_input_shapes
-        cdef vector[size_t] c_shape
-        net_inputs = self.input_info
-        for input, shape in input_shapes.items():
-            c_shape = []
-            if input not in net_inputs:
-                raise AttributeError(f"Specified '{input}' layer not in network inputs '{net_inputs}'! ")
-            for v in shape:
-                try:
-                    c_shape.push_back(v)
-                except OverflowError:
-                    raise ValueError(f"Detected dynamic dimension in the shape {shape} of the `{input}` input. Dynamic shapes are supported since OpenVINO Runtime API 2022.1.")
-
-            c_input_shapes[input.encode()] = c_shape
-        self.impl.reshape(c_input_shapes)
-
-    def _get_function_capsule(self):
-        return self.impl.getFunction()
-
-    def get_ov_name_for_tensor(self, orig_name: str):
-        name = bytes(orig_name, 'utf-8')
-        return self.impl.getOVNameForTensor(name).decode('utf-8')
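For comparison, the `IENetwork` workflows deleted above (reshape, serialize) map onto `ov.Model` in the API 2.0. A minimal sketch, assuming a static-shape IR model; the file names are placeholders:

.. code-block:: python

    import openvino.runtime as ov

    core = ov.Core()
    model = core.read_model("model.xml")                 # replaces IECore.read_network
    name = model.input(0).get_any_name()
    n, c, h, w = model.input(0).shape
    model.reshape({name: [n, c, h * 2, w * 2]})          # replaces IENetwork.reshape
    ov.serialize(model, "reshaped.xml", "reshaped.bin")  # replaces IENetwork.serialize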
For instructions on - transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html - - Copy-less accessor for Inference Engine Blob - """ - - cdef reset(self, CBlob.Ptr & ptr, vector[size_t] representation_shape = []): - self.ptr = ptr - cdef CTensorDesc desc = deref(ptr).getTensorDesc() - cdef SizeVector shape - if len(representation_shape) == 0: - shape = desc.getDims() - if layout_int_to_str_map[desc.getLayout()] == 'SCALAR': - shape = [1] - else: - shape = representation_shape - cdef Py_ssize_t itemsize = deref(ptr).element_size() - self.strides.resize(shape.size()) - self.shape.resize(shape.size()) - - total_stride = itemsize - # dims are in row major (C - style), - # thence strides are computed starting from latest dimension - for i in reversed(range(shape.size())): - self.strides[i] = total_stride - self.shape[i] = shape[i] - total_stride *= shape[i] - - self.total_stride = total_stride - self.format = self._get_blob_format(desc) - self.item_size = itemsize - - def __getbuffer__(self, Py_buffer *buffer, int flags): - buffer.buf = C.get_buffer[char](deref(self.ptr)) - buffer.format = self.format - buffer.internal = NULL - buffer.itemsize = self.item_size - buffer.len = self.total_stride - buffer.ndim = self.shape.size() - buffer.obj = self - buffer.readonly = 0 - buffer.shape = self.shape.data() - buffer.strides = self.strides.data() - buffer.suboffsets = NULL - - cdef char*_get_blob_format(self, const CTensorDesc & desc): - cdef Precision precision = desc.getPrecision() - name = bytes(precision.name()).decode() - # todo: half floats - precision_to_format = { - 'FP32': 'f', # float - 'FP64': 'd', # double - 'FP16': 'h', # signed short - 'U8': 'B', # unsigned char - 'U16': 'H', # unsigned short - 'I8': 'b', # signed char - 'I16': 'h', # signed short - 'I32': 'i', # signed int - 'U32': 'I', # unsigned int - 'I64': 'q', # signed long int - 'U64': 'Q', # unsigned long int - 'BOOL': 'B', # unsigned char - 'BF16': 'h', # signed short - 'BIN': 'b', # signed char - } - if name not in precision_to_format: - raise ValueError(f"Unknown Blob precision: {name}") - - return precision_to_format[name].encode() - - def to_numpy(self, is_const= False): - precision = deref(self.ptr).getTensorDesc().getPrecision() - name = bytes(precision.name()).decode() - arr = np.asarray(self) - if is_const: - arr.flags.writeable = False - if name == "FP16": - return arr.view(dtype=np.float16) - else: - return arr diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp deleted file mode 100644 index 6f8a94b8854824..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.cpp +++ /dev/null @@ -1,680 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ie_api_impl.hpp" - -#include "ie_plugin_config.hpp" -#include "openvino/op/util/framework_node.hpp" - -const std::string EXPORTED_NETWORK_NAME = "undefined"; -std::map precision_map = {{"FP32", InferenceEngine::Precision::FP32}, - {"FP64", InferenceEngine::Precision::FP64}, - {"FP16", InferenceEngine::Precision::FP16}, - {"I8", InferenceEngine::Precision::I8}, - {"I16", InferenceEngine::Precision::I16}, - {"I32", InferenceEngine::Precision::I32}, - {"I64", InferenceEngine::Precision::I64}, - {"U8", InferenceEngine::Precision::U8}, - {"U16", InferenceEngine::Precision::U16}, - {"U32", 
InferenceEngine::Precision::U32}, - {"U64", InferenceEngine::Precision::U64}}; - -std::map layout_map = {{"ANY", InferenceEngine::Layout::ANY}, - {"NCHW", InferenceEngine::Layout::NCHW}, - {"NHWC", InferenceEngine::Layout::NHWC}, - {"OIHW", InferenceEngine::Layout::OIHW}, - {"C", InferenceEngine::Layout::C}, - {"CHW", InferenceEngine::Layout::CHW}, - {"HW", InferenceEngine::Layout::HW}, - {"NC", InferenceEngine::Layout::NC}, - {"CN", InferenceEngine::Layout::CN}, - {"NCDHW", InferenceEngine::Layout::NCDHW}, - {"BLOCKED", InferenceEngine::Layout::BLOCKED}}; -#define stringify(name) #name -#define IE_CHECK_CALL(expr) \ - { \ - auto ret = (expr); \ - if (ret != InferenceEngine::StatusCode::OK) { \ - IE_THROW() << response.msg; \ - } \ - } - -static uint32_t getOptimalNumberOfRequests(const InferenceEngine::ExecutableNetwork& actual) { - try { - auto parameter_value = actual.GetMetric(METRIC_KEY(SUPPORTED_METRICS)); - auto supported_metrics = parameter_value.as>(); - const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); - if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) { - parameter_value = actual.GetMetric(key); - if (parameter_value.is()) - return parameter_value.as(); - else - IE_THROW() << "Unsupported format for " << key << "!" - << " Please specify number of infer requests directly!"; - } else { - IE_THROW() << "Can't load network: " << key << " is not supported!" - << " Please specify number of infer requests directly!"; - } - } catch (const std::exception& ex) { - IE_THROW() << "Can't load network: " << ex.what() << " Please specify number of infer requests directly!"; - } -} - -static PyObject* parse_parameter(const InferenceEngine::Parameter& param) { - // Check for std::string - if (param.is()) { - return PyUnicode_FromString(param.as().c_str()); - } - // Check for int - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((long)val); - } - // Check for unsigned int - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((unsigned long)val); - } - // Check for uint64_t - else if (param.is()) { - auto val = param.as(); - return PyLong_FromLong((unsigned long)val); - } - // Check for float - else if (param.is()) { - auto val = param.as(); - return PyFloat_FromDouble((double)val); - } - // Check for bool - else if (param.is()) { - auto val = param.as(); - return val ? 
Py_True : Py_False; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyObject* str_val = PyUnicode_InternFromString(it.c_str()); - PyList_Append(list, str_val); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (param.is>()) { - auto val = param.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyFloat_FromDouble((double)it)); - } - return list; - } - // Check for std::tuple - else if (param.is>()) { - auto val = param.as>(); - PyObject* tuple = PyTuple_New(2); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - return tuple; - } - // Check for std::tuple - else if (param.is>()) { - auto val = param.as>(); - PyObject* tuple = PyTuple_New(3); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val))); - return tuple; - } - // Check for std::map - else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str())); - } - return dict; - } - // Check for std::map - else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); - } - return dict; - } else if (param.is>()) { - auto val = param.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - std::stringstream s; - s << it.first; - PyDict_SetItemString(dict, s.str().c_str(), PyFloat_FromDouble((double)it.second)); - } - return dict; - } else if (param.is()) { - auto val = param.as(); - using namespace InferenceEngine; - std::stringstream s; - s << val; - return PyUnicode_FromString(s.str().c_str()); - } else { - PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); - return (PyObject*)NULL; - } -} - -/* FrameworkNodeExtension is a temporary extension that is needed to enable FrameworkNode usage - * in IRReader for all unknown opsets and operations. 
-
-/* FrameworkNodeExtension is a temporary extension that is needed to enable FrameworkNode usage
- * in IRReader for all unknown opsets and operations. To have a connection between Extension and
- * IRReader we register extensions with specific version equal to "framework_node_ext" which
- * triggers FrameworkNode usage
- */
-class FrameworkNodeExtension : public InferenceEngine::IExtension {
-public:
-    void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {
-        static InferenceEngine::Version ExtensionDescription = {{1, 0}, "1.0", "framework_node_ext"};
-
-        versionInfo = &ExtensionDescription;
-    }
-
-    std::map<std::string, ngraph::OpSet> getOpSets() override {
-        std::map<std::string, ngraph::OpSet> opsets;
-        ngraph::OpSet opset;
-        opset.insert<ov::op::util::FrameworkNode>();
-        opsets["util"] = opset;
-        return opsets;
-    }
-
-    void Unload() noexcept override {}
-};
-
-InferenceEnginePython::IENetwork InferenceEnginePython::read_network(std::string path_to_xml, std::string path_to_bin) {
-    InferenceEngine::Core core;
-    core.AddExtension(std::make_shared<FrameworkNodeExtension>());
-    auto net = core.ReadNetwork(path_to_xml, path_to_bin);
-    return InferenceEnginePython::IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
-}
-
-InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network)
-    : actual(cnn_network) {
-    if (actual == nullptr)
-        IE_THROW() << "IENetwork was not initialized.";
-    name = actual->getName();
-    batch_size = actual->getBatchSize();
-}
-
-InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
-    auto* capsule_ptr = PyCapsule_GetPointer(network, "ngraph_function");
-    auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
-    if (function_sp == nullptr)
-        IE_THROW() << "Cannot create CNNNetwork from capsule! Capsule doesn't "
-                      "contain nGraph function!";
-
-    InferenceEngine::CNNNetwork cnnNetwork(*function_sp);
-    actual = std::make_shared<InferenceEngine::CNNNetwork>(cnnNetwork);
-    name = actual->getName();
-    batch_size = actual->getBatchSize();
-}
-
-void InferenceEnginePython::IENetwork::serialize(const std::string& path_to_xml, const std::string& path_to_bin) {
-    actual->serialize(path_to_xml, path_to_bin);
-}
-
-PyObject* InferenceEnginePython::IENetwork::getFunction() {
-    const char* py_capsule_name = "ngraph_function";
-    auto ngraph_func_ptr = actual->getFunction();
-    // create a shared pointer on the heap before putting it in the capsule
-    // this secures the lifetime of the object transferred by the capsule
-    auto* sp_copy = new std::shared_ptr<ngraph::Function>(ngraph_func_ptr);
-
-    // a destructor callback that will delete the heap allocated shared_ptr
-    // when the capsule is destructed
-    auto sp_deleter = [](PyObject* capsule) {
-        auto* capsule_ptr = PyCapsule_GetPointer(capsule, "ngraph_function");
-        auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
-        if (function_sp) {
-            delete function_sp;
-        }
-    };
-    if (ngraph_func_ptr) {
-        // return PyCapsule_New(&ngraph_func_ptr, py_capsule_name, NULL);
-        return PyCapsule_New(sp_copy, py_capsule_name, sp_deleter);
-    } else {
-        return nullptr;
-    }
-}
-
-const std::map<std::string, InferenceEngine::InputInfo::Ptr> InferenceEnginePython::IENetwork::getInputsInfo() {
-    std::map<std::string, InferenceEngine::InputInfo::Ptr> inputs;
-    const InferenceEngine::InputsDataMap& inputsInfo = actual->getInputsInfo();
-    for (auto& in : inputsInfo) {
-        inputs[in.first] = in.second;
-    }
-    return inputs;
-}
-
-const std::map<std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
-    std::map<std::string, InferenceEngine::DataPtr> outputs;
-    const InferenceEngine::OutputsDataMap& outputsInfo = actual->getOutputsInfo();
-    for (auto& out : outputsInfo) {
-        outputs[out.first] = out.second;
-    }
-    return outputs;
-}
-
-std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) {
-    return actual->getOVNameForTensor(orig_name);
-}
-
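// A sketch of the consuming side of the "ngraph_function" capsule created by
// getFunction() above. The capsule carries a heap-allocated
// std::shared_ptr<ngraph::Function>* that the capsule destructor frees;
// `capsule` is assumed to be a PyObject* received from Python:
void* raw = PyCapsule_GetPointer(capsule, "ngraph_function");
if (auto* sp = static_cast<std::shared_ptr<ngraph::Function>*>(raw)) {
    std::shared_ptr<ngraph::Function> func = *sp;  // take a copy; the capsule keeps its own
}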
-void InferenceEnginePython::IENetwork::addOutput(const std::string& out_layer, size_t port_id) {
-    actual->addOutput(out_layer, port_id);
-}
-
-void InferenceEnginePython::IENetwork::setBatch(const size_t size) {
-    actual->setBatchSize(size);
-}
-
-size_t InferenceEnginePython::IENetwork::getBatch() {
-    return actual->getBatchSize();
-}
-
-void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>>& input_shapes) {
-    actual->reshape(input_shapes);
-}
-
-InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests)
-    : infer_requests(num_requests),
-      name(name) {
-    request_queue_ptr = std::make_shared<IdleInferRequestQueue>();
-}
-
-void InferenceEnginePython::IEExecNetwork::infer() {
-    InferRequestWrap& request = infer_requests[0];
-    request.infer();
-}
-
-InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGraphInfo() {
-    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(actual->GetExecGraphInfo()));
-}
-
-PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string& metric_name) {
-    return parse_parameter(actual->GetMetric(metric_name));
-}
-
-PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string& name) {
-    return parse_parameter(actual->GetConfig(name));
-}
-
-void InferenceEnginePython::IEExecNetwork::setConfig(const std::map<std::string, std::string>& config) {
-    std::map<std::string, InferenceEngine::Parameter> newConfig;
-    for (const auto& item : config) {
-        newConfig[item.first] = InferenceEngine::Parameter(item.second);
-    }
-    actual->SetConfig(newConfig);
-}
-
-void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& model_file) {
-    actual->Export(model_file);
-}
-
-std::map<std::string, InferenceEngine::InputInfo::CPtr> InferenceEnginePython::IEExecNetwork::getInputsInfo() {
-    InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo();
-    std::map<std::string, InferenceEngine::InputInfo::CPtr> pyInputs;
-    for (const auto& item : inputsDataMap) {
-        pyInputs[item.first] = item.second;
-    }
-    return pyInputs;
-}
-
-std::map<std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNetwork::getOutputs() {
-    InferenceEngine::ConstOutputsDataMap outputsDataMap = actual->GetOutputsInfo();
-    std::map<std::string, InferenceEngine::CDataPtr> pyOutputs;
-    for (const auto& item : outputsDataMap) {
-        pyOutputs[item.first] = item.second;
-    }
-    return pyOutputs;
-}
-
-std::shared_ptr<InferenceEngine::ExecutableNetwork> InferenceEnginePython::IEExecNetwork::getPluginLink() {
-    return actual;
-}
-
-void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name,
-                                                      const InferenceEngine::Blob::Ptr& blob_ptr) {
-    request_ptr.SetBlob(blob_name.c_str(), blob_ptr);
-}
-
-const InferenceEngine::PreProcessInfo& InferenceEnginePython::InferRequestWrap::getPreProcess(
-    const std::string& blob_name) {
-    return request_ptr.GetPreProcess(blob_name.c_str());
-}
-
-InferenceEngine::Blob::Ptr InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string& blob_name) {
-    return request_ptr.GetBlob(blob_name.c_str());
-}
-
-std::vector<InferenceEnginePython::CVariableState> InferenceEnginePython::InferRequestWrap::queryState() {
-    auto queryStateVec = request_ptr.QueryState();
-    std::vector<InferenceEnginePython::CVariableState> memoryStates;
-    for (const auto& state : queryStateVec) {
-        InferenceEnginePython::CVariableState st;
-        st.variableState = state;
-        memoryStates.push_back(st);
-    }
-    return memoryStates;
-}
-
-void InferenceEnginePython::InferRequestWrap::setCyCallback(cy_callback callback, void* data) {
-    user_callback = callback;
-    user_data = data;
-}
-
-void InferenceEnginePython::InferRequestWrap::infer() {
-    start_time = Time::now();
-    request_ptr.Infer();
-    auto end_time = Time::now();
-    auto execTime = std::chrono::duration_cast<ns>(end_time - start_time);
-    exec_time = static_cast<double>(execTime.count()) * 0.000001;
-}
-
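// The exec_time bookkeeping above multiplies a nanosecond count by 1e-6, i.e.
// it stores milliseconds. An equivalent sketch using a std::chrono cast directly:
auto t0 = Time::now();
// ... request_ptr.Infer() ...
auto t1 = Time::now();
double ms = std::chrono::duration<double, std::milli>(t1 - t0).count();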
-void InferenceEnginePython::InferRequestWrap::infer_async() {
-    request_queue_ptr->setRequestBusy(index);
-    start_time = Time::now();
-    request_ptr.StartAsync();
-}
-
-int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
-    InferenceEngine::StatusCode code = request_ptr.Wait(timeout);
-    if (code != InferenceEngine::RESULT_NOT_READY) {
-        request_queue_ptr->setRequestIdle(index);
-    }
-    return static_cast<int>(code);
-}
-
-std::map<std::string, InferenceEnginePython::ProfileInfo>
-InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
-    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts = request_ptr.GetPerformanceCounts();
-    std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;
-
-    for (auto it : perf_counts) {
-        InferenceEnginePython::ProfileInfo profile_info;
-        switch (it.second.status) {
-        case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
-            profile_info.status = "EXECUTED";
-            break;
-        case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
-            profile_info.status = "NOT_RUN";
-            break;
-        case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
-            profile_info.status = "OPTIMIZED_OUT";
-            break;
-        default:
-            profile_info.status = "UNKNOWN";
-        }
-        profile_info.exec_type = it.second.exec_type;
-        profile_info.layer_type = it.second.layer_type;
-        profile_info.cpu_time = it.second.cpu_uSec;
-        profile_info.real_time = it.second.realTime_uSec;
-        profile_info.execution_index = it.second.execution_index;
-        perf_map[it.first] = profile_info;
-    }
-    return perf_map;
-}
-
-std::string InferenceEnginePython::get_version() {
-    auto version = InferenceEngine::GetInferenceEngineVersion();
-    return version->buildNumber;
-}
-
-InferenceEnginePython::IECore::IECore(const std::string& xmlConfigFile) {
-    actual = InferenceEngine::Core(xmlConfigFile);
-}
-
-std::map<std::string, InferenceEngine::Version> InferenceEnginePython::IECore::getVersions(
-    const std::string& deviceName) {
-    return actual.GetVersions(deviceName);
-}
-
-int InferenceEnginePython::IEExecNetwork::wait(int num_requests, int64_t timeout) {
-    return request_queue_ptr->wait(num_requests, timeout);
-}
-
-int InferenceEnginePython::IEExecNetwork::getIdleRequestId() {
-    return request_queue_ptr->getIdleRequestId();
-}
-
-int InferenceEnginePython::IdleInferRequestQueue::wait(int num_requests, int64_t timeout) {
-    std::unique_lock<std::mutex> lock(mutex);
-    if (timeout > 0) {
-        if (!cv.wait_for(lock, std::chrono::milliseconds(timeout), [this, num_requests]() {
-                return static_cast<int>(idle_ids.size()) >= num_requests;
-            }))
-            return static_cast<int>(InferenceEngine::StatusCode::RESULT_NOT_READY);
-    } else
-        cv.wait(lock, [this, num_requests]() {
-            return static_cast<int>(idle_ids.size()) >= num_requests;
-        });
-    return static_cast<int>(InferenceEngine::StatusCode::OK);
-}
-
-void InferenceEnginePython::IdleInferRequestQueue::setRequestIdle(int index) {
-    std::unique_lock<std::mutex> lock(mutex);
-    idle_ids.emplace_back(index);
-    cv.notify_all();
-}
-
-void InferenceEnginePython::IdleInferRequestQueue::setRequestBusy(int index) {
-    std::lock_guard<std::mutex> lock(mutex);
-    idle_ids.remove(index);
-}
-
-int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() {
-    std::lock_guard<std::mutex> lock(mutex);
-    return idle_ids.size() ? idle_ids.front() : -1;
-}
-
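// IdleInferRequestQueue above is the classic condition-variable pattern:
// completion callbacks push an index and notify, waiters block on a predicate
// (which also guards against spurious wakeups). A self-contained sketch of the
// same idea, with hypothetical names:
#include <condition_variable>
#include <list>
#include <mutex>

struct IdleQueueSketch {
    std::list<int> idle;
    std::mutex m;
    std::condition_variable cv;

    void push(int i) {
        std::lock_guard<std::mutex> lk(m);
        idle.push_back(i);
        cv.notify_all();  // wake every waiter; each re-checks the predicate
    }

    int pop_blocking() {
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [this] { return !idle.empty(); });
        int i = idle.front();
        idle.pop_front();
        return i;
    }
};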
-void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) {
-    if (0 == num_requests) {
-        num_requests = getOptimalNumberOfRequests(*actual);
-    }
-    infer_requests.resize(num_requests);
-
-    for (int i = 0; i < num_requests; ++i) {
-        InferRequestWrap& infer_request = infer_requests[i];
-        infer_request.index = i;
-        request_queue_ptr->setRequestIdle(i);
-        infer_request.request_queue_ptr = request_queue_ptr;
-        infer_request.request_ptr = actual->CreateInferRequest();
-
-        infer_request.request_ptr
-            .SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
-                [&](InferenceEngine::InferRequest request, InferenceEngine::StatusCode code) {
-                    if (code != InferenceEngine::StatusCode::OK) {
-                        IE_EXCEPTION_SWITCH(code,
-                                            ExceptionType,
-                                            InferenceEngine::details::ThrowNow<ExceptionType>{IE_LOCATION_PARAM} <<=
-                                            std::stringstream{});
-                    }
-
-                    auto end_time = Time::now();
-                    auto execTime = std::chrono::duration_cast<ns>(end_time - infer_request.start_time);
-                    infer_request.exec_time = static_cast<double>(execTime.count()) * 0.000001;
-                    if (infer_request.user_callback) {
-                        infer_request.user_callback(infer_request.user_data, code);
-                    }
-                    infer_request.request_queue_ptr->setRequestIdle(infer_request.index);
-                });
-    }
-}
-
-InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& modelPath,
-                                                                            const std::string& binPath) {
-    InferenceEngine::CNNNetwork net = actual.ReadNetwork(modelPath, binPath);
-    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
-}
-
-InferenceEnginePython::IENetwork InferenceEnginePython::IECore::readNetwork(const std::string& model,
-                                                                            const uint8_t* bin,
-                                                                            size_t bin_size) {
-    InferenceEngine::MemoryBlob::Ptr weights_blob;
-    if (bin_size != 0) {
-        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C);
-        weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc);
-        weights_blob->allocate();
-        memcpy(weights_blob->rwmap().as<uint8_t*>(), bin, bin_size);
-    }
-    InferenceEngine::CNNNetwork net = actual.ReadNetwork(model, weights_blob);
-    return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
-}
-
-std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(
-    IENetwork network,
-    const std::string& deviceName,
-    const std::map<std::string, std::string>& config,
-    int num_requests) {
-    auto exec_network =
-        InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name, num_requests);
-    exec_network->actual =
-        std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(*network.actual, deviceName, config));
-    exec_network->createInferRequests(num_requests);
-
-    return exec_network;
-}
-
-std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(
-    IENetwork network,
-    const std::map<std::string, std::string>& config,
-    int num_requests) {
-    auto exec_network =
-        InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name, num_requests);
-    exec_network->actual =
-        std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(*network.actual, config));
-    exec_network->createInferRequests(num_requests);
-
-    return exec_network;
-}
-
-std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetworkFromFile(
-    const std::string& modelPath,
-    const std::string& deviceName,
-    const std::map<std::string, std::string>& config,
-    int num_requests) {
-    auto exec_network =
-        InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(modelPath, num_requests);
-    exec_network->actual =
-        std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(modelPath, deviceName, config));
-    exec_network->createInferRequests(num_requests);
-
-    return exec_network;
-}
-
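// The load/import overloads around this point all repeat one pattern: wrap the
// plugin's ExecutableNetwork in an IEExecNetwork and pre-create its request
// pool. A hedged usage sketch (device name and model paths are illustrative only):
InferenceEnginePython::IECore core;
auto net = core.readNetwork("model.xml", "model.bin");
auto exec = core.loadNetwork(net, "CPU", {}, /*num_requests=*/0);  // 0 -> ask the plugin
exec->infer();  // synchronous inference on request 0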
-std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetworkFromFile(
-    const std::string& modelPath,
-    const std::map<std::string, std::string>& config,
-    int num_requests) {
-    auto exec_network =
-        InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(modelPath, num_requests);
-    exec_network->actual = std::make_shared<InferenceEngine::ExecutableNetwork>(actual.LoadNetwork(modelPath, config));
-    exec_network->createInferRequests(num_requests);
-
-    return exec_network;
-}
-
-std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::importNetwork(
-    const std::string& modelFIle,
-    const std::string& deviceName,
-    const std::map<std::string, std::string>& config,
-    int num_requests) {
-    auto exec_network =
-        InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME, num_requests);
-    exec_network->actual =
-        std::make_shared<InferenceEngine::ExecutableNetwork>(actual.ImportNetwork(modelFIle, deviceName, config));
-    exec_network->createInferRequests(num_requests);
-
-    return exec_network;
-}
-
-std::map<std::string, std::string> InferenceEnginePython::IECore::queryNetwork(
-    InferenceEnginePython::IENetwork network,
-    const std::string& deviceName,
-    const std::map<std::string, std::string>& config) {
-    auto res = actual.QueryNetwork(*network.actual, deviceName, config);
-    return res.supportedLayersMap;
-}
-
-void InferenceEnginePython::IECore::setConfig(const std::map<std::string, std::string>& config,
-                                              const std::string& deviceName) {
-    actual.SetConfig(config, deviceName);
-}
-
-void InferenceEnginePython::IECore::registerPlugin(const std::string& pluginName, const std::string& deviceName) {
-    actual.RegisterPlugin(pluginName, deviceName);
-}
-
-void InferenceEnginePython::IECore::unregisterPlugin(const std::string& deviceName) {
-    actual.UnregisterPlugin(deviceName);
-}
-
-void InferenceEnginePython::IECore::registerPlugins(const std::string& xmlConfigFile) {
-    actual.RegisterPlugins(xmlConfigFile);
-}
-
-void InferenceEnginePython::IECore::addExtension(const std::string& ext_lib_path, const std::string& deviceName) {
-    auto extension_ptr = std::make_shared<InferenceEngine::Extension>(ext_lib_path);
-    auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
-    actual.AddExtension(extension, deviceName);
-}
-
-std::vector<std::string> InferenceEnginePython::IECore::getAvailableDevices() {
-    return actual.GetAvailableDevices();
-}
-
-PyObject* InferenceEnginePython::IECore::getMetric(const std::string& deviceName, const std::string& name) {
-    InferenceEngine::Parameter param = actual.GetMetric(deviceName, name);
-    return parse_parameter(param);
-}
-
-PyObject* InferenceEnginePython::IECore::getConfig(const std::string& deviceName, const std::string& name) {
-    InferenceEngine::Parameter param = actual.GetConfig(deviceName, name);
-    return parse_parameter(param);
-}
-
-void InferenceEnginePython::CVariableState::reset() {
-    variableState.Reset();
-}
-
-std::string InferenceEnginePython::CVariableState::getName() {
-    return variableState.GetName();
-}
-
-InferenceEngine::Blob::Ptr InferenceEnginePython::CVariableState::getState() {
-    InferenceEngine::Blob::CPtr c_blob = variableState.GetState();
-    return std::const_pointer_cast<InferenceEngine::Blob>(c_blob);
-}
-
-void InferenceEnginePython::CVariableState::setState(InferenceEngine::Blob::Ptr state) {
-    variableState.SetState(state);
-}
-
-const size_t InferenceEnginePython::product(const InferenceEngine::SizeVector& dims) {
-    return std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies<size_t>{});
-}
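// product() above is the element count implied by a dimension vector; a usage
// sketch, assuming an NCHW shape:
InferenceEngine::SizeVector dims = {1, 3, 224, 224};
size_t elements = InferenceEnginePython::product(dims);  // 1 * 3 * 224 * 224 = 150528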
diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp
deleted file mode 100644
index 3c350e6508ad28..00000000000000
--- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl.hpp
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "Python.h"
-#include "ie_core.hpp"
-
-typedef std::chrono::high_resolution_clock Time;
-typedef std::chrono::nanoseconds ns;
-
-namespace InferenceEnginePython {
-
-struct ProfileInfo {
-    std::string status;
-    std::string exec_type;
-    std::string layer_type;
-    int64_t real_time;
-    int64_t cpu_time;
-    unsigned execution_index;
-};
-
-struct CVariableState {
-    InferenceEngine::VariableState variableState;
-    void reset();
-    std::string getName();
-    InferenceEngine::Blob::Ptr getState();
-    void setState(InferenceEngine::Blob::Ptr state);
-};
-
-struct IENetwork {
-    std::shared_ptr<InferenceEngine::CNNNetwork> actual;
-    std::string name;
-    std::size_t batch_size;
-    PyObject* getFunction();
-
-    void setBatch(const size_t size);
-
-    size_t getBatch();
-
-    void addOutput(const std::string& out_layer, size_t port_id);
-
-    const std::map<std::string, InferenceEngine::InputInfo::Ptr> getInputsInfo();
-
-    const std::map<std::string, InferenceEngine::DataPtr> getOutputs();
-
-    void reshape(const std::map<std::string, std::vector<size_t>>& input_shapes);
-
-    void serialize(const std::string& path_to_xml, const std::string& path_to_bin);
-
-    IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork>& cnn_network);
-
-    IENetwork(PyObject* network);
-
-    IENetwork() = default;
-
-    void convertToOldRepresentation();
-
-    std::string getOVNameForTensor(const std::string& orig_name);
-};
-
-struct IdleInferRequestQueue {
-    std::list<int> idle_ids;
-    std::mutex mutex;
-    std::condition_variable cv;
-
-    void setRequestIdle(int index);
-    void setRequestBusy(int index);
-
-    int wait(int num_requests, int64_t timeout);
-
-    int getIdleRequestId();
-
-    using Ptr = std::shared_ptr<IdleInferRequestQueue>;
-};
-
-struct InferRequestWrap {
-    int index;
-    using cy_callback = void (*)(void*, int);
-
-    InferenceEngine::InferRequest request_ptr;
-    Time::time_point start_time;
-    double exec_time;
-    cy_callback user_callback;
-    void* user_data;
-    IdleInferRequestQueue::Ptr request_queue_ptr;
-
-    void infer();
-
-    void infer_async();
-
-    int wait(int64_t timeout);
-
-    void setCyCallback(cy_callback callback, void* data);
-
-    InferenceEngine::Blob::Ptr getBlobPtr(const std::string& blob_name);
-
-    void setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr);
-
-    const InferenceEngine::PreProcessInfo& getPreProcess(const std::string& blob_name);
-
-    std::map<std::string, ProfileInfo> getPerformanceCounts();
-
-    std::vector<CVariableState> queryState();
-};
-
-struct IEExecNetwork {
-    std::shared_ptr<InferenceEngine::ExecutableNetwork> actual;
-    std::vector<InferRequestWrap> infer_requests;
-    std::string name;
-    IdleInferRequestQueue::Ptr request_queue_ptr;
-
-    IEExecNetwork(const std::string& name, size_t num_requests);
-
-    IENetwork GetExecGraphInfo();
-
-    void infer();
-    void exportNetwork(const std::string& model_file);
-
-    std::map<std::string, InferenceEngine::InputInfo::CPtr> getInputsInfo();
-    std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
-
-    PyObject* getMetric(const std::string& metric_name);
-    PyObject* getConfig(const std::string& name);
-    void setConfig(const std::map<std::string, std::string>& config);
-
-    int wait(int num_requests, int64_t timeout);
-    int getIdleRequestId();
-
-    void createInferRequests(int num_requests);
-
-    // binds plugin to InputInfo and Data, so that they can be destroyed before plugin (issue 28996)
-    std::shared_ptr<InferenceEngine::ExecutableNetwork> getPluginLink();
-};
-
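// getPluginLink() (the issue-28996 workaround above) hands Python-side wrappers
// a shared_ptr that keeps the plugin library loaded; a sketch of the idea,
// assuming `exec_network` is an IEExecNetwork instance:
std::shared_ptr<InferenceEngine::ExecutableNetwork> keep_alive = exec_network.getPluginLink();
// While `keep_alive` is stored next to the InputInfo/Data wrappers, the
// plugin's shared library cannot be unloaded from underneath them.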
-struct IECore {
-    InferenceEngine::Core actual;
-    explicit IECore(const std::string& xmlConfigFile = std::string());
-    std::map<std::string, InferenceEngine::Version> getVersions(const std::string& deviceName);
-    InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath);
-    InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t* bin, size_t bin_size);
-    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network,
-                                                                      const std::string& deviceName,
-                                                                      const std::map<std::string, std::string>& config,
-                                                                      int num_requests);
-    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network,
-                                                                      const std::map<std::string, std::string>& config,
-                                                                      int num_requests);
-    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetworkFromFile(
-        const std::string& modelPath,
-        const std::string& deviceName,
-        const std::map<std::string, std::string>& config,
-        int num_requests);
-    std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetworkFromFile(
-        const std::string& modelPath,
-        const std::map<std::string, std::string>& config,
-        int num_requests);
-    std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(
-        const std::string& modelFIle,
-        const std::string& deviceName,
-        const std::map<std::string, std::string>& config,
-        int num_requests);
-    std::map<std::string, std::string> queryNetwork(IENetwork network,
-                                                    const std::string& deviceName,
-                                                    const std::map<std::string, std::string>& config);
-    void setConfig(const std::map<std::string, std::string>& config, const std::string& deviceName = std::string());
-    void registerPlugin(const std::string& pluginName, const std::string& deviceName);
-    void unregisterPlugin(const std::string& deviceName);
-    void registerPlugins(const std::string& xmlConfigFile);
-    void addExtension(const std::string& ext_lib_path, const std::string& deviceName);
-    std::vector<std::string> getAvailableDevices();
-    PyObject* getMetric(const std::string& deviceName, const std::string& name);
-    PyObject* getConfig(const std::string& deviceName, const std::string& name);
-};
-
-template <typename T>
-T* get_buffer(InferenceEngine::Blob& blob) {
-    return blob.buffer().as<T*>();
-}
-
-template <typename T, typename... Args>
-std::unique_ptr<T> make_unique(Args&&... args) {
-    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-std::string get_version();
-
-InferenceEnginePython::IENetwork read_network(std::string path_to_xml, std::string path_to_bin);
-
-const size_t product(const InferenceEngine::SizeVector& dims);
-
-};  // namespace InferenceEnginePython
diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd b/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd
deleted file mode 100644
index f2eb928321c832..00000000000000
--- a/src/bindings/python/src/compatibility/openvino/inference_engine/ie_api_impl_defs.pxd
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from libc.stddef cimport size_t
-from libcpp cimport bool
-from libcpp.string cimport string
-from libcpp.vector cimport vector
-from libcpp.map cimport map
-from libcpp.memory cimport unique_ptr, shared_ptr, weak_ptr
-from libc.stdint cimport int64_t, uint8_t
-
-
-cdef extern from "" namespace "InferenceEngine":
-    ctypedef vector[size_t] SizeVector
-
-    cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork"
-
-    cdef cppclass TBlob[T]:
-        ctypedef shared_ptr[TBlob[T]] Ptr
-
-    cdef cppclass CBlob "InferenceEngine::Blob":
-        ctypedef shared_ptr[CBlob] Ptr
-        const CTensorDesc& getTensorDesc() except +
-        size_t element_size() except +
-        void allocate()
-        void setShape(const SizeVector& dims) except +
-
-    cdef TBlob[Type].Ptr make_shared_blob[Type](const CTensorDesc& tensorDesc)
-
-    cdef TBlob[Type].Ptr make_shared_blob[Type](const CTensorDesc& tensorDesc, Type* ptr, size_t size)
-
-    cdef cppclass CTensorDesc "InferenceEngine::TensorDesc":
-        CTensorDesc() except +
-        CTensorDesc(const Precision& precision, SizeVector dims, Layout layout) except +
-        SizeVector& getDims() except +
-        void setDims(const SizeVector& dims) except +
-        Layout getLayout() except +
-        void setLayout(Layout l) except +
-        const Precision& getPrecision() except +
-        void setPrecision(const Precision& p) except +
-
-
-    cdef cppclass Data:
const Precision getPrecision() const - void setPrecision(const Precision& precision) const - const SizeVector getDims() except + - const string& getName() except + - const Layout getLayout() except + - void setLayout(Layout layout) except + - const bool isInitialized() except + - - ctypedef shared_ptr[Data] DataPtr - ctypedef weak_ptr[Data] DataWeakPtr - ctypedef shared_ptr[const Data] CDataPtr - - cdef cppclass InputInfo: - ctypedef shared_ptr[InputInfo] Ptr - ctypedef shared_ptr[const InputInfo] CPtr - Precision getPrecision() const - void setPrecision(Precision p) - Layout getLayout() - void setLayout(Layout l) - const string& name() const - DataPtr getInputData() const - CPreProcessInfo& getPreProcess() - const CTensorDesc& getTensorDesc() const - void setInputData(DataPtr inputPtr) - - - cdef cppclass CPreProcessChannel "InferenceEngine::PreProcessChannel": - ctypedef shared_ptr[CPreProcessChannel] Ptr - CBlob.Ptr meanData - float stdScale - float meanValue - - cdef cppclass CPreProcessInfo "InferenceEngine::PreProcessInfo": - CPreProcessChannel.Ptr& operator[](size_t index) - size_t getNumberOfChannels() const - void init(const size_t numberOfChannels) - void setMeanImage(const CBlob.Ptr& meanImage) - void setMeanImageForChannel(const CBlob.Ptr& meanImage, const size_t channel) - vector[CPreProcessChannel.Ptr] _channelsInfo - ColorFormat getColorFormat() const - void setColorFormat(ColorFormat fmt) - ResizeAlgorithm getResizeAlgorithm() const - void setResizeAlgorithm(const ResizeAlgorithm& alg) - MeanVariant getMeanVariant() const - void setVariant(const MeanVariant& variant) - - ctypedef map[string, InputInfo.CPtr] InputsDataMap - - cdef cppclass Precision: - const char*name() const - @staticmethod - const Precision FromStr(const string& str) - - cdef struct apiVersion: - int minor - int major - - cdef cppclass Version: - const char *buildNumber - const char *description - apiVersion apiVersion - - cpdef enum MeanVariant: - pass - - cpdef enum ResizeAlgorithm: - pass - - cpdef enum ColorFormat: - pass - - cdef enum Layout: - ANY - NCHW - NHWC - NCDHW - NDHWC - OIHW - GOIHW - OIDHW - GOIDHW - SCALAR - C - CHW - HW - NC - CN - BLOCKED - - -cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": - - cdef cppclass CVariableState: - void reset() except + - string getName() except + - CBlob.Ptr getState() except + - void setState(CBlob.Ptr state) except + - - cdef cppclass ProfileInfo: - string status - string exec_type - string layer_type - long long real_time - long long cpu_time - unsigned int execution_index - - cdef cppclass WeightsInfo: - CBlob.Ptr & weights; - CBlob.Ptr & biases; - map[string, CBlob.Ptr] custom_blobs; - - cdef cppclass IEExecNetwork: - vector[InferRequestWrap] infer_requests - IENetwork GetExecGraphInfo() except + - map[string, CDataPtr] getOutputs() except + - map[string, InputInfo.CPtr] getInputsInfo() - void exportNetwork(const string & model_file) except + - object getMetric(const string & metric_name) except + - object getConfig(const string & metric_name) except + - void setConfig(const map[string, string]& config) except + - int wait(int num_requests, int64_t timeout) nogil - int getIdleRequestId() - shared_ptr[CExecutableNetwork] getPluginLink() except + - - cdef cppclass IENetwork: - IENetwork() nogil except + - IENetwork(object) except + - string name - size_t batch_size - string precision - map[string, vector[size_t]] inputs - const map[string, InputInfo.Ptr] getInputsInfo() except + - map[string, DataPtr] getOutputs() except + - void 
addOutput(string &, size_t) except + - void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except + - void setBatch(size_t size) except + - size_t getBatch() except + - void setLayerParams(map[string, map[string, string]] params_map) except + - void serialize(const string& path_to_xml, const string& path_to_bin) except + - void reshape(map[string, vector[size_t]] input_shapes) except + - object getFunction() except + - void convertToOldRepresentation() except + - string getOVNameForTensor(const string &) except + - - cdef cppclass InferRequestWrap: - double exec_time; - int index; - CBlob.Ptr getBlobPtr(const string & blob_name) except + - void setBlob(const string & blob_name, const CBlob.Ptr & blob_ptr) except + - void setBlob(const string &blob_name, const CBlob.Ptr &blob_ptr, CPreProcessInfo& info) except + - const CPreProcessInfo& getPreProcess(const string& blob_name) except + - map[string, ProfileInfo] getPerformanceCounts() except + - void infer() except + - void infer_async() except + - int wait(int64_t timeout) nogil except + - void setBatch(int size) except + - void setCyCallback(void (*)(void*, int), void *) except + - vector[CVariableState] queryState() except + - - cdef cppclass IECore: - IECore() nogil except + - IECore(const string & xml_config_file) nogil except + - map[string, Version] getVersions(const string & deviceName) except + - IENetwork readNetwork(const string& modelPath, const string& binPath) nogil except + - IENetwork readNetwork(const string& modelPath,uint8_t*bin, size_t bin_size) nogil except + - unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, const string & deviceName, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] loadNetworkFromFile(const string & modelPath, - const map[string, string] & config, int num_requests) nogil except + - unique_ptr[IEExecNetwork] importNetwork(const string & modelFIle, const string & deviceName, - const map[string, string] & config, int num_requests) except + - map[string, string] queryNetwork(IENetwork network, const string deviceName, - const map[string, string] & config) except + - void setConfig(const map[string, string] & config, const string & deviceName) except + - void registerPlugin(const string & pluginName, const string & deviceName) except + - void unregisterPlugin(const string & deviceName) except + - void registerPlugins(const string & xmlConfigFile) except + - void addExtension(const string & ext_lib_path, const string & deviceName) except + - vector[string] getAvailableDevices() except + - object getMetric(const string & deviceName, const string & name) except + - object getConfig(const string & deviceName, const string & name) except + - - cdef T*get_buffer[T](CBlob &) - - cdef string get_version() - - cdef IENetwork read_network(string path_to_xml, string path_to_bin) - - cdef const size_t product(const SizeVector& dims) diff --git a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt b/src/bindings/python/src/compatibility/openvino/requirements-dev.txt deleted file mode 100644 index cc35217dcbebf6..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt +++ 
/dev/null @@ -1 +0,0 @@ -cython>=3.0.2 diff --git a/src/bindings/python/src/compatibility/openvino/setup.cfg b/src/bindings/python/src/compatibility/openvino/setup.cfg deleted file mode 100644 index af37819e5f3566..00000000000000 --- a/src/bindings/python/src/compatibility/openvino/setup.cfg +++ /dev/null @@ -1,24 +0,0 @@ -[flake8] -# D104 - Missing docstring in public package -inline-quotes = double -filename = *.py, *.pyx -max-line-length = 160 -ignore = E203,D104 -max-parameters-amount = 8 -show_source = True -docstring-convention = google -enable-extensions = G -per-file-ignores = - *.pyx: E225, E226, E251, E999, E800, E265, E203, E266, E227, E211 - *__init__.py: F403, F405, F401 - -[pydocstyle] -convention = google - -[mypy] -ignore_missing_imports = True -disable_error_code = attr-defined -show_column_numbers = True -show_error_context = True -show_absolute_path = True -pretty = True diff --git a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt deleted file mode 100644 index 8d3ac1ab0c02a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -cmake_minimum_required (VERSION 3.13) - -project (pyngraph) - -if(NOT DEFINED OpenVINO_SOURCE_DIR) - find_package(OpenVINO REQUIRED) - find_package(OpenVINODeveloperPackage QUIET - PATHS "${InferenceEngineDeveloperPackage_DIR}") -endif() - -# Python3_VERSION_MAJOR and Python3_VERSION_MINOR are defined in FindPython3 -set(pyversion python${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) - -if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python/) -else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/) -endif() - -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) - -# compile options - -if (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # disable warning: This operator was deprecated and will be removed with v0 operation. 
- add_compile_options(/wd4996) -elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - add_compile_options(-Wno-deprecated-register -Wno-range-loop-analysis) -elseif(OV_COMPILER_IS_APPLECLANG) - add_link_options(-stdlib=libc++) - add_compile_options(-Wno-unused-value -Wno-range-loop-analysis) -elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - # WA for GCC 7.5 "PYBIND11_NOINLINE inline" warning - add_compile_options(-Wno-error=attributes) -endif() - -if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # for proper fix need to update pybind to version which does not use PyEval_InitThreads() - add_compile_options(-Wno-deprecated-declarations -Wno-undef) -endif() - -# create target - -file(GLOB_RECURSE SOURCES *.cpp) - -pybind11_add_module(_${PROJECT_NAME} MODULE NO_EXTRAS ${SOURCES}) - -target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../") - -target_link_libraries(_${PROJECT_NAME} PRIVATE openvino::runtime openvino::core::dev) - -set_target_properties(_${PROJECT_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) - -# perform copy -add_custom_command(TARGET _${PROJECT_NAME} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph -) - -ov_set_install_rpath(_${PROJECT_NAME} ${OV_CPACK_PYTHONDIR} ${OV_CPACK_RUNTIMEDIR}) - -# Install - -ov_python_minimal_api(_${PROJECT_NAME}) -ov_add_clang_format_target(_${PROJECT_NAME}_clang FOR_TARGETS _${PROJECT_NAME}) - -ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} HIDDEN) - -install(TARGETS _${PROJECT_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - -install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} - ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} - USE_SOURCE_PERMISSIONS) diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp b/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp deleted file mode 100644 index 0aa59b9a055e27..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_set.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/axis_set.hpp" // ngraph::AxisSet - -#include -#include - -#include -#include -#include - -#include "pyngraph/axis_set.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_AxisSet(py::module m) { - py::class_> axis_set(m, "AxisSet", py::module_local()); - axis_set.doc() = "ngraph.impl.AxisSet wraps ngraph::AxisSet"; - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init&>(), py::arg("axes")); - axis_set.def(py::init(), py::arg("axes")); - - axis_set.def("__len__", [](const ngraph::AxisSet& v) { - return v.size(); - }); - - axis_set.def( - "__iter__", - [](ngraph::AxisSet& v) { - return py::make_iterator(v.begin(), v.end()); - }, - py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ - - axis_set.def("__repr__", [](const ngraph::AxisSet& self) -> std::string { - std::stringstream data_ss; - std::copy(self.begin(), self.end(), std::ostream_iterator(data_ss, ", ")); - std::string data_str = data_ss.str(); - return ""; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp b/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp deleted file mode 
100644 index e7232ec17806a3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_set.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_AxisSet(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp b/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp deleted file mode 100644 index f8b133625ab288..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_vector.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/axis_vector.hpp" // ngraph::AxisVector - -#include -#include - -#include "pyngraph/axis_vector.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_AxisVector(py::module m) { - py::class_> axis_vector(m, - "AxisVector", - py::module_local()); - axis_vector.doc() = "ngraph.impl.AxisVector wraps ngraph::AxisVector"; - axis_vector.def(py::init&>(), py::arg("axes")); - axis_vector.def(py::init&>(), py::arg("axes")); - axis_vector.def(py::init(), py::arg("axes")); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp b/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp deleted file mode 100644 index 74c452474340df..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/axis_vector.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_AxisVector(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp b/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp deleted file mode 100644 index faa965d63de808..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/coordinate.hpp" // ov::Coordinate - -#include -#include - -#include "openvino/core/shape.hpp" -#include "pyngraph/coordinate.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Coordinate(py::module m) { - py::class_> coordinate(m, "Coordinate", py::module_local()); - coordinate.doc() = "ngraph.impl.Coordinate wraps ov::Coordinate"; - coordinate.def(py::init&>()); - coordinate.def(py::init()); - coordinate.def(py::init&>()); - coordinate.def(py::init()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp b/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp deleted file mode 100644 index b9bf9f6574e99b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Coordinate(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp b/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp deleted file mode 100644 index 6c91879ad3ca60..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/coordinate_diff.hpp" 
// ngraph::CoordinateDiff - -#include -#include - -#include -#include -#include - -#include "pyngraph/coordinate_diff.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_CoordinateDiff(py::module m) { - py::class_> coordinate_diff(m, - "CoordinateDiff", - py::module_local()); - coordinate_diff.doc() = "ngraph.impl.CoordinateDiff wraps ngraph::CoordinateDiff"; - coordinate_diff.def(py::init&>()); - coordinate_diff.def(py::init&>()); - coordinate_diff.def(py::init()); - - coordinate_diff.def("__str__", [](const ngraph::CoordinateDiff& self) -> std::string { - std::stringstream stringstream; - std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); - std::string string = stringstream.str(); - return string.substr(0, string.size() - 2); - }); - - coordinate_diff.def("__repr__", [](const ngraph::CoordinateDiff& self) -> std::string { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape_str = py::cast(self).attr("__str__")().cast(); - return "<" + class_name + ": (" + shape_str + ")>"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp b/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp deleted file mode 100644 index b5ec670888266d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_CoordinateDiff(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp b/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp deleted file mode 100644 index e83206afde05d6..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// These are not used here, but needed in order to not violate ODR, since -// these are included in other translation units, and specialize some types. 
-// Related: https://github.com/pybind/pybind11/issues/1055 -#include "dict_attribute_visitor.hpp" - -#include -#include - -#include "ngraph/op/loop.hpp" -#include "ngraph/op/util/sub_graph_base.hpp" - -namespace py = pybind11; - -util::DictAttributeDeserializer::DictAttributeDeserializer( - const py::dict& attributes, - std::unordered_map>& variables) - : m_attributes(attributes), - m_variables(variables) {} - -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - if (const auto& a = ov::as_type< - ngraph::AttributeAdapter>>>( - &adapter)) { - std::vector> input_descs; - const py::dict& input_desc = m_attributes[name.c_str()].cast(); - - if (input_desc.contains("slice_input_desc") && !input_desc["slice_input_desc"].is_none()) { - for (py::handle h : input_desc["slice_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto slice_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast(), - desc["start"].cast(), - desc["stride"].cast(), - desc["part_size"].cast(), - desc["end"].cast(), - desc["axis"].cast()); - input_descs.push_back(slice_in); - } - } - - if (input_desc.contains("merged_input_desc") && !input_desc["merged_input_desc"].is_none()) { - for (py::handle h : input_desc["merged_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto merged_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast(), - desc["body_value_idx"].cast()); - input_descs.push_back(merged_in); - } - } - - if (input_desc.contains("invariant_input_desc") && !input_desc["invariant_input_desc"].is_none()) { - for (py::handle h : input_desc["invariant_input_desc"].cast()) { - const py::dict& desc = h.cast(); - auto invariant_in = std::make_shared( - desc["input_idx"].cast(), - desc["body_parameter_idx"].cast()); - input_descs.push_back(invariant_in); - } - } - a->set(input_descs); - } else if (const auto& a = ov::as_type>>>(&adapter)) { - std::vector> output_descs; - const py::dict& output_desc = m_attributes[name.c_str()].cast(); - if (output_desc.contains("body_output_desc") && !output_desc["body_output_desc"].is_none()) { - for (py::handle h : output_desc["body_output_desc"].cast()) { - const py::dict& desc = h.cast(); - auto body_output = std::make_shared( - desc["body_value_idx"].cast(), - desc["output_idx"].cast(), - desc["iteration"].cast()); - output_descs.push_back(body_output); - } - } - - if (output_desc.contains("concat_output_desc") && !output_desc["concat_output_desc"].is_none()) { - for (py::handle h : output_desc["concat_output_desc"].cast()) { - const py::dict& desc = h.cast(); - auto concat_output = std::make_shared( - desc["body_value_idx"].cast(), - desc["output_idx"].cast(), - desc["start"].cast(), - desc["stride"].cast(), - desc["part_size"].cast(), - desc["end"].cast(), - desc["axis"].cast()); - output_descs.push_back(concat_output); - } - } - a->set(output_descs); - } else if (const auto& a = - ov::as_type>(&adapter)) { - ngraph::op::v5::Loop::SpecialBodyPorts special_body_ports; - const py::dict& special_ports_dict = m_attributes[name.c_str()].cast(); - special_body_ports.body_condition_output_idx = - special_ports_dict["body_condition_output_idx"].cast(); - special_body_ports.current_iteration_input_idx = - special_ports_dict["current_iteration_input_idx"].cast(); - a->set(special_body_ports); - } else if (const auto& a = ov::as_type>>(&adapter)) { - std::string variable_id = m_attributes[name.c_str()].cast(); - if 
(!m_variables.count(variable_id)) { - m_variables[variable_id] = std::make_shared( - ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id}); - } - a->set(m_variables[variable_id]); - } else { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - 
adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - adapter.set(m_attributes[name.c_str()].cast>()); - } -} - -void util::DictAttributeDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - if (m_attributes.contains(name)) { - if (name == "body" || name == "then_body" || name == "else_body") { - const py::dict& body_attrs = m_attributes[name.c_str()].cast(); - const auto& body_outputs = as_output_vector(body_attrs["results"].cast()); - const auto& body_parameters = body_attrs["parameters"].cast(); - auto body = std::make_shared(body_outputs, body_parameters); - adapter.set(body); - } else { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } - } -} - -util::DictAttributeSerializer::DictAttributeSerializer(const std::shared_ptr& node) { - node->visit_attributes(*this); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - if (m_attributes.contains(name)) { - NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); - } -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - 
m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} -void util::DictAttributeSerializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - m_attributes[name.c_str()] = adapter.get(); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp b/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp deleted file mode 100644 index 6cad47a10d0599..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "ngraph/attribute_visitor.hpp" -#include "ngraph/function.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/util/variable.hpp" - -#include - -namespace py = pybind11; - -namespace util -{ - class DictAttributeDeserializer : public ngraph::AttributeVisitor - { - public: - DictAttributeDeserializer( - const py::dict& attributes, - std::unordered_map>& variables); - - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& 
name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - protected: - const py::dict& m_attributes; - std::unordered_map>& m_variables; - }; - - class DictAttributeSerializer : public ngraph::AttributeVisitor - { - public: - explicit DictAttributeSerializer(const std::shared_ptr& node); - - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) 
override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - template - T get_attribute(const std::string& name) - { - NGRAPH_CHECK(m_attributes.contains(name), - "Couldn't find attribute \"", - name, - "\" in serialized node attribute dictionary."); - return m_attributes[name.c_str()].cast(); - } - - py::dict get_attributes() const { return m_attributes; } - - protected: - py::dict m_attributes; - }; -} // namespace util diff --git a/src/bindings/python/src/compatibility/pyngraph/dimension.cpp b/src/bindings/python/src/compatibility/pyngraph/dimension.cpp deleted file mode 100644 index 37b7210644eef6..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/dimension.cpp +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/dimension.hpp" // ov::Dimension - -#include -#include - -#include -#include -#include - -#include "pyngraph/dimension.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Dimension(py::module m) { - using value_type = ov::Dimension::value_type; - - py::class_> dim(m, "Dimension", py::module_local()); - dim.doc() = "ngraph.impl.Dimension wraps ov::Dimension"; - dim.def(py::init<>()); - dim.def(py::init(), - py::arg("dimension"), - R"( - Construct a static dimension. - - :param dimension: Value of the dimension. - :type dimension: int - )"); - dim.def(py::init(), - py::arg("min_dimension"), - py::arg("max_dimension"), - R"( - Construct a dynamic dimension with bounded range. - - :param min_dimension: The lower inclusive limit for the dimension. - :type min_dimension: int - :param max_dimension: inclusive limit for the dimension. - :type max_dimension: The upper inclusive limit for the dimension. - )"); - - dim.def_static("dynamic", &ov::Dimension::dynamic); - - dim.def_property_readonly("is_dynamic", - &ov::Dimension::is_dynamic, - R"( - Check if Dimension is dynamic. - - :return: True if dynamic, else False. - :rtype: bool - )"); - dim.def_property_readonly("is_static", - &ov::Dimension::is_static, - R"( - Check if Dimension is static. - - :return: True if static, else False. - :rtype: bool - )"); - - dim.def( - "__eq__", - [](const ov::Dimension& a, const ov::Dimension& b) { - return a == b; - }, - py::is_operator()); - dim.def( - "__eq__", - [](const ov::Dimension& a, const int64_t& b) { - return a == b; - }, - py::is_operator()); - - dim.def("__len__", &ov::Dimension::get_length); - dim.def("get_length", - &ov::Dimension::get_length, - R"( - Return this dimension as integer. - This dimension must be static and non-negative. - - :return Value of the dimension. - :rtype: int - )"); - dim.def("get_min_length", - &ov::Dimension::get_min_length, - R"( - Return this dimension's min_dimension as integer. 
-                This dimension must be dynamic and non-negative.
-
-                :return: Value of the dimension.
-                :rtype: int
-            )");
-    dim.def("get_max_length",
-            &ov::Dimension::get_max_length,
-            R"(
-                Return this dimension's max_dimension as integer.
-                This dimension must be dynamic and non-negative.
-
-                :return: Value of the dimension.
-                :rtype: int
-            )");
-
-    dim.def("same_scheme",
-            &ov::Dimension::same_scheme,
-            py::arg("dim"),
-            R"(
-                Check whether this dimension represents the same scheme as the argument dimension.
-
-                :param dim: The other dimension to compare this dimension to.
-                :type dim: Dimension
-                :return: True if this dimension and dim are both dynamic,
-                         or if they are both static and equal, otherwise False.
-                :rtype: bool
-            )");
-    dim.def("compatible",
-            &ov::Dimension::compatible,
-            py::arg("d"),
-            R"(
-                Check whether this dimension is capable of being merged
-                with the argument dimension.
-
-                :param d: The dimension to compare this dimension with.
-                :type d: Dimension
-                :return: True if this dimension is compatible with d, else False.
-                :rtype: bool
-            )");
-    dim.def("relaxes",
-            &ov::Dimension::relaxes,
-            py::arg("d"),
-            R"(
-                Check whether this dimension is a relaxation of the argument.
-                This dimension relaxes (or is a relaxation of) d if:
-
-                (1) this and d are static and equal
-                (2) this dimension contains d dimension
-
-                this.relaxes(d) is equivalent to d.refines(this).
-
-                :param d: The dimension to compare this dimension with.
-                :type d: Dimension
-                :return: True if this dimension relaxes d, else False.
-                :rtype: bool
-            )");
-    dim.def("refines",
-            &ov::Dimension::refines,
-            py::arg("d"),
-            R"(
-                Check whether this dimension is a refinement of the argument.
-                This dimension refines (or is a refinement of) d if:
-
-                (1) this and d are static and equal
-                (2) d dimension contains this dimension
-
-                this.refines(d) is equivalent to d.relaxes(this).
-
-                :param d: The dimension to compare this dimension with.
-                :type d: Dimension
-                :return: True if this dimension refines d, else False.
-                :rtype: bool
-            )");
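
The relaxes/refines pair above is easier to see from Python than from the binding code. A minimal sketch, assuming an OpenVINO release that still ships the legacy ngraph module (2023.x or earlier); the variable names are illustrative:

    # Behaviour of the ngraph.impl.Dimension bindings removed here.
    from ngraph.impl import Dimension

    static3 = Dimension(3)           # static dimension of length 3
    bounded = Dimension(1, 8)        # dynamic, bounded to [1, 8]
    anything = Dimension.dynamic()   # fully dynamic

    assert static3.is_static and bounded.is_dynamic
    assert bounded.compatible(static3)  # 3 can be merged into [1, 8]
    assert anything.relaxes(static3)    # a dynamic dimension contains any static one
    assert static3.refines(anything)    # refines is the mirror of relaxes
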
-
-    dim.def("__str__", [](const ov::Dimension& self) -> std::string {
-        std::stringstream ss;
-        ss << self;
-        return ss.str();
-    });
-
-    dim.def("__repr__", [](const ov::Dimension& self) -> std::string {
-        return "<Dimension: " + py::cast(self).attr("__str__")().cast<std::string>() + ">";
-    });
-}
diff --git a/src/bindings/python/src/compatibility/pyngraph/dimension.hpp b/src/bindings/python/src/compatibility/pyngraph/dimension.hpp
deleted file mode 100644
index a0a5ec80f67d4d..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/dimension.hpp
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-namespace py = pybind11;
-
-void regclass_pyngraph_Dimension(py::module m);
diff --git a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp b/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp
deleted file mode 100644
index 9c7df295f0a39d..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "pyngraph/discrete_type_info.hpp"
-
-#include
-#include
-#include
-
-#include "openvino/core/type.hpp"
-
-namespace py = pybind11;
-
-void regclass_pyngraph_DiscreteTypeInfo(py::module m) {
-    py::class_<ov::DiscreteTypeInfo, std::shared_ptr<ov::DiscreteTypeInfo>> discrete_type_info(m,
-                                                                                               "DiscreteTypeInfo",
-                                                                                               py::module_local());
-    discrete_type_info.doc() = "ngraph.impl.DiscreteTypeInfo wraps ov::DiscreteTypeInfo";
-
-    // operator overloading
-    discrete_type_info.def(py::self < py::self);
-    discrete_type_info.def(py::self <= py::self);
-    discrete_type_info.def(py::self > py::self);
-    discrete_type_info.def(py::self >= py::self);
-    discrete_type_info.def(py::self == py::self);
-    discrete_type_info.def(py::self != py::self);
-
-    discrete_type_info.def_readonly("name", &ov::DiscreteTypeInfo::name);
-    discrete_type_info.def_readonly("version_id", &ov::DiscreteTypeInfo::version_id);
-    discrete_type_info.def_readonly("parent", &ov::DiscreteTypeInfo::parent);
-
-    discrete_type_info.def("__repr__", [](const ov::DiscreteTypeInfo& self) {
-        std::string name = std::string(self.name);
-        std::string version = std::string(self.version_id);
-        if (self.parent != nullptr) {
-            std::string parent_version = std::string(self.parent->version_id);
-            std::string parent_name = self.parent->name;
-            return "<DiscreteTypeInfo: " + name + " v" + version + " Parent(" + parent_name + " v" + parent_version + ")>";
-        }
-        return "<DiscreteTypeInfo: " + name + " v" + version + ">";
-    });
-}
diff --git a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp b/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp
deleted file mode 100644
index ec80f48f9a2a67..00000000000000
--- a/src/bindings/python/src/compatibility/pyngraph/discrete_type_info.hpp
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-namespace py = pybind11;
-
-void regclass_pyngraph_DiscreteTypeInfo(py::module m);
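
The Function bindings deleted next were the legacy entry point for building a model by hand. A hedged sketch of typical pre-removal usage; ng.parameter and ng.relu are the legacy opset helpers, and the model name is illustrative:

    # Legacy graph construction through ngraph.impl.Function (removed below).
    import numpy as np
    import ngraph as ng
    from ngraph.impl import Function

    data = ng.parameter([2, 2], np.float32, name="data")
    model = Function(ng.relu(data), [data], "relu_model")  # single-result constructor

    print(model.get_friendly_name())     # 'relu_model'
    print(model.get_output_size())       # 1
    print(len(model.get_ordered_ops()))  # 3: Parameter -> Relu -> Result
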
"pyngraph/function.hpp" - -namespace py = pybind11; - -static const char* CAPSULE_NAME = "ngraph_function"; - -void regclass_pyngraph_Function(py::module m) { - py::class_> function(m, "Function", py::module_local()); - function.doc() = "ngraph.impl.Function wraps ngraph::Function"; - - function.def(py::init([](const ngraph::ResultVector& res, - const std::vector>& nodes, - const ngraph::ParameterVector& params, - const std::string& name) { - ngraph::SinkVector sinks; - for (const auto& node : nodes) { - auto sink = std::dynamic_pointer_cast(node); - NGRAPH_CHECK(sink != nullptr, "Node {} is not instance of Sink"); - sinks.push_back(sink); - } - return std::make_shared(res, sinks, params, name); - }), - py::arg("results"), - py::arg("sinks"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : List[op.Result] - List of results. - - sinks : List[Node] - List of Nodes to be used as Sinks (e.g. Assign ops). - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - - function.def(py::init>&, - const std::vector>&, - const std::string&>(), - py::arg("results"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : List[Node] - List of Nodes to be used as results. - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - - function.def(py::init&, - const std::vector>&, - const std::string&>(), - py::arg("result"), - py::arg("parameters"), - py::arg("name"), - R"( - Create user-defined Function which is a representation of a model. - - Parameters - ---------- - results : Node - Node to be used as result. - - parameters : List[op.Parameter] - List of parameters. - - name : str - String to set as function's friendly name. - )"); - function.def("get_output_size", - &ngraph::Function::get_output_size, - R"( - Return the number of outputs for the function. - - Returns - ---------- - get_output_size : int - Number of outputs. - )"); - function.def("get_ops", - &ngraph::Function::get_ops, - R"( - Return ops used in the function. - - Returns - ---------- - get_ops : List[Node] - List of Nodes representing ops used in function. - )"); - function.def("get_ordered_ops", - &ngraph::Function::get_ordered_ops, - R"( - Return ops used in the function in topological order. - - Returns - ---------- - get_ordered_ops : List[Node] - List of sorted Nodes representing ops used in function. 
- )"); - function.def("get_output_op", - &ngraph::Function::get_output_op, - py::arg("i"), - R"( - Return the op that generates output i - - Parameters - ---------- - i : int - output index - - Returns - ---------- - get_output_op : Node - Node object that generates output i - )"); - function.def("get_output_element_type", - &ngraph::Function::get_output_element_type, - py::arg("i"), - R"( - Return the element type of output i - - Parameters - ---------- - i : int - output index - - Returns - ---------- - get_output_op : Type - Type object of output i - )"); - function.def("get_output_shape", - &ngraph::Function::get_output_shape, - py::arg("i"), - R"( - Return the shape of element i - - Parameters - ---------- - i : int - element index - - Returns - ---------- - get_output_shape : Shape - Shape object of element i - )"); - function.def("get_output_partial_shape", - &ngraph::Function::get_output_partial_shape, - py::arg("i"), - R"( - Return the partial shape of element i - - Parameters - ---------- - i : int - element index - - Returns - ---------- - get_output_partial_shape : PartialShape - PartialShape object of element i - )"); - function.def("get_parameters", - &ngraph::Function::get_parameters, - R"( - Return the function parameters. - - Returns - ---------- - get_parameters : ParameterVector - ParameterVector containing function parameters. - )"); - function.def("get_results", - &ngraph::Function::get_results, - R"( - Return a list of function outputs. - - Returns - ---------- - get_results : ResultVector - ResultVector containing function parameters. - )"); - function.def("get_result", - &ngraph::Function::get_result, - R"( - Return single result. - - Returns - ---------- - get_result : Node - Node object representing result. - )"); - function.def("get_name", - &ngraph::Function::get_name, - R"( - Get the unique name of the function. - - Returns - ---------- - get_name : str - String with a name of the function. - )"); - function.def("get_friendly_name", - &ngraph::Function::get_friendly_name, - R"( - Gets the friendly name for a function. If no - friendly name has been set via set_friendly_name - then the function's unique name is returned. - - Returns - ---------- - get_friendly_name : str - String with a friendly name of the function. - )"); - function.def("set_friendly_name", - &ngraph::Function::set_friendly_name, - py::arg("name"), - R"( - Sets a friendly name for a function. This does - not overwrite the unique name of the function and - is retrieved via get_friendly_name(). Used mainly - for debugging. - - Parameters - ---------- - name : str - String to set as the friendly name. - )"); - function.def("is_dynamic", - &ngraph::Function::is_dynamic, - R"( - Returns true if any of the op's defined in the function - contains partial shape. 
- - Returns - ---------- - is_dynamic : bool - )"); - function.def("__repr__", [](const ngraph::Function& self) { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::stringstream shapes_ss; - for (size_t i = 0; i < self.get_output_size(); ++i) { - if (i > 0) { - shapes_ss << ", "; - } - shapes_ss << self.get_output_partial_shape(i); - } - return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>"; - }); - function.def_static("from_capsule", [](py::object* capsule) { - // get the underlying PyObject* which is a PyCapsule pointer - auto* pybind_capsule_ptr = capsule->ptr(); - // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME - auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); - - auto* ngraph_function = static_cast*>(capsule_ptr); - if (ngraph_function && *ngraph_function) { - return *ngraph_function; - } else { - throw std::runtime_error("The provided capsule does not contain an ngraph::Function"); - } - }); - function.def_static("to_capsule", [](std::shared_ptr& ngraph_function) { - // create a shared pointer on the heap before putting it in the capsule - // this secures the lifetime of the object transferred by the capsule - auto* sp_copy = new std::shared_ptr(ngraph_function); - - // a destructor callback that will delete the heap allocated shared_ptr - // when the capsule is destructed - auto sp_deleter = [](PyObject* capsule) { - auto* capsule_ptr = PyCapsule_GetPointer(capsule, CAPSULE_NAME); - auto* function_sp = static_cast*>(capsule_ptr); - if (function_sp) { - delete function_sp; - } - }; - - // put the shared_ptr in a new capsule under the same name as in "from_capsule" - auto pybind_capsule = py::capsule(sp_copy, CAPSULE_NAME, sp_deleter); - - return pybind_capsule; - }); - - function.def_property_readonly("name", &ngraph::Function::get_name); - function.def_property("friendly_name", &ngraph::Function::get_friendly_name, &ngraph::Function::set_friendly_name); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/function.hpp b/src/bindings/python/src/compatibility/pyngraph/function.hpp deleted file mode 100644 index 7bfb8328004242..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/function.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Function(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node.cpp b/src/bindings/python/src/compatibility/pyngraph/node.cpp deleted file mode 100644 index f696a4297cad7f..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node.cpp +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node.hpp" - -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/subtract.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/rt_map.hpp" -#include "pyngraph/variant.hpp" - -class PyNode : public ngraph::Node { -public: - std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& inputs) const override { - PYBIND11_OVERRIDE_PURE(std::shared_ptr, ngraph::Node, clone_with_new_inputs, inputs); - } - - const type_info_t& get_type_info() const override { - PYBIND11_OVERRIDE_PURE(type_info_t&, 
ngraph::Node, get_type_info, );
-    }
-};
-
-namespace impl {
-namespace {
-py::dict get_attributes(const std::shared_ptr<ngraph::Node>& node) {
-    util::DictAttributeSerializer dict_serializer(node);
-    return dict_serializer.get_attributes();
-}
-
-void set_attribute(std::shared_ptr<ngraph::Node>& node, const std::string& atr_name, py::object value) {
-    py::dict attr_dict;
-    attr_dict[atr_name.c_str()] = value;
-    std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>> variables;
-    util::DictAttributeDeserializer dict_deserializer(attr_dict, variables);
-    node->visit_attributes(dict_deserializer);
-}
-}  // namespace
-}  // namespace impl
-
-namespace py = pybind11;
-
-using PyRTMap = ngraph::Node::RTMap;
-
-PYBIND11_MAKE_OPAQUE(PyRTMap);
-
-void regclass_pyngraph_Node(py::module m) {
-    py::class_<ngraph::Node, std::shared_ptr<ngraph::Node>, PyNode> node(m,
-                                                                         "Node",
-                                                                         py::dynamic_attr(),
-                                                                         py::module_local());
-    node.doc() = "ngraph.impl.Node wraps ngraph::Node";
-    node.def(
-        "__add__",
-        [](const std::shared_ptr<ngraph::Node>& a, const std::shared_ptr<ngraph::Node> b) {
-            return std::make_shared<ngraph::op::v1::Add>(a, b);
-        },
-        py::is_operator());
-    node.def(
-        "__sub__",
-        [](const std::shared_ptr<ngraph::Node>& a, const std::shared_ptr<ngraph::Node> b) {
-            return std::make_shared<ngraph::op::v1::Subtract>(a, b);
-        },
-        py::is_operator());
-    node.def(
-        "__mul__",
-        [](const std::shared_ptr<ngraph::Node>& a, const std::shared_ptr<ngraph::Node> b) {
-            return std::make_shared<ngraph::op::v1::Multiply>(a, b);
-        },
-        py::is_operator());
-    node.def(
-        "__div__",
-        [](const std::shared_ptr<ngraph::Node>& a, const std::shared_ptr<ngraph::Node> b) {
-            return std::make_shared<ngraph::op::v1::Divide>(a, b);
-        },
-        py::is_operator());
-    node.def(
-        "__truediv__",
-        [](const std::shared_ptr<ngraph::Node>& a, const std::shared_ptr<ngraph::Node> b) {
-            return std::make_shared<ngraph::op::v1::Divide>(a, b);
-        },
-        py::is_operator());
-
-    node.def("__repr__", [](const ngraph::Node& self) {
-        std::string type_name = self.get_type_name();
-        std::stringstream shapes_ss;
-        for (size_t i = 0; i < self.get_output_size(); ++i) {
-            if (i > 0) {
-                shapes_ss << ", ";
-            }
-            shapes_ss << self.get_output_partial_shape(i);
-        }
-        return "<" + type_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>";
-    });
-
-    node.def("get_element_type",
-             &ngraph::Node::get_element_type,
-             R"(
-                Checks that there is exactly one output and returns its element type.
-
-                Returns
-                ----------
-                get_element_type : Type
-                    Type of the output.
-             )");
-    node.def("get_output_size",
-             &ngraph::Node::get_output_size,
-             R"(
-                Returns the number of outputs from the node.
-
-                Returns
-                ----------
-                get_output_size : int
-                    Number of outputs.
-             )");
-    node.def("get_output_element_type",
-             &ngraph::Node::get_output_element_type,
-             py::arg("i"),
-             R"(
-                Returns the element type for output i
-
-                Parameters
-                ----------
-                i : int
-                    Index of the output.
-
-                Returns
-                ----------
-                get_output_element_type : Type
-                    Type of the output i
-             )");
-    node.def("get_output_shape",
-             &ngraph::Node::get_output_shape,
-             py::arg("i"),
-             R"(
-                Returns the shape for output i
-
-                Parameters
-                ----------
-                i : int
-                    Index of the output.
-
-                Returns
-                ----------
-                get_output_shape : Shape
-                    Shape of the output i
-             )");
-    node.def("get_output_partial_shape",
-             &ngraph::Node::get_output_partial_shape,
-             py::arg("i"),
-             R"(
-                Returns the partial shape for output i
-
-                Parameters
-                ----------
-                i : int
-                    Index of the output.
-
-                Returns
-                ----------
-                get_output_partial_shape : PartialShape
-                    PartialShape of the output i
-             )");
-    node.def("get_type_name",
-             &ngraph::Node::get_type_name,
-             R"(
-                Returns Type's name from the node.
-
-                Returns
-                ----------
-                get_type_name : str
-                    String representing Type's name.
-             )");
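
The operator overloads registered above are what let legacy nGraph graph-building read like NumPy arithmetic; each dunder wraps the corresponding Add/Subtract/Multiply/Divide op. A short sketch, again assuming a pre-removal release with the legacy ngraph module:

    # The removed Node.__add__/__mul__ overloads create new graph nodes.
    import numpy as np
    import ngraph as ng

    a = ng.parameter([4], np.float32, name="a")
    b = ng.parameter([4], np.float32, name="b")

    node = a + b * a                 # builds a Multiply feeding an Add
    print(node.get_type_name())      # 'Add'
    print(node.get_output_partial_shape(0))
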
- )"); - node.def("get_name", - &ngraph::Node::get_name, - R"( - Get the unique name of the node - - Returns - ---------- - get_name : str - Unique name of the node. - )"); - node.def("get_friendly_name", - &ngraph::Node::get_friendly_name, - R"( - Gets the friendly name for a node. If no friendly name has - been set via set_friendly_name then the node's unique name - is returned. - - Returns - ---------- - get_name : str - Friendly name of the node. - )"); - node.def("get_type_info", &ngraph::Node::get_type_info); - node.def("set_friendly_name", - &ngraph::Node::set_friendly_name, - py::arg("name"), - R"( - Sets a friendly name for a node. This does not overwrite the unique name - of the node and is retrieved via get_friendly_name(). Used mainly for - debugging. The friendly name may be set exactly once. - - Parameters - ---------- - name : str - Friendly name to set. - )"); - node.def("input", - (ngraph::Input(ngraph::Node::*)(size_t)) & ngraph::Node::input, - py::arg("input_index"), - R"( - A handle to the input_index input of this node. - - Parameters - ---------- - input_index : int - Index of Input. - - Returns - ---------- - input : Input - Input of this node. - )"); - node.def("inputs", - (std::vector>(ngraph::Node::*)()) & ngraph::Node::inputs, - R"( - A list containing a handle for each of this node's inputs, in order. - - Returns - ---------- - inputs : List[Input] - List of node's inputs. - )"); - node.def("output", - (ngraph::Output(ngraph::Node::*)(size_t)) & ngraph::Node::output, - py::arg("output_index"), - R"( - A handle to the output_index output of this node. - - Parameters - ---------- - output_index : int - Index of Output. - - Returns - ---------- - input : Output - Output of this node. - )"); - node.def("outputs", - (std::vector>(ngraph::Node::*)()) & ngraph::Node::outputs, - R"( - A list containing a handle for each of this node's outputs, in order. - - Returns - ---------- - inputs : List[Output] - List of node's outputs. - )"); - node.def("get_rt_info", - (PyRTMap & (ngraph::Node::*)()) & ngraph::Node::get_rt_info, - py::return_value_policy::reference_internal, - R"( - Returns PyRTMap which is a dictionary of user defined runtime info. - - Returns - ---------- - get_rt_info : PyRTMap - A dictionary of user defined data. 
- )"); - - node.def("set_argument", &ngraph::Node::set_argument); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::NodeVector& args) { - self->set_arguments(args); - }); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::OutputVector& args) { - self->set_arguments(args); - }); - - node.def_property_readonly("shape", &ngraph::Node::get_shape); - node.def_property_readonly("name", &ngraph::Node::get_name); - node.def_property_readonly("rt_info", - (PyRTMap & (ngraph::Node::*)()) & ngraph::Node::get_rt_info, - py::return_value_policy::reference_internal); - node.def_property_readonly("type_info", &ngraph::Node::get_type_info); - node.def_property("friendly_name", &ngraph::Node::get_friendly_name, &ngraph::Node::set_friendly_name); - - node.def("get_attributes", &impl::get_attributes); - node.def("set_attribute", &impl::set_attribute); - // for backwards compatibility, this is how this method was named until 2021.4 - node.def("_get_attributes", &impl::get_attributes); - // for backwards compatibility, this is how this method was named until 2021.4 - node.def("_set_attribute", &impl::set_attribute); - node.def("set_arguments", [](const std::shared_ptr& self, const ngraph::OutputVector& arguments) { - return self->set_arguments(arguments); - }); - node.def("validate", [](const std::shared_ptr& self) { - return self->constructor_validate_and_infer_types(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node.hpp b/src/bindings/python/src/compatibility/pyngraph/node.hpp deleted file mode 100644 index 03734ae1f98504..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Node(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp b/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp deleted file mode 100644 index 65e1646f742ae4..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_factory.cpp +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "node_factory.hpp" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/check.hpp" -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/op/util/variable.hpp" -#include "ngraph/opsets/opset.hpp" -#include "openvino/opsets/opset.hpp" - -namespace py = pybind11; - -namespace { -class NodeFactory { -public: - NodeFactory() {} - NodeFactory(const std::string& opset_name) : m_opset(get_opset(opset_name)) {} - - std::shared_ptr create(const std::string op_type_name, - const ngraph::OutputVector& arguments, - const py::dict& attributes = py::dict()) { - std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); - - NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); - NGRAPH_CHECK(!ngraph::op::is_constant(op_node), - "Currently NodeFactory doesn't support Constant node: ", - op_type_name); - - util::DictAttributeDeserializer visitor(attributes, m_variables); - - op_node->set_arguments(arguments); - op_node->visit_attributes(visitor); - op_node->constructor_validate_and_infer_types(); - - return op_node; - } - - 
std::shared_ptr create(const std::string op_type_name) { - std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); - - NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); - NGRAPH_CHECK(!ngraph::op::is_constant(op_node), - "Currently NodeFactory doesn't support Constant node: ", - op_type_name); - - return op_node; - } - -private: - const ngraph::OpSet& get_opset(std::string opset_ver) { - std::locale loc; - std::transform(opset_ver.begin(), opset_ver.end(), opset_ver.begin(), [&loc](char c) { - return std::tolower(c, loc); - }); - - const auto& s_opsets = ngraph::get_available_opsets(); - - auto it = s_opsets.find(opset_ver); - if (it == s_opsets.end()) { - OPENVINO_THROW("Unsupported opset version requested."); - } - return it->second(); - } - - const ngraph::OpSet& m_opset = ngraph::get_opset11(); - std::unordered_map> m_variables; -}; -} // namespace - -void regclass_pyngraph_NodeFactory(py::module m) { - py::class_ node_factory(m, "NodeFactory", py::module_local()); - node_factory.doc() = "NodeFactory creates nGraph nodes"; - - node_factory.def(py::init()); - node_factory.def(py::init()); - - node_factory.def("create", [](NodeFactory& self, const std::string name) { - return self.create(name); - }); - node_factory.def("create", - [](NodeFactory& self, - const std::string name, - const ngraph::OutputVector& arguments, - const py::dict& attributes) { - return self.create(name, arguments, attributes); - }); - - node_factory.def("__repr__", [](const NodeFactory& self) { - return ""; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp b/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp deleted file mode 100644 index d7835165d669a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_factory.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_NodeFactory(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_input.cpp b/src/bindings/python/src/compatibility/pyngraph/node_input.cpp deleted file mode 100644 index c6af1e05d50225..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_input.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node_input.hpp" - -#include - -#include "dict_attribute_visitor.hpp" -#include "pyngraph/node_input.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Input(py::module m) { - py::class_, std::shared_ptr>> input(m, - "Input", - py::dynamic_attr(), - py::module_local()); - input.doc() = "ngraph.impl.Input wraps ngraph::Input"; - - input.def("get_node", - &ngraph::Input::get_node, - R"( - Get node referenced by this input handle. - - Returns - ---------- - get_node : Node - Node object referenced by this input handle. - )"); - input.def("get_index", - &ngraph::Input::get_index, - R"( - The index of the input referred to by this input handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - input.def("get_element_type", - &ngraph::Input::get_element_type, - R"( - The element type of the input referred to by this input handle. - - Returns - ---------- - get_element_type : Type - Type of the input. - )"); - input.def("get_shape", - &ngraph::Input::get_shape, - R"( - The shape of the input referred to by this input handle. 
- - Returns - ---------- - get_shape : Shape - Shape of the input. - )"); - input.def("get_partial_shape", - &ngraph::Input::get_partial_shape, - R"( - The partial shape of the input referred to by this input handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the input. - )"); - input.def("get_source_output", - &ngraph::Input::get_source_output, - R"( - A handle to the output that is connected to this input. - - Returns - ---------- - get_source_output : Output - Output that is connected to the input. - )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_input.hpp b/src/bindings/python/src/compatibility/pyngraph/node_input.hpp deleted file mode 100644 index f4eaa0aa7acca0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_input.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Input(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/node_output.cpp b/src/bindings/python/src/compatibility/pyngraph/node_output.cpp deleted file mode 100644 index 569f1bdf6e1ff0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_output.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/node_output.hpp" - -#include - -#include "dict_attribute_visitor.hpp" -#include "pyngraph/node_output.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Output(py::module m) { - py::class_, std::shared_ptr>> output(m, - "Output", - py::dynamic_attr(), - py::module_local()); - output.doc() = "ngraph.impl.Output wraps ngraph::Output"; - - output.def("get_node", - &ngraph::Output::get_node, - R"( - Get node referenced by this output handle. - - Returns - ---------- - get_node : Node - Node object referenced by this output handle. - )"); - output.def("get_index", - &ngraph::Output::get_index, - R"( - The index of the output referred to by this output handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - output.def("get_element_type", - &ngraph::Output::get_element_type, - R"( - The element type of the output referred to by this output handle. - - Returns - ---------- - get_element_type : Type - Type of the output. - )"); - output.def("get_shape", - &ngraph::Output::get_shape, - R"( - The shape of the output referred to by this output handle. - - Returns - ---------- - get_shape : Shape - Shape of the output. - )"); - output.def("get_partial_shape", - &ngraph::Output::get_partial_shape, - R"( - The partial shape of the output referred to by this output handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the output. - )"); - output.def("get_target_inputs", - &ngraph::Output::get_target_inputs, - R"( - A set containing handles for all inputs targeted by the output - referenced by this output handle. - Returns - ---------- - get_target_inputs : Set[Input] - Set of Inputs. 
- )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/node_output.hpp b/src/bindings/python/src/compatibility/pyngraph/node_output.hpp deleted file mode 100644 index db94e760d58a58..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/node_output.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Output(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp deleted file mode 100644 index 5b4fd01ea162d8..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/constant.cpp +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/constant.hpp" - -#include -#include -#include -#include - -#include -#include - -#include "ngraph/shape.hpp" -#include "pyngraph/ops/constant.hpp" - -namespace py = pybind11; - -template -std::vector _get_byte_strides(const ngraph::Shape& s) { - std::vector byte_strides; - std::vector element_strides = ngraph::row_major_strides(s); - for (auto v : element_strides) { - byte_strides.push_back(static_cast(v) * sizeof(T)); - } - return byte_strides; -} - -template -py::buffer_info _get_buffer_info(const ngraph::op::Constant& c) { - ngraph::Shape shape = c.get_shape(); - return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ - static_cast(c.get_element_type().size()), /* Size of one scalar */ - py::format_descriptor::format(), /* Python struct-style format descriptor */ - static_cast(shape.size()), /* Number of dimensions */ - std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ - _get_byte_strides(shape) /* Strides (in bytes) for each index */ - ); -} - -template <> -py::buffer_info _get_buffer_info(const ngraph::op::Constant& c) { - ngraph::Shape shape = c.get_shape(); - return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ - static_cast(c.get_element_type().size()), /* Size of one scalar */ - std::string(1, 'H'), /* Python struct-style format descriptor */ - static_cast(shape.size()), /* Number of dimensions */ - std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ - _get_byte_strides(shape) /* Strides (in bytes) for each index */ - ); -} - -template -py::array _cast_vector(const ngraph::op::Constant& self) { - auto vec = self.cast_vector(); - return py::array(vec.size(), vec.data()); -} - -void regclass_pyngraph_op_Constant(py::module m) { - py::class_, ngraph::Node> constant( - m, - "Constant", - py::buffer_protocol(), - py::module_local()); - constant.doc() = "ngraph.impl.op.Constant wraps ngraph::op::Constant"; - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - constant.def(py::init&>()); - - constant.def("get_value_strings", &ngraph::op::Constant::get_value_strings); - - constant.def("get_vector", [](const ngraph::op::Constant& self) { - auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) { - return _cast_vector(self); - } else if (element_type == ngraph::element::f16) { - return 
_cast_vector(self); - } else if (element_type == ngraph::element::f32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::f64) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i8) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i16) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::i64) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u16) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u32) { - return _cast_vector(self); - } else if (element_type == ngraph::element::u64) { - return _cast_vector(self); - } else { - throw std::runtime_error("Unsupported data type!"); - } - }); - - // Provide buffer access - constant.def_buffer([](const ngraph::op::Constant& self) -> py::buffer_info { - auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::f64) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i8) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::i64) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u16) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u32) { - return _get_buffer_info(self); - } else if (element_type == ngraph::element::u64) { - return _get_buffer_info(self); - } else { - throw std::runtime_error("Unsupported data type!"); - } - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp deleted file mode 100644 index e1a4324b778e26..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/constant.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Constant(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp deleted file mode 100644 index 43f0358ea04199..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/parameter.hpp" - -#include -#include - -#include - -#include "ngraph/node.hpp" -#include "ngraph/partial_shape.hpp" // ngraph::PartialShape -#include "pyngraph/ops/parameter.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_Parameter(py::module m) { - py::class_, ngraph::Node> parameter( - m, - "Parameter", - py::module_local()); - parameter.doc() = 
"ngraph.impl.op.Parameter wraps ngraph::op::Parameter"; - parameter.def("__repr__", [](const ngraph::Node& self) { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape = py::cast(self.get_output_partial_shape(0)).attr("__str__")().cast(); - std::string type = self.get_element_type().c_type_string(); - return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shape + ", " + type + ")>"; - }); - - parameter.def(py::init()); - parameter.def(py::init()); - // parameter.def_property_readonly("description", &ngraph::op::Parameter::description); - - parameter.def( - "get_partial_shape", - (const ngraph::PartialShape& (ngraph::op::Parameter::*)() const) & ngraph::op::Parameter::get_partial_shape); - parameter.def("get_partial_shape", - (ngraph::PartialShape & (ngraph::op::Parameter::*)()) & ngraph::op::Parameter::get_partial_shape); - parameter.def("set_partial_shape", &ngraph::op::Parameter::set_partial_shape); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp deleted file mode 100644 index d09e1dbe238753..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Parameter(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp deleted file mode 100644 index 92b05896754ac0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/result.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/result.hpp" - -#include -#include - -#include - -#include "ngraph/node.hpp" -#include "pyngraph/ops/result.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_Result(py::module m) { - py::class_, ngraph::Node> result(m, - "Result", - py::module_local()); - result.doc() = "ngraph.impl.op.Result wraps ngraph::op::Result"; -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp deleted file mode 100644 index 3a62bcffda4dcb..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/result.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_Result(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp deleted file mode 100644 index f4ea868ed46cd3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/arithmetic_reduction.hpp" - -#include -#include - -#include "ngraph/op/op.hpp" -#include "pyngraph/ops/util/arithmetic_reduction.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_ArithmeticReduction(py::module m) { - py::class_> - arithmeticReduction(m, "ArithmeticReduction", py::module_local()); - // 
arithmeticReduction.def(py::init&, - // const ngraph::AxisSet& >()); - arithmeticReduction.def("get_reduction_axes", &ngraph::op::util::ArithmeticReduction::get_reduction_axes); - arithmeticReduction.def("set_reduction_axes", &ngraph::op::util::ArithmeticReduction::set_reduction_axes); - - arithmeticReduction.def_property("reduction_axes", - &ngraph::op::util::ArithmeticReduction::get_reduction_axes, - &ngraph::op::util::ArithmeticReduction::set_reduction_axes); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp deleted file mode 100644 index ff3aa03d2fe27c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_ArithmeticReduction(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp deleted file mode 100644 index 698afbe72124e1..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_arithmetic.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m) { - py::class_> - binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp deleted file mode 100644 index dbaf2d6adf89b4..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp deleted file mode 100644 index b86b3d52b90eb7..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_comparison.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_comparison.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m) { - py::class_> - binaryElementwiseComparison(m, "BinaryElementwiseComparison", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp deleted file mode 100644 index 
2f4043cdff420d..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp deleted file mode 100644 index 8db524492ea39b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/binary_elementwise_logical.hpp" - -#include -#include - -#include "pyngraph/ops/util/binary_elementwise_logical.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m) { - py::class_> - binaryElementwiseLogical(m, "BinaryElementwiseLogical", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp deleted file mode 100644 index 4f8ba39f532376..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp deleted file mode 100644 index be9132386a95e7..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/index_reduction.hpp" - -#include -#include - -#include "ngraph/op/op.hpp" -#include "pyngraph/ops/util/index_reduction.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_IndexReduction(py::module m) { - py::class_> indexReduction( - m, - "IndexReduction", - py::module_local()); - - indexReduction.def("get_reduction_axis", &ngraph::op::util::IndexReduction::get_reduction_axis); - indexReduction.def("set_reduction_axis", &ngraph::op::util::IndexReduction::set_reduction_axis); - indexReduction.def("get_index_element_type", &ngraph::op::util::IndexReduction::get_index_element_type); - indexReduction.def("set_index_element_type", &ngraph::op::util::IndexReduction::set_index_element_type); - - indexReduction.def_property("reduction_axis", - &ngraph::op::util::IndexReduction::get_reduction_axis, - &ngraph::op::util::IndexReduction::set_reduction_axis); - indexReduction.def_property("index_element_type", - &ngraph::op::util::IndexReduction::get_index_element_type, - &ngraph::op::util::IndexReduction::set_index_element_type); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp deleted file mode 100644 index 756e839ac610ff..00000000000000 --- 
a/src/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_IndexReduction(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp deleted file mode 100644 index a3da02357a4048..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/op_annotations.hpp" - -#include -#include - -#include "pyngraph/ops/util/op_annotations.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_OpAnnotations(py::module m) { - py::class_> opAnnotations( - m, - "OpAnnotations", - py::module_local()); - opAnnotations.def(py::init<>()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp deleted file mode 100644 index 699e1531dc5d1c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_OpAnnotations(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp deleted file mode 100644 index 7a5e5821138099..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/ops/util/regmodule_pyngraph_op_util.hpp" - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_op_util(py::module m) { - py::module m_util = m.def_submodule("util", "module pyngraph.op.util"); - regclass_pyngraph_op_util_OpAnnotations(m_util); - regclass_pyngraph_op_util_ArithmeticReduction(m_util); - regclass_pyngraph_op_util_BinaryElementwiseArithmetic(m_util); - regclass_pyngraph_op_util_BinaryElementwiseComparison(m_util); - regclass_pyngraph_op_util_BinaryElementwiseLogical(m_util); - regclass_pyngraph_op_util_UnaryElementwiseArithmetic(m_util); - regclass_pyngraph_op_util_IndexReduction(m_util); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp deleted file mode 100644 index 57d73c3f8ecbb5..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include "pyngraph/ops/util/arithmetic_reduction.hpp" -#include "pyngraph/ops/util/binary_elementwise_arithmetic.hpp" -#include "pyngraph/ops/util/binary_elementwise_comparison.hpp" -#include "pyngraph/ops/util/binary_elementwise_logical.hpp" -#include "pyngraph/ops/util/index_reduction.hpp" -#include 
"pyngraph/ops/util/op_annotations.hpp" -#include "pyngraph/ops/util/unary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regmodule_pyngraph_op_util(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp deleted file mode 100644 index 98b524a2dc94ac..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/op/util/unary_elementwise_arithmetic.hpp" - -#include -#include - -#include "pyngraph/ops/util/unary_elementwise_arithmetic.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m) { - py::class_> - unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic", py::module_local()); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp b/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp deleted file mode 100644 index 9744721a0c1b88..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp b/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp deleted file mode 100644 index 69c333d729bb30..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/partial_shape.cpp +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/partial_shape.hpp" // ngraph::PartialShape - -#include -#include - -#include -#include -#include - -#include "ngraph/shape.hpp" // ngraph::Shape -#include "openvino/core/dimension.hpp" // ov::Dimension -#include "pyngraph/partial_shape.hpp" - -namespace py = pybind11; - -static const char* CAPSULE_NAME = "ngraph_partial_shape"; - -void regclass_pyngraph_PartialShape(py::module m) { - py::class_> shape(m, - "PartialShape", - py::module_local()); - shape.doc() = "ngraph.impl.PartialShape wraps ngraph::PartialShape"; - - shape.def(py::init([](const std::vector& dimensions) { - return ngraph::PartialShape(std::vector(dimensions.begin(), dimensions.end())); - })); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init&>()); - shape.def(py::init()); - shape.def(py::init()); - - shape.def_static("dynamic", &ngraph::PartialShape::dynamic, py::arg("r") = ov::Dimension()); - - shape.def_property_readonly("is_dynamic", - &ngraph::PartialShape::is_dynamic, - R"( - False if this shape is static, else True. - A shape is considered static if it has static rank, - and all dimensions of the shape are static. - )"); - shape.def_property_readonly("is_static", - &ngraph::PartialShape::is_static, - R"( - True if this shape is static, else False. - A shape is considered static if it has static rank, - and all dimensions of the shape are static. - )"); - shape.def_property_readonly("rank", - &ngraph::PartialShape::rank, - R"( - The rank of the shape. 
- )"); - shape.def_property_readonly("all_non_negative", - &ngraph::PartialShape::all_non_negative, - R"( - True if all static dimensions of the tensor are - non-negative, else False. - )"); - - shape.def("compatible", - &ngraph::PartialShape::compatible, - py::arg("s"), - R"( - Check whether this shape is compatible with the argument, i.e., - whether it is possible to merge them. - - Parameters - ---------- - s : PartialShape - The shape to be checked for compatibility with this shape. - - - Returns - ---------- - compatible : bool - True if this shape is compatible with s, else False. - )"); - shape.def("refines", - &ngraph::PartialShape::refines, - py::arg("s"), - R"( - Check whether this shape is a refinement of the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - refines : bool - True if this shape refines s, else False. - )"); - shape.def("relaxes", - &ngraph::PartialShape::relaxes, - py::arg("s"), - R"( - Check whether this shape is a relaxation of the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - relaxes : bool - True if this shape relaxes s, else False. - )"); - shape.def("same_scheme", - &ngraph::PartialShape::same_scheme, - py::arg("s"), - R"( - Check whether this shape represents the same scheme as the argument. - - Parameters - ---------- - s : PartialShape - The shape which is being compared against this shape. - - Returns - ---------- - same_scheme : bool - True if shape represents the same scheme as s, else False. - )"); - shape.def("get_max_shape", - &ngraph::PartialShape::get_max_shape, - R"( - Returns - ---------- - get_max_shape : Shape - Get the max bounding shape. - )"); - shape.def("get_min_shape", - &ngraph::PartialShape::get_min_shape, - R"( - Returns - ---------- - get_min_shape : Shape - Get the min bounding shape. - )"); - shape.def("get_shape", - &ngraph::PartialShape::get_shape, - R"( - Returns - ---------- - get_shape : Shape - Get the unique shape. - )"); - shape.def("to_shape", - &ngraph::PartialShape::to_shape, - R"( - Returns - ---------- - to_shapess : Shape - Get the unique shape. - )"); - shape.def( - "get_dimension", - [](const ngraph::PartialShape& self, size_t index) -> ov::Dimension { - return self[index]; - }, - py::arg("index"), - R"( - Get the dimension at specified index of a partial shape. - - Parameters - ---------- - index : int - The index of dimension - - Returns - ---------- - get_dimension : Dimension - Get the particular dimension of a partial shape. 
- )"); - - shape.def( - "__eq__", - [](const ngraph::PartialShape& a, const ngraph::PartialShape& b) { - return a == b; - }, - py::is_operator()); - shape.def( - "__eq__", - [](const ngraph::PartialShape& a, const ngraph::Shape& b) { - return a == b; - }, - py::is_operator()); - - shape.def("__str__", [](const ngraph::PartialShape& self) -> std::string { - std::stringstream ss; - ss << self; - return ss.str(); - }); - - shape.def("__repr__", [](const ngraph::PartialShape& self) -> std::string { - return "() + ">"; - }); - - shape.def_static("from_capsule", [](py::object* capsule) { - // get the underlying PyObject* which is a PyCapsule pointer - auto* pybind_capsule_ptr = capsule->ptr(); - // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME - auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); - - auto* ngraph_pShape = static_cast*>(capsule_ptr); - if (ngraph_pShape && *ngraph_pShape) { - return *ngraph_pShape; - } else { - throw std::runtime_error("The provided capsule does not contain an ngraph::PartialShape"); - } - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp b/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp deleted file mode 100644 index 9553b8e5aa8e2f..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/partial_shape.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_PartialShape(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp b/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp deleted file mode 100644 index bd05454cddea1c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "ngraph/pass/constant_folding.hpp" -#include "ngraph/pass/manager.hpp" -#include "ngraph/pass/pass.hpp" -#include "ngraph/pass/validate.hpp" -#include "pyngraph/passes/manager.hpp" - -namespace py = pybind11; - -namespace { -class ManagerWrapper : public ngraph::pass::Manager { -public: - ManagerWrapper() {} - ~ManagerWrapper() {} - void register_pass(std::string pass_name) { - if (pass_name == "ConstantFolding") - push_pass(); - - if (m_per_pass_validation) { - push_pass(); - } - return; - } -}; -} // namespace - -void regclass_pyngraph_passes_Manager(py::module m) { - py::class_ manager(m, "Manager", py::module_local()); - manager.doc() = "ngraph.impl.passes.Manager wraps ngraph::pass::Manager using ManagerWrapper"; - - manager.def(py::init<>()); - - manager.def("set_per_pass_validation", &ManagerWrapper::set_per_pass_validation); - manager.def("run_passes", &ManagerWrapper::run_passes); - manager.def("register_pass", (void (ManagerWrapper::*)(std::string)) & ManagerWrapper::register_pass); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp b/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp deleted file mode 100644 index 2134cb949e651b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/manager.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void 
regclass_pyngraph_passes_Manager(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp b/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp deleted file mode 100644 index 7ad10bed80f6a0..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/passes/regmodule_pyngraph_passes.hpp" - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_passes(py::module m) { - py::module m_passes = m.def_submodule("passes", "Package ngraph.impl.passes wraps ngraph::passes"); - regclass_pyngraph_passes_Manager(m_passes); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp b/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp deleted file mode 100644 index 194fb9182f5c37..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include "pyngraph/passes/manager.hpp" - -namespace py = pybind11; - -void regmodule_pyngraph_passes(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp b/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp deleted file mode 100644 index eefa6dc378e199..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/pyngraph.cpp +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "pyngraph/axis_set.hpp" -#include "pyngraph/axis_vector.hpp" -#include "pyngraph/coordinate.hpp" -#include "pyngraph/coordinate_diff.hpp" -#include "pyngraph/dimension.hpp" -#include "pyngraph/discrete_type_info.hpp" -#include "pyngraph/function.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/node_factory.hpp" -#include "pyngraph/node_input.hpp" -#include "pyngraph/node_output.hpp" -#include "pyngraph/ops/constant.hpp" -#include "pyngraph/ops/parameter.hpp" -#include "pyngraph/ops/result.hpp" -#include "pyngraph/ops/util/regmodule_pyngraph_op_util.hpp" -#include "pyngraph/partial_shape.hpp" -#include "pyngraph/passes/regmodule_pyngraph_passes.hpp" -#include "pyngraph/rt_map.hpp" -#include "pyngraph/shape.hpp" -#include "pyngraph/strides.hpp" -#include "pyngraph/types/regmodule_pyngraph_types.hpp" -#include "pyngraph/util.hpp" -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -PYBIND11_MODULE(_pyngraph, m) { - m.doc() = "Package ngraph.impl that wraps nGraph's namespace ngraph"; - regclass_pyngraph_PyRTMap(m); - regmodule_pyngraph_types(m); - regclass_pyngraph_Dimension(m); // Dimension must be registered before PartialShape - regclass_pyngraph_Shape(m); - regclass_pyngraph_PartialShape(m); - regclass_pyngraph_Node(m); - regclass_pyngraph_Input(m); - regclass_pyngraph_Output(m); - regclass_pyngraph_NodeFactory(m); - regclass_pyngraph_Strides(m); - regclass_pyngraph_CoordinateDiff(m); - regclass_pyngraph_DiscreteTypeInfo(m); - regclass_pyngraph_AxisSet(m); - regclass_pyngraph_AxisVector(m); - regclass_pyngraph_Coordinate(m); - py::module m_op = m.def_submodule("op", "Package ngraph.impl.op that wraps ngraph::op"); - regclass_pyngraph_op_Constant(m_op); - regclass_pyngraph_op_Parameter(m_op); - 
regclass_pyngraph_op_Result(m_op); - regmodule_pyngraph_op_util(m_op); - regclass_pyngraph_Function(m); - regmodule_pyngraph_passes(m); - regmodule_pyngraph_util(m); - regclass_pyngraph_Variant(m); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp b/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp deleted file mode 100644 index 779cd66971fb81..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/rt_map.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/rt_map.hpp" - -#include -#include -#include - -#include "dict_attribute_visitor.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/subtract.hpp" -#include "pyngraph/node.hpp" -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -using PyRTMap = ov::RTMap; - -PYBIND11_MAKE_OPAQUE(PyRTMap); - -void regclass_pyngraph_PyRTMap(py::module m) { - auto py_map = py::bind_map(m, "PyRTMap", py::module_local()); - py_map.doc() = "ngraph.impl.PyRTMap makes bindings for std::map>, which can later be used as ngraph::Node::RTMap"; - - py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const std::string v) { - m[k] = v; - }); - py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const int64_t v) { - m[k] = v; - }); - py_map.def("__getitem__", [](PyRTMap& m, const std::string& k) { - return m.at(k).as>(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp b/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp deleted file mode 100644 index 1b40dfc8cee640..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/rt_map.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_PyRTMap(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/shape.cpp b/src/bindings/python/src/compatibility/pyngraph/shape.cpp deleted file mode 100644 index f43dda508a2e2b..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/shape.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/shape.hpp" // ngraph::Shape - -#include -#include - -#include -#include -#include - -#include "pyngraph/shape.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Shape(py::module m) { - py::class_> shape(m, "Shape", py::module_local()); - shape.doc() = "ngraph.impl.Shape wraps ngraph::Shape"; - shape.def(py::init&>(), py::arg("axis_lengths")); - shape.def(py::init&>(), py::arg("axis_lengths")); - shape.def(py::init(), py::arg("axis_lengths")); - shape.def("__len__", [](const ngraph::Shape& v) { - return v.size(); - }); - shape.def("__getitem__", [](const ngraph::Shape& v, int key) { - return v[key]; - }); - - shape.def( - "__iter__", - [](ngraph::Shape& v) { - return py::make_iterator(v.begin(), v.end()); - }, - py::keep_alive<0, 1>()); /* Keep vector alive while iterator is used */ - - shape.def("__str__", [](const ngraph::Shape& self) -> std::string { - std::stringstream ss; - ss << self; - return ss.str(); - }); - - shape.def("__repr__", [](const ngraph::Shape& self) -> std::string { - return "() + ">"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/shape.hpp 
b/src/bindings/python/src/compatibility/pyngraph/shape.hpp deleted file mode 100644 index 1f0cd8b369997c..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/shape.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Shape(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/strides.cpp b/src/bindings/python/src/compatibility/pyngraph/strides.cpp deleted file mode 100644 index b740336e2288f5..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/strides.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/strides.hpp" // ngraph::Strides - -#include -#include - -#include -#include -#include - -#include "pyngraph/strides.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Strides(py::module m) { - py::class_> strides(m, "Strides", py::module_local()); - strides.doc() = "ngraph.impl.Strides wraps ngraph::Strides"; - strides.def(py::init&>(), py::arg("axis_strides")); - strides.def(py::init&>(), py::arg("axis_strides")); - strides.def(py::init(), py::arg("axis_strides")); - - strides.def("__str__", [](const ngraph::Strides& self) -> std::string { - std::stringstream stringstream; - std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); - std::string string = stringstream.str(); - return string.substr(0, string.size() - 2); - }); - - strides.def("__repr__", [](const ngraph::Strides& self) -> std::string { - std::string class_name = py::cast(self).get_type().attr("__name__").cast(); - std::string shape_str = py::cast(self).attr("__str__")().cast(); - return "<" + class_name + ": (" + shape_str + ")>"; - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/strides.hpp b/src/bindings/python/src/compatibility/pyngraph/strides.hpp deleted file mode 100644 index 619f20b77562e8..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/strides.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Strides(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp b/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp deleted file mode 100644 index 070a89acd7876a..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/types/element_type.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/type/element_type.hpp" - -#include -#include - -#include "ngraph/op/parameter.hpp" -#include "pyngraph/types/element_type.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Type(py::module m) { - py::class_> type(m, "Type", py::module_local()); - type.doc() = "ngraph.impl.Type wraps ngraph::element::Type"; - type.attr("boolean") = ngraph::element::boolean; - type.attr("f16") = ngraph::element::f16; - type.attr("f32") = ngraph::element::f32; - type.attr("f64") = ngraph::element::f64; - type.attr("i8") = ngraph::element::i8; - type.attr("i16") = ngraph::element::i16; - type.attr("i32") = ngraph::element::i32; - type.attr("i64") = ngraph::element::i64; - type.attr("u1") = ngraph::element::u1; - type.attr("u8") = ngraph::element::u8; - type.attr("u16") = 
ngraph::element::u16; - type.attr("u32") = ngraph::element::u32; - type.attr("u64") = ngraph::element::u64; - type.attr("bf16") = ngraph::element::bf16; - - type.def("__repr__", [](const ngraph::element::Type& self) { - std::string bitwidth = std::to_string(self.bitwidth()); - if (self.is_signed()) { - return ""; - } - return ""; - }); - - type.def( - "__eq__", - [](const ngraph::element::Type& a, const ngraph::element::Type& b) { - return a == b; - }, - py::is_operator()); - - type.def_property_readonly("bitwidth", &ngraph::element::Type::bitwidth); - type.def_property_readonly("is_real", &ngraph::element::Type::is_real); - type.def("get_type_name", &ngraph::element::Type::get_type_name); - type.def("to_string", &ngraph::element::Type::to_string); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp b/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp deleted file mode 100644 index 4a345dd6357ee4..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/types/element_type.hpp +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_pyngraph_Type(py::module m); -void regclass_pyngraph_Bool(py::module m); -void regclass_pyngraph_Float32(py::module m); -void regclass_pyngraph_Float64(py::module m); -void regclass_pyngraph_Int8(py::module m); -// void regclass_pyngraph_Int16(py::module m); -void regclass_pyngraph_Int32(py::module m); -void regclass_pyngraph_Int64(py::module m); -void regclass_pyngraph_UInt8(py::module m); -// void regclass_pyngraph_UInt16(py::module m); -void regclass_pyngraph_UInt32(py::module m); -void regclass_pyngraph_UInt64(py::module m); -void regclass_pyngraph_BFloat16(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp b/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp deleted file mode 100644 index 188107323fef55..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/types/regmodule_pyngraph_types.hpp" - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_types(py::module m) { - regclass_pyngraph_Type(m); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp b/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp deleted file mode 100644 index 36c0b6aae59798..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "pyngraph/types/element_type.hpp" - -namespace py = pybind11; - -void regmodule_pyngraph_types(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/util.cpp b/src/bindings/python/src/compatibility/pyngraph/util.cpp deleted file mode 100644 index 042b0eea8ec6a5..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/util.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyngraph/util.hpp" - -#include - -#include - -#include "ngraph/op/result.hpp" -#include 
"ngraph/validation_util.hpp" -#include "ngraph/version.hpp" -#include "openvino/runtime/core.hpp" - -namespace py = pybind11; - -inline void* numpy_to_c(py::array a) { - py::buffer_info info = a.request(); - return info.ptr; -} - -void regmodule_pyngraph_util(py::module m) { - py::module mod = m.def_submodule("util", "ngraph.impl.util"); - mod.def("numpy_to_c", &numpy_to_c); - mod.def("get_constant_from_source", - &ngraph::get_constant_from_source, - py::arg("output"), - R"( - Runs an estimation of source tensor. - Parameters - ---------- - output : Output - output node - Returns - ---------- - get_constant_from_source : Constant or None - If it succeeded to calculate both bounds and - they are the same returns Constant operation - from the resulting bound, otherwise Null. - )"); - - mod.def("get_ngraph_version_string", []() -> std::string { - NGRAPH_SUPPRESS_DEPRECATED_START - return get_ngraph_version_string(); - NGRAPH_SUPPRESS_DEPRECATED_END - }); - - mod.def("get_ie_output_name", [](const ngraph::Output& output) { - return ov::op::util::get_ie_output_name(output); - }); - - mod.def("shutdown", - &ov::shutdown, - R"( - Shut down the OpenVINO by deleting all static-duration objects allocated by the library and releasing - dependent resources - - This function should be used by advanced user to control unload the resources. - - You might want to use this function if you are developing a dynamically-loaded library which should clean up all - resources after itself when the library is unloaded. - )"); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/util.hpp b/src/bindings/python/src/compatibility/pyngraph/util.hpp deleted file mode 100644 index 1b1b8978fdfe67..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/util.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regmodule_pyngraph_util(py::module m); diff --git a/src/bindings/python/src/compatibility/pyngraph/variant.cpp b/src/bindings/python/src/compatibility/pyngraph/variant.cpp deleted file mode 100644 index 5682b46123931e..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/variant.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "pyngraph/variant.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Variant(py::module m) { - py::class_ variant_base(m, "Variant", py::module_local()); - variant_base.doc() = "ngraph.impl.Variant wraps ngraph::Variant"; - - variant_base.def( - "__eq__", - [](const ov::Any& a, const ov::Any& b) { - return a == b; - }, - py::is_operator()); - variant_base.def( - "__eq__", - [](const ov::Any& a, const std::string& b) { - return a.as() == b; - }, - py::is_operator()); - variant_base.def( - "__eq__", - [](const ov::Any& a, const int64_t& b) { - return a.as() == b; - }, - py::is_operator()); - - variant_base.def("__repr__", [](const ov::Any self) { - std::stringstream ret; - self.print(ret); - return ret.str(); - }); -} diff --git a/src/bindings/python/src/compatibility/pyngraph/variant.hpp b/src/bindings/python/src/compatibility/pyngraph/variant.hpp deleted file mode 100644 index d9a1307db9edb3..00000000000000 --- a/src/bindings/python/src/compatibility/pyngraph/variant.hpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - 
-#include -#include - -#include "openvino/core/any.hpp" - -namespace py = pybind11; - -void regclass_pyngraph_Variant(py::module m); From 729da79c3014557cfae7748968b8082bd9f3c3a0 Mon Sep 17 00:00:00 2001 From: River Li Date: Thu, 11 Jan 2024 23:29:47 +0800 Subject: [PATCH 02/43] [Core] implement BGR2RGB for postprocess (#22022) * [Core] implement BGR2RGB for postprocess * Fix clang format issue * Add missing file * Solve failure issue * Minor update * Add test cases for GPU and TEMPLATE plugin * Fix GPU test case failure issue Failed: [GPU] Output tensor with name Abs_60 is not found * Update for code reviewing * Fix test failure issue of exception type --- .../core/preprocess/output_model_info.hpp | 11 +++ .../core/preprocess/postprocess_steps.hpp | 8 ++ src/core/src/preprocess/pre_post_process.cpp | 11 +++ src/core/src/preprocess/preprocess_impls.cpp | 4 + src/core/src/preprocess/preprocess_impls.hpp | 21 ++++- .../src/preprocess/preprocess_steps_impl.cpp | 71 ++++++++++++++++ .../src/preprocess/preprocess_steps_impl.hpp | 22 +++-- src/core/tests/preprocess.cpp | 82 +++++++++++++++++++ .../subgraph_tests/preprocess.cpp | 7 ++ .../subgraph_tests/preprocess.cpp | 7 ++ .../subgraph_reference/preprocess.cpp | 34 ++++++++ .../subgraph/preprocess.hpp | 15 ++++ .../src/subgraph/postprocess.cpp | 49 +++++++++++ .../preprocess/preprocess_builders.hpp | 31 +++++++ 14 files changed, 363 insertions(+), 10 deletions(-) create mode 100644 src/tests/functional/shared_test_classes/src/subgraph/postprocess.cpp diff --git a/src/core/include/openvino/core/preprocess/output_model_info.hpp b/src/core/include/openvino/core/preprocess/output_model_info.hpp index 752985d724a64b..e5b96ee5134bb2 100644 --- a/src/core/include/openvino/core/preprocess/output_model_info.hpp +++ b/src/core/include/openvino/core/preprocess/output_model_info.hpp @@ -6,6 +6,7 @@ #include "openvino/core/core_visibility.hpp" #include "openvino/core/layout.hpp" +#include "openvino/core/preprocess/color_format.hpp" namespace ov { namespace preprocess { @@ -42,6 +43,16 @@ class OPENVINO_API OutputModelInfo final { /// /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner OutputModelInfo& set_layout(const ov::Layout& layout); + + /// \brief Set color format for model's output tensor + /// + /// \param format Color format for model's output tensor. + /// + /// \param sub_names Optional list of sub-names, not used, placeholder for future. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + OutputModelInfo& set_color_format(const ov::preprocess::ColorFormat& format, + const std::vector& sub_names = {}); }; } // namespace preprocess diff --git a/src/core/include/openvino/core/preprocess/postprocess_steps.hpp b/src/core/include/openvino/core/preprocess/postprocess_steps.hpp index ebb3e03ab17f83..d9e0cf75c74f47 100644 --- a/src/core/include/openvino/core/preprocess/postprocess_steps.hpp +++ b/src/core/include/openvino/core/preprocess/postprocess_steps.hpp @@ -93,6 +93,14 @@ class OPENVINO_API PostProcessSteps final { /// /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner PostProcessSteps& custom(const CustomPostprocessOp& postprocess_cb); + + /// \brief Converts color format for user's output tensor. Requires destination color format to be specified by + /// OutputModelInfo::set_color_format.
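+ /// A minimal usage sketch, following the tests added later in this patch (`model` is assumed to be a std::shared_ptr<ov::Model> whose output layout is NHWC):
+ /// \code
+ /// auto p = ov::preprocess::PrePostProcessor(model);
+ /// p.output().model().set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
+ /// p.output().postprocess().convert_color(ov::preprocess::ColorFormat::RGB);
+ /// model = p.build();
+ /// \endcode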
+ /// + /// \param dst_format Destination color format of input image + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PostProcessSteps& convert_color(const ov::preprocess::ColorFormat& dst_format); }; } // namespace preprocess diff --git a/src/core/src/preprocess/pre_post_process.cpp b/src/core/src/preprocess/pre_post_process.cpp index c219a77d91a780..0350437299a3c1 100644 --- a/src/core/src/preprocess/pre_post_process.cpp +++ b/src/core/src/preprocess/pre_post_process.cpp @@ -361,6 +361,12 @@ OutputModelInfo& OutputModelInfo::set_layout(const Layout& layout) { return *this; } +OutputModelInfo& OutputModelInfo::set_color_format(const ov::preprocess::ColorFormat& format, + const std::vector& sub_names) { + m_impl->set_color_format(format); + return *this; +} + // --------------------- PostProcessSteps ------------------ PostProcessSteps::PostProcessSteps() : m_impl(std::unique_ptr(new PostProcessStepsImpl())) {} @@ -381,6 +387,11 @@ PostProcessSteps& PostProcessSteps::convert_layout(const std::vector& return *this; } +PostProcessSteps& PostProcessSteps::convert_color(const ov::preprocess::ColorFormat& dst_format) { + m_impl->add_convert_color_impl(dst_format); + return *this; +} + PostProcessSteps& PostProcessSteps::custom(const CustomPostprocessOp& postprocess_cb) { // 'true' indicates that custom postprocessing step will trigger validate_and_infer_types m_impl->actions().emplace_back( diff --git a/src/core/src/preprocess/preprocess_impls.cpp b/src/core/src/preprocess/preprocess_impls.cpp index 57a02fae606220..1888a4c62fb738 100644 --- a/src/core/src/preprocess/preprocess_impls.cpp +++ b/src/core/src/preprocess/preprocess_impls.cpp @@ -338,6 +338,10 @@ void OutputInfo::OutputInfoImpl::build(ov::ResultVector& results) { if (get_tensor_data()->is_element_type_set()) { context.target_element_type() = get_tensor_data()->get_element_type(); } + if (get_model_data()->is_color_format_set()) { + context.color_format() = get_model_data()->get_color_format(); + } + // Apply post-processing node = result->get_input_source_output(0); bool post_processing_applied = false; diff --git a/src/core/src/preprocess/preprocess_impls.hpp b/src/core/src/preprocess/preprocess_impls.hpp index 64ebfd736f4ff3..4d72a826a286e3 100644 --- a/src/core/src/preprocess/preprocess_impls.hpp +++ b/src/core/src/preprocess/preprocess_impls.hpp @@ -37,7 +37,26 @@ class ModelInfoImpl { class InputModelInfo::InputModelInfoImpl : public ModelInfoImpl {}; -class OutputModelInfo::OutputModelInfoImpl : public ModelInfoImpl {}; +class OutputModelInfo::OutputModelInfoImpl : public ModelInfoImpl { +public: + void set_color_format(const ColorFormat& color_format, const std::vector& sub_names = {}) { + m_color_format_set = (color_format == ColorFormat::RGB) || (color_format == ColorFormat::BGR); + OPENVINO_ASSERT(m_color_format_set); + m_color_format = color_format; + m_planes_sub_names = sub_names; + } + bool is_color_format_set() const { + return m_color_format_set; + } + const ColorFormat& get_color_format() const { + return m_color_format; + } + +private: + ColorFormat m_color_format = ColorFormat::UNDEFINED; + std::vector m_planes_sub_names{}; + bool m_color_format_set = false; +}; /// \brief OutputInfoImpl - internal data structure struct OutputInfo::OutputInfoImpl { diff --git a/src/core/src/preprocess/preprocess_steps_impl.cpp b/src/core/src/preprocess/preprocess_steps_impl.cpp index e155f89f5ac88d..fadf5330c80fc1 100644 --- a/src/core/src/preprocess/preprocess_steps_impl.cpp 
+++ b/src/core/src/preprocess/preprocess_steps_impl.cpp @@ -695,5 +695,76 @@ void PostStepsList::add_convert_layout_impl(const std::vector& dims) { }, "convert layout " + vector_to_string(dims)); } + +void PostStepsList::add_convert_color_impl(const ColorFormat& dst_format) { + m_actions.emplace_back( + [dst_format](const Output& node, PostprocessingContext& context) { + if (context.color_format() == dst_format) { + return std::make_tuple(node, false); + } else if ((context.color_format() == ColorFormat::RGB || context.color_format() == ColorFormat::BGR) && + (dst_format == ColorFormat::RGB || dst_format == ColorFormat::BGR)) { + auto res = reverse_channels({node}, context); + context.color_format() = dst_format; + return res; + } else { + OPENVINO_THROW("Source color format '", + color_format_name(context.color_format()), + "' is not convertible to '", + color_format_name(dst_format), + "'"); + } + }, + "convert color (" + color_format_name(dst_format) + ")"); +} + +std::tuple, bool> PostStepsList::reverse_channels(const Output& node, + PostprocessingContext& context) { + OPENVINO_ASSERT(ov::layout::has_channels(context.layout()), + "Layout ", + context.layout().to_string(), + " doesn't have `channels` dimension"); + const auto& shape = node.get_partial_shape(); + if (shape.rank().is_static()) { + // This block of code is to preserve output shape if it contains dynamic dimensions + // Otherwise, dynamic version will transform shape {?,3,?,?} to {?,?,?,?} which is still ok but not desired + auto channels_idx = get_and_check_channels_idx(context.layout(), shape); + if (shape[channels_idx].is_static()) { + auto channels_count = shape[channels_idx].get_length(); + // Add range from constants + auto range_from = op::v0::Constant::create(element::i64, {}, {channels_count - 1}); + auto range_to = op::v0::Constant::create(element::i64, {}, {-1}); + auto range_step = op::v0::Constant::create(element::i64, {}, {-1}); + auto range = std::make_shared(range_from, range_to, range_step, element::i32); + + auto constant_axis = op::v0::Constant::create(element::i32, {1}, {channels_idx}); + auto convert = std::make_shared(node, range, constant_axis); + return std::make_tuple(convert, false); + } + } + + auto channels_idx = ov::layout::channels_idx(context.layout()); + // Get shape of user's input tensor (e.g. Tensor[1, 3, 224, 224] -> {1, 3, 224, 224}) + auto shape_of = std::make_shared(node); // E.g. {1, 3, 224, 224} + + auto constant_chan_idx = op::v0::Constant::create(element::i32, {}, {channels_idx}); // E.g. 1 + auto constant_chan_axis = op::v0::Constant::create(element::i32, {}, {0}); + // Gather will return scalar with number of channels (e.g. 3) + auto gather_channels_num = std::make_shared(shape_of, constant_chan_idx, constant_chan_axis); + + // Create Range from channels_num-1 to 0 (e.g. {2, 1, 0}) + auto const_minus1 = op::v0::Constant::create(element::i64, {}, {-1}); + auto channels_num_minus1 = std::make_shared(gather_channels_num, const_minus1); // E.g. 3-1=2 + // Add range + auto range_to = op::v0::Constant::create(element::i64, {}, {-1}); + auto range_step = op::v0::Constant::create(element::i64, {}, {-1}); + // E.g. 
{2, 1, 0} + auto range = std::make_shared(channels_num_minus1, range_to, range_step, element::i32); + + // Gather slices in reverse order (indexes are specified by 'range' operation) + auto constant_axis = op::v0::Constant::create(element::i32, {1}, {channels_idx}); + auto gather = std::make_shared(node, range, constant_axis); + return std::make_tuple(gather, false); +} + } // namespace preprocess } // namespace ov diff --git a/src/core/src/preprocess/preprocess_steps_impl.hpp b/src/core/src/preprocess/preprocess_steps_impl.hpp index 16b5a43c6ff384..79d6e0ef297888 100644 --- a/src/core/src/preprocess/preprocess_steps_impl.hpp +++ b/src/core/src/preprocess/preprocess_steps_impl.hpp @@ -92,10 +92,19 @@ class PrePostProcessingContextBase { return m_target_element_type; } + const ColorFormat& color_format() const { + return m_color_format; + } + + ColorFormat& color_format() { + return m_color_format; + } + protected: Layout m_layout; Layout m_target_layout; element::Type m_target_element_type; + ColorFormat m_color_format = ColorFormat::UNDEFINED; }; /// \brief Preprocessing context passed to each preprocessing operation. @@ -126,18 +135,9 @@ class PreprocessingContext : public PrePostProcessingContextBase { return model_shape()[model_width_idx].get_length(); } - const ColorFormat& color_format() const { - return m_color_format; - } - - ColorFormat& color_format() { - return m_color_format; - } - private: PartialShape m_model_shape; Layout m_model_layout; - ColorFormat m_color_format = ColorFormat::UNDEFINED; }; using InternalPreprocessOp = @@ -219,6 +219,7 @@ class PostStepsList { void add_convert_impl(const element::Type& type); void add_convert_layout_impl(const Layout& layout); void add_convert_layout_impl(const std::vector& dims); + void add_convert_color_impl(const ColorFormat& dst_format); const std::list& actions() const { return m_actions; @@ -227,6 +228,9 @@ class PostStepsList { return m_actions; } +private: + static std::tuple, bool> reverse_channels(const Output& nodes, PostprocessingContext& context); + private: std::list m_actions; }; diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp index 0ce5d3a8f3590e..8824abdc1b580e 100644 --- a/src/core/tests/preprocess.cpp +++ b/src/core/tests/preprocess.cpp @@ -1782,6 +1782,88 @@ TEST(pre_post_process, postprocess_keep_friendly_names_compatibility_implicit) { EXPECT_NE(node_before_result_old->get_friendly_name(), node_name); } +// --- PostProcess - convert color format --- +TEST(pre_post_process, postprocess_convert_color_format_BGR_RGB) { + auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::BGR); + p.output().postprocess().convert_color(ColorFormat::RGB); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{5, 30, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_RGB_BGR) { + auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::BGR); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{5, 30, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_RGB_BGR_dynamic_batch) { + auto f = create_simple_function(element::f32, 
PartialShape{-1, 30, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::BGR); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{-1, 30, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_RGB_BGR_dynamic_shape) { + auto f = create_simple_function(element::f32, PartialShape{-1, -1, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::BGR); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{-1, -1, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_RGB_RGB) { + auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::RGB); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{5, 30, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_BGR_BGR) { + auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::BGR); + p.output().postprocess().convert_color(ColorFormat::BGR); + f = p.build(); + + EXPECT_EQ(f->get_results().size(), 1); + EXPECT_EQ(f->get_result()->get_output_partial_shape(0), (PartialShape{5, 30, 20, 3})); +} + +TEST(pre_post_process, postprocess_convert_color_format_unsupported) { + auto f = create_simple_function(element::f32, Shape{5, 30, 20, 3}); + + EXPECT_THROW(auto p = PrePostProcessor(f); p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::GRAY); + f = p.build(), ov::Exception); + + EXPECT_THROW(auto p = PrePostProcessor(f); p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::UNDEFINED); + f = p.build(), ov::Exception); + EXPECT_THROW(auto p = PrePostProcessor(f); p.output().model().set_color_format(ColorFormat::UNDEFINED); + p.output().postprocess().convert_color(ColorFormat::BGR); + f = p.build(), ov::AssertFailure); +} + // Postprocessing - other TEST(pre_post_process, postprocess_preserve_rt_info) { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp index b8379e24fbf093..7a2b8c1ef62001 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp @@ -14,3 +14,10 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Combine(::testing::ValuesIn(ov::builder::preprocess::generic_preprocess_functions()), ::testing::Values(ov::test::utils::DEVICE_CPU)), PrePostProcessTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_PostProcess, + PostProcessTest, + ::testing::Combine(::testing::ValuesIn(ov::builder::preprocess::generic_postprocess_functions()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + PostProcessTest::getTestCaseName); diff 
--git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp index 32ecadf49663b4..0d28022a7a915e 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/preprocess.cpp @@ -49,4 +49,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_PrePostProcess_GPU, ::testing::Values(ov::test::utils::DEVICE_GPU)), PrePostProcessTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_PostProcess, + PostProcessTest, + ::testing::Combine(::testing::ValuesIn(ov::builder::preprocess::generic_postprocess_functions()), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + PostProcessTest::getTestCaseName); + } // namespace diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp index 865d8c3d92b2f9..306885bde3e519 100644 --- a/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp +++ b/src/plugins/template/tests/functional/subgraph_reference/preprocess.cpp @@ -1159,6 +1159,38 @@ static RefPreprocessParams post_convert_layout_by_dims_multi() { return res; } +static RefPreprocessParams post_convert_color_rgb_to_bgr() { + RefPreprocessParams res("post_convert_color_rgb_to_bgr"); + res.function = []() { + auto f = create_simple_function(element::f32, Shape{2, 1, 1, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::BGR); + p.build(); + return f; + }; + + res.inputs.emplace_back(Shape{2, 3, 1, 1}, element::f32, std::vector{1, 2, 3, 4, 5, 6}); + res.expected.emplace_back(Shape{2, 3, 1, 1}, element::f32, std::vector{3, 2, 1, 6, 5, 4}); + return res; +} + +static RefPreprocessParams post_convert_color_bgr_to_rgb() { + RefPreprocessParams res("post_convert_color_bgr_to_rgb"); + res.function = []() { + auto f = create_simple_function(element::f32, Shape{2, 1, 1, 3}); + auto p = PrePostProcessor(f); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::BGR); + p.output().postprocess().convert_color(ColorFormat::RGB); + p.build(); + return f; + }; + + res.inputs.emplace_back(Shape{2, 3, 1, 1}, element::f32, std::vector{1, 2, 3, 4, 5, 6}); + res.expected.emplace_back(Shape{2, 3, 1, 1}, element::f32, std::vector{3, 2, 1, 6, 5, 4}); + return res; +} + static RefPreprocessParams pre_and_post_processing() { RefPreprocessParams res("pre_and_post_processing"); res.function = []() { @@ -1382,6 +1414,8 @@ std::vector allPreprocessTests() { postprocess_2_inputs_basic(), post_convert_layout_by_dims(), post_convert_layout_by_dims_multi(), + post_convert_color_rgb_to_bgr(), + post_convert_color_bgr_to_rgb(), pre_and_post_processing(), rgb_to_bgr(), bgr_to_rgb(), diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp index 59a242990c5e2c..dfb218ad1a89b2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/preprocess.hpp @@ -29,6 +29,21 @@ class PrePostProcessTest : public testing::WithParamInterface; // Device name + +class PostProcessTest : public 
testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); + +protected: + void SetUp() override; + +public: + std::string func_name; +}; + } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/postprocess.cpp b/src/tests/functional/shared_test_classes/src/subgraph/postprocess.cpp new file mode 100644 index 00000000000000..072edac1071070 --- /dev/null +++ b/src/tests/functional/shared_test_classes/src/subgraph/postprocess.cpp @@ -0,0 +1,49 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/subgraph/preprocess.hpp" + +#include "openvino/core/preprocess/pre_post_process.hpp" +#include "ov_models/preprocess/preprocess_builders.hpp" + +using namespace ov; +using namespace ov::preprocess; +using namespace ov::builder::preprocess; + +namespace ov { +namespace test { + +std::string PostProcessTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::string targetName; + postprocess_func func; + + std::tie(func, targetName) = obj.param; + std::ostringstream result; + result << "Func=" << func.m_name << "_"; + result << "Device=" << targetName << ""; + return result.str(); +} + +void PostProcessTest::SetUp() { + postprocess_func func; + std::tie(func, targetDevice) = GetParam(); + function = func.m_function(); + rel_threshold = func.m_accuracy; + functionRefs = function->clone(); + abs_threshold = func.m_accuracy; + if (func.m_shapes.empty()) { + for (const auto& input : function->inputs()) { + func.m_shapes.push_back(input.get_shape()); + } + } + init_input_shapes(ov::test::static_shapes_to_test_representation(func.m_shapes)); + func_name = func.m_name; +} + +TEST_P(PostProcessTest, CompareWithRefs) { + run(); +} + +} // namespace test +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp index 7367de93d4fad4..4dcb76c5c42589 100644 --- a/src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/preprocess/preprocess_builders.hpp @@ -33,6 +33,9 @@ struct preprocess_func { inline std::vector generic_preprocess_functions(); +using postprocess_func = preprocess_func; +inline std::vector generic_postprocess_functions(); + /// -------- Functions --------------- inline std::shared_ptr create_preprocess_1input(element::Type type, const PartialShape& shape) { @@ -41,6 +44,7 @@ inline std::shared_ptr create_preprocess_1input(element::Type type, const data1->output(0).get_tensor().set_names({"input1"}); std::shared_ptr res; auto op1 = std::make_shared(data1); + op1->set_friendly_name("abs1"); if (type == element::f32) { res = std::make_shared(op1); } else { @@ -476,6 +480,33 @@ inline std::vector generic_preprocess_functions() { }; } +inline std::shared_ptr cvt_color_rgb_to_bgr() { + using namespace ov::preprocess; + auto function = create_preprocess_1input(element::f32, PartialShape{1, 20, 30, 3}); + auto p = PrePostProcessor(function); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::RGB); + p.output().postprocess().convert_color(ColorFormat::BGR); + function = p.build(); + return function; +} + +inline std::shared_ptr cvt_color_bgr_to_rgb() { + using namespace ov::preprocess; + auto function = create_preprocess_1input(element::f32, 
PartialShape{1, 20, 30, 3}); + auto p = PrePostProcessor(function); + p.output().model().set_layout("NHWC").set_color_format(ColorFormat::BGR); + p.output().postprocess().convert_color(ColorFormat::RGB); + function = p.build(); + return function; +} + +inline std::vector generic_postprocess_functions() { + return std::vector{ + postprocess_func(cvt_color_rgb_to_bgr, "convert_color_rgb_to_bgr", 1e-5f), + postprocess_func(cvt_color_bgr_to_rgb, "convert_color_bgr_to_rgb", 1e-5f), + }; +} + } // namespace preprocess } // namespace builder } // namespace ov From 7bdd7fc5b6b8058ca36f29f53ac243d0ddad1196 Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Thu, 11 Jan 2024 15:34:27 +0000 Subject: [PATCH 03/43] [GHA] [DOCS] Docker images document (#22061) * slightly adapt wording in overview, start with runners doc * populate table; add section about runners choosing * wording * use runners * add doc about Docker images * use better formatting for pros and cons * fix typo --- docs/dev/ci/github_actions/docker_images.md | 73 +++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/docs/dev/ci/github_actions/docker_images.md b/docs/dev/ci/github_actions/docker_images.md index b362c93863655f..37ed7f1ac1e7f0 100644 --- a/docs/dev/ci/github_actions/docker_images.md +++ b/docs/dev/ci/github_actions/docker_images.md @@ -1,5 +1,78 @@ # Overview of the Docker Images used in the OpenVINO GitHub Actions CI +Most of the workflows in the OpenVINO GHA CI use [self-hosted machines with dynamically spawned runners](./runners.md) to execute jobs. + +To avoid corruption of the runners and machines, the workflows utilize various Docker images that introduce a layer of protection for the self-hosted machines. + +The Docker images are specified for each job using the `container` key. See the [GHA documentation](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) for the syntax reference. + +An example `Build` job from the `linux.yml` workflow: +```yaml +Build: + ... + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + volumes: + - /mount:/mount + ... +``` + +The `openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04` Docker image is used for this job. + +Additionally, it is possible to make the caches available in the Docker containers using the `volumes` key. +Read more about the available caches and how to choose one [here](./caches.md). + ## Available Docker Images +The jobs in the workflows utilize appropriate Docker images based on a job's needs. + +As the self-hosted machines are hosted in [Microsoft Azure](https://azure.microsoft.com/en-us), +it is optimal to use the Docker images hosted in an instance of [Azure Container Registry (ACR)](https://azure.microsoft.com/en-us/products/container-registry). + +The ACR used for the OpenVINO GHA CI is `openvinogithubactions.azurecr.io`. + +Some pros and cons of having a dedicated container registry are: +* pros: + * No pull limits for the images + * There are [limits](https://docs.docker.com/docker-hub/download-rate-limit/) on pulls from Docker Hub + * Fast pulling speeds +* cons: + * The registry should be populated with the needed images before use + * The registry does not mirror the images available on Docker Hub automatically + * The needed images should be added manually to the registry (see the import sketch below) + +As the number of enabled workflows grew, so did the number of available Docker images.
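+
+For illustration, a new image could be mirrored into the ACR with the Azure CLI; this is a sketch only (the registry name is inferred from the ACR URL above, and the exact procedure and permissions should be confirmed with the CI team):
+
+```sh
+# Mirror ubuntu:22.04 from Docker Hub into the OpenVINO ACR under the "dockerhub" prefix
+az acr import --name openvinogithubactions \
+  --source docker.io/library/ubuntu:22.04 \
+  --image dockerhub/ubuntu:22.04
+```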
+ +The available Docker images are named using the pattern `openvinogithubactions.azurecr.io/dockerhub/<image_name>:<tag>`. + +Most of the images on the OpenVINO ACR are mirrors of the images with the same names on Docker Hub. + +Examples: +* `openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04` corresponds to `ubuntu:20.04` from Docker Hub +* `openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04` corresponds to `ubuntu:22.04` from Docker Hub +* `openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04` corresponds to `nvidia/cuda:11.8.0-runtime-ubuntu20.04` from Docker Hub + ## How to choose an Image + +The Docker image required for a job stems from the nature of the job and the configuration being tested. + +An example `Build` job from the `linux.yml` workflow: +```yaml +Build: + ... + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + volumes: + - /mount:/mount + ... +``` + +The `openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04` Docker image is used for this job and **for other jobs in the workflow**. +Usually, if one Docker image is used for the build job, the other jobs use the same image for testing. + +If the tests do not require any specific OS or distribution, it is best to use one of the already available images, e.g., `openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04`. + +If the plan is to test some specific OS or distribution (e.g., `fedora`), +the Docker image for this flavour should first be uploaded to the OpenVINO ACR and only then used in a workflow. + +Contact someone from the CI team for assistance with image uploading. From a8f7b82278a742a2b695daf4b19675ddadb06881 Mon Sep 17 00:00:00 2001 From: Edward Shogulin Date: Thu, 11 Jan 2024 15:54:54 +0000 Subject: [PATCH 04/43] [LPT] [GPU] MVN fp16 fix (#21946) * [LPT] MVN fp16 fix * tests * test for similar case: NormalizeL2 --- .../low_precision_transformations/src/mvn.cpp | 14 ++++++-------- .../tests/mvn_transformation.cpp | 15 +++++++++++++++ .../tests/normalize_l2_transformation.cpp | 13 +++++++++++++ 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/src/common/low_precision_transformations/src/mvn.cpp b/src/common/low_precision_transformations/src/mvn.cpp index a2f86519ec4208..770f9813022cd4 100644 --- a/src/common/low_precision_transformations/src/mvn.cpp +++ b/src/common/low_precision_transformations/src/mvn.cpp @@ -27,7 +27,7 @@ using namespace ov::pass::low_precision; namespace mvn { template -std::shared_ptr createNewScalesConst(const ov::op::v0::Constant& originalConst) { +std::shared_ptr createNewScalesConst(const ov::op::v0::Constant& originalConst, const ov::element::Type& precision) { std::vector source = originalConst.cast_vector(); std::vector newData(source.size()); newData[i] = source[i] < 0 ?
T{-1} : T{1}; } - const ov::element::Type type = originalConst.get_output_element_type(0); - return ov::op::v0::Constant::create(type, originalConst.get_shape(), newData); + return ov::op::v0::Constant::create(precision, originalConst.get_shape(), newData); } } // namespace mvn @@ -138,21 +137,20 @@ bool MVNTransformation::transform(TransformationContext &context, ov::pass::patt FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(mvn, defaultPrecisions); const auto scalesConst = dequantization.multiplyConstant; - const auto type = scalesConst->get_element_type(); auto newScalesConst = scalesConst; if (normalizeVariance) { - switch (type) { + switch (deqPrecision) { case ov::element::Type_t::f16: { - newScalesConst = mvn::createNewScalesConst::value_type>(*scalesConst); + newScalesConst = mvn::createNewScalesConst::value_type>(*scalesConst, deqPrecision); break; } case ov::element::Type_t::f32: { - newScalesConst = mvn::createNewScalesConst::value_type>(*scalesConst); + newScalesConst = mvn::createNewScalesConst::value_type>(*scalesConst, deqPrecision); break; } default: { - THROW_TRANSFORMATION_EXCEPTION << "unexpected element type " << type; + THROW_TRANSFORMATION_EXCEPTION << "unexpected element type " << deqPrecision; } } } diff --git a/src/common/low_precision_transformations/tests/mvn_transformation.cpp b/src/common/low_precision_transformations/tests/mvn_transformation.cpp index 004324dd198ffb..0c840c645754e8 100644 --- a/src/common/low_precision_transformations/tests/mvn_transformation.cpp +++ b/src/common/low_precision_transformations/tests/mvn_transformation.cpp @@ -134,6 +134,21 @@ const std::vector inputShapes = { }; const std::vector testValues = { + { + {1, 2, 3}, + true, + LayerTransformation::createParamsU8I8().setSupportAsymmetricQuantization(false), + { + ov::element::f16, + {{ov::element::f16}, {}, {{0.45f}, ov::element::f16, {}, false, 1ul, ov::element::f16}} + }, + { + ov::element::f16, + { }, + ov::element::f32, + {{}, {}, {1.f}}, + } + }, { {1, 2, 3}, true, diff --git a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp index a9aea8fbdad3ae..718cabc532a899 100644 --- a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp +++ b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp @@ -132,6 +132,19 @@ const std::vector shapes = { }; const std::vector normalizeL2TransformationTestValues = { + { + LayerTransformation::createParamsU8I8().setSupportAsymmetricQuantization(false), + { + ov::element::f16, + {{ov::element::f16}, {}, {{-12.3f}, ov::element::f16, {}, false, 1ul, ov::element::f16}} + }, + { + ov::element::f16, + { }, + ov::element::f32, + {{}, {}, {{-1.f}, ov::element::f16, {}, false, 1ul, ov::element::f16}}, + } + }, // U8 per tensor quantization { LayerTransformation::createParamsU8I8(), From 45fafab3b99b5a6f81d1fdab82ff4b1c7dce62e0 Mon Sep 17 00:00:00 2001 From: Sofya Balandina Date: Thu, 11 Jan 2024 17:25:55 +0000 Subject: [PATCH 05/43] [api conformance] Add tests for wrong rw properties (#21998) --- .../src/ov_plugin/properties.cpp | 12 +++ .../behavior/ov_plugin/properties_tests.hpp | 3 + .../behavior/ov_plugin/properties_tests.cpp | 77 +++++++++++++++++++ .../CPU/expected_failures_API.csv | 3 + 4 files changed, 95 insertions(+) diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp 
b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp index 517f8363f0327f..1302cbcd11c020 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/ov_plugin/properties.cpp @@ -57,6 +57,18 @@ INSTANTIATE_TEST_SUITE_P(ov_plugin, OVCheckSetSupportedRWMetricsPropsTests, ::testing::ValuesIn(OVCheckSetSupportedRWMetricsPropsTests::getRWOptionalPropertiesValues())), OVCheckSetSupportedRWMetricsPropsTests::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, OVCheckSetIncorrectRWMetricsPropsTests, + ::testing::Combine( + ::testing::Values(targetDevice), + ::testing::ValuesIn(OVCheckSetIncorrectRWMetricsPropsTests::getWrongRWMandatoryPropertiesValues())), + OVCheckSetIncorrectRWMetricsPropsTests::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(ov_plugin, OVCheckSetIncorrectRWMetricsPropsTests, + ::testing::Combine( + ::testing::Values(targetDevice), + ::testing::ValuesIn(OVCheckSetIncorrectRWMetricsPropsTests::getWrongRWOptionalPropertiesValues())), + OVCheckSetIncorrectRWMetricsPropsTests::getTestCaseName); + INSTANTIATE_TEST_SUITE_P(ov_plugin_mandatory, OVCheckChangePropComplieModleGetPropTests_DEVICE_ID, ::testing::Combine( ::testing::Values(targetDevice), diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp index cecd117f3529a7..253de74b5e46a0 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp @@ -72,12 +72,15 @@ class OVPropertiesTestsWithCompileModelProps : public testing::WithParamInterfac static std::vector configureProperties(std::vector props); static std::vector getRWMandatoryPropertiesValues(std::vector props = {}); + static std::vector getWrongRWMandatoryPropertiesValues(std::vector props = {}); static std::vector getRWOptionalPropertiesValues(std::vector props = {}); + static std::vector getWrongRWOptionalPropertiesValues(std::vector props = {}); static std::vector getModelDependcePropertiesValues(); }; using OVCheckSetSupportedRWMetricsPropsTests = OVPropertiesTestsWithCompileModelProps; +using OVCheckSetIncorrectRWMetricsPropsTests = OVPropertiesTestsWithCompileModelProps; using OVCheckGetSupportedROMetricsPropsTests = OVPropertiesTestsWithCompileModelProps; using OVCheckChangePropComplieModleGetPropTests_DEVICE_ID = OVPropertiesTestsWithCompileModelProps; using OVCheckChangePropComplieModleGetPropTests_InferencePrecision = OVPropertiesTestsWithCompileModelProps; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index cf1566dc5e4809..56ffa5a1be4448 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -281,6 +281,32 @@ std::vector OVPropertiesTestsWithCompileModelProps::getRWMandatoryPr return res; } +std::vector OVPropertiesTestsWithCompileModelProps::getWrongRWMandatoryPropertiesValues(std::vector props) { + std::vector res; + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::performance_mode.name()) != props.end()) { + 
res.push_back({{ov::hint::performance_mode.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::num_requests.name()) != props.end()) { + res.push_back({{ov::hint::num_requests.name(), -10}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::execution_mode.name()) != props.end()) { + res.push_back({{ov::hint::execution_mode.name(), 5}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::enable_profiling.name()) != props.end()) { + res.push_back({{ov::enable_profiling.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::streams::num.name()) != props.end()) { + res.push_back({ov::streams::num(-10)}); + } + + return res; +} + std::vector OVPropertiesTestsWithCompileModelProps::getRWOptionalPropertiesValues(std::vector props) { std::vector res; @@ -323,6 +349,57 @@ std::vector OVPropertiesTestsWithCompileModelProps::getRWOptionalPro return res; } +std::vector OVPropertiesTestsWithCompileModelProps::getWrongRWOptionalPropertiesValues(std::vector props) { + std::vector res; + + if (props.empty() || std::find(props.begin(), props.end(), ov::inference_num_threads.name()) != props.end()) { + res.push_back({{ov::inference_num_threads.name(), -1}}); + res.push_back({{ov::compilation_num_threads.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::affinity.name()) != props.end()) { + res.push_back({{ov::affinity.name(), -5}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::enable_hyper_threading.name()) != props.end()) { + res.push_back({{ov::hint::enable_hyper_threading.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::enable_cpu_pinning.name()) != props.end()) { + res.push_back({{ov::hint::enable_cpu_pinning.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::hint::scheduling_core_type.name()) != props.end()) { + res.push_back({{ov::hint::scheduling_core_type.name(), -1}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::enable_mmap.name()) != props.end()) { + res.push_back({{ov::enable_mmap.name(), -10}}); + } + + if (props.empty() || std::find(props.begin(), props.end(), ov::log::level.name()) != props.end()) { + res.push_back({{ov::log::level.name(), -3}}); + } + + return res; +} + +TEST_P(OVCheckSetIncorrectRWMetricsPropsTests, ChangeIncorrectProperties) { + std::vector supported_properties; + OV_ASSERT_NO_THROW(supported_properties = core->get_property(target_device, ov::supported_properties)); + for (const std::pair& property_item : properties) { + auto supported = util::contains(supported_properties, property_item.first); + ASSERT_TRUE(supported) << "property is not supported: " << property_item.first; + + EXPECT_THROW(core->set_property(target_device, {property_item}), ov::Exception); + + ov::Any default_property; + OV_ASSERT_NO_THROW(default_property = core->get_property(target_device, property_item.first)); + ASSERT_FALSE(default_property.empty()); + core->compile_model(model, target_device, compileModelProperties); + } +} + TEST_P(OVCheckSetSupportedRWMetricsPropsTests, ChangeCorrectProperties) { std::vectorsupported_properties; OV_ASSERT_NO_THROW(supported_properties = core->get_property(target_device, ov::supported_properties)); diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv 
b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv index db8f560869e79c..d45bbc6e8afce5 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv @@ -1657,6 +1657,9 @@ ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boo ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_HETERO.CPU,0 ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_BATCH.CPU,0 ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_AUTO.CPU,0 +ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={ENABLE_MMAP:-10},1.0 +ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={LOG_LEVEL:-3},1.0 +ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:-1},1.0 ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0 From 951e58ee927e7bb1909bcc5ff0d9a4007146bed5 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 11 Jan 2024 19:44:38 +0100 Subject: [PATCH 06/43] Remove ngraph shape utils (#22059) --- src/core/include/ngraph/ngraph.hpp | 1 - src/core/include/ngraph/shape_util.hpp | 101 ------------------------- src/core/src/shape_util.cpp | 71 +---------------- src/core/tests/partial_shape.cpp | 47 ------------ 4 files changed, 1 insertion(+), 219 deletions(-) delete mode 100644 src/core/include/ngraph/shape_util.hpp diff --git a/src/core/include/ngraph/ngraph.hpp b/src/core/include/ngraph/ngraph.hpp index a916781e28ec9d..ad8c918d5229ee 100644 --- a/src/core/include/ngraph/ngraph.hpp +++ b/src/core/include/ngraph/ngraph.hpp @@ -62,7 +62,6 @@ #include "ngraph/partial_shape.hpp" #include "ngraph/rt_info.hpp" #include "ngraph/shape.hpp" -#include "ngraph/shape_util.hpp" #include "ngraph/specialize_function.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" diff --git a/src/core/include/ngraph/shape_util.hpp b/src/core/include/ngraph/shape_util.hpp deleted file mode 100644 index e0b3dd6b66fcb8..00000000000000 --- a/src/core/include/ngraph/shape_util.hpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/partial_shape.hpp" - -namespace ngraph { -template -NGRAPH_API_DEPRECATED AXIS_VALUES project(const AXIS_VALUES& axis_values, const AxisSet& axes) { - AXIS_VALUES result; - - for (size_t i = 0; i < axis_values.size(); i++) { - if (axes.find(i) != axes.end()) { - result.push_back(axis_values[i]); - } - } - - return result; -} - -template <> -NGRAPH_API_DEPRECATED NGRAPH_API PartialShape project(const PartialShape& shape, const AxisSet& axes); - -// Removes some values from a vector of axis values -template -NGRAPH_API_DEPRECATED AXIS_VALUES reduce(const AXIS_VALUES& axis_values, const AxisSet& deleted_axes, bool keep_dims) { - AXIS_VALUES result; - - for (size_t i = 0; i < axis_values.size(); i++) { - if (deleted_axes.find(i) == deleted_axes.end()) { - result.push_back(axis_values[i]); - } else { - if (keep_dims) - result.push_back(1); - } - } - - return result; -} - -template <> -NGRAPH_API_DEPRECATED NGRAPH_API PartialShape reduce(const PartialShape& shape, - const AxisSet& deleted_axes, - bool keep_dims); - -// TODO: check validity, i.e. that the new axis indices are all less than -// axis_values.size()+num_new_axes. -// Add new values at particular axis positions -template -NGRAPH_API_DEPRECATED AXIS_VALUES inject_pairs(const AXIS_VALUES& axis_values, - std::vector> new_axis_pos_value_pairs) { - AXIS_VALUES result; - - size_t original_pos = 0; - - for (size_t result_pos = 0; result_pos < axis_values.size() + new_axis_pos_value_pairs.size(); result_pos++) { - // Would be nice to use std::find_if here but would rather not #include in - // this header - auto search_it = new_axis_pos_value_pairs.begin(); - - while (search_it != new_axis_pos_value_pairs.end()) { - if (search_it->first == result_pos) { - break; - } - ++search_it; - } - - if (search_it == new_axis_pos_value_pairs.end()) { - result.push_back(axis_values[original_pos++]); - } else { - result.push_back(search_it->second); - } - } - - return result; -} - -template <> -NGRAPH_API_DEPRECATED NGRAPH_API PartialShape -inject_pairs(const PartialShape& shape, std::vector> new_axis_pos_value_pairs); - -// Add a new value at a particular axis position -template -NGRAPH_API_DEPRECATED AXIS_VALUES inject(const AXIS_VALUES& axis_values, size_t new_axis_pos, AXIS_VALUE new_axis_val) { - return inject_pairs( - axis_values, - std::vector>{std::pair(new_axis_pos, new_axis_val)}); -} -} // namespace ngraph diff --git a/src/core/src/shape_util.cpp b/src/core/src/shape_util.cpp index d84d8153a92059..ae89c4fe106cb5 100644 --- a/src/core/src/shape_util.cpp +++ b/src/core/src/shape_util.cpp @@ -2,82 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/shape_util.hpp" +#include "openvino/core/shape_util.hpp" #include #include "openvino/core/partial_shape.hpp" -#include "openvino/core/shape_util.hpp" #include "validation_util.hpp" -namespace ngraph { -template <> -PartialShape project(const PartialShape& shape, const AxisSet& axes) { - if (shape.rank().is_dynamic()) { - return shape; - } else { - std::vector result_dims; - - for (int64_t i = 0; i < shape.rank().get_length(); i++) { - if (axes.find(i) != axes.end()) { - 
result_dims.push_back(shape[i]); - } - } - - return PartialShape(result_dims); - } -} - -template <> -PartialShape reduce(const PartialShape& shape, const AxisSet& deleted_axes, bool keep_dims) { - if (shape.rank().is_dynamic()) { - return shape; - } else { - std::vector result_dims; - - for (int64_t i = 0; i < shape.rank().get_length(); i++) { - if (deleted_axes.find(i) == deleted_axes.end()) { - result_dims.push_back(shape[i]); - } else { - if (keep_dims) - result_dims.emplace_back(1); - } - } - - return result_dims; - } -} - -template <> -PartialShape inject_pairs(const PartialShape& shape, - std::vector> new_axis_pos_value_pairs) { - if (shape.rank().is_dynamic()) { - return shape; - } else { - std::vector result_dims; - - size_t original_pos = 0; - - for (size_t result_pos = 0; result_pos < shape.rank().get_length() + new_axis_pos_value_pairs.size(); - result_pos++) { - auto search_it = std::find_if(new_axis_pos_value_pairs.begin(), - new_axis_pos_value_pairs.end(), - [result_pos](std::pair p) { - return p.first == result_pos; - }); - - if (search_it == new_axis_pos_value_pairs.end()) { - result_dims.push_back(shape[original_pos++]); - } else { - result_dims.push_back(search_it->second); - } - } - - return PartialShape{result_dims}; - } -} -} // namespace ngraph - namespace ov { template TContainer reduce_container(const TContainer& input, const TAxes& axes) { diff --git a/src/core/tests/partial_shape.cpp b/src/core/tests/partial_shape.cpp index d3817a7d5c935d..0bd1fad1342bb1 100644 --- a/src/core/tests/partial_shape.cpp +++ b/src/core/tests/partial_shape.cpp @@ -7,7 +7,6 @@ #include #include "common_test_utils/test_tools.hpp" -#include "ngraph/shape_util.hpp" #include "ngraph/validation_util.hpp" #include "openvino/core/coordinate_diff.hpp" #include "openvino/core/descriptor/tensor.hpp" @@ -691,52 +690,6 @@ TEST(partial_shape, partial_shape_relaxes_refines_static_static_not_eq) { ASSERT_FALSE(s2.relaxes(s1)); } -OPENVINO_SUPPRESS_DEPRECATED_START -TEST(partial_shape, partial_shape_project_rank_dynamic) { - PartialShape s1{PartialShape::dynamic()}; - PartialShape s2 = ngraph::project(s1, AxisSet{284, 0, 103}); - - ASSERT_TRUE(s2.rank().is_dynamic()); -} - -TEST(partial_shape, partial_shape_project_rank_static_dynamic) { - PartialShape s1{Dimension::dynamic(), 2, Dimension::dynamic(), 3}; - PartialShape s2 = ngraph::project(s1, AxisSet{0, 3}); - - ASSERT_TRUE(s2.same_scheme(PartialShape{Dimension::dynamic(), 3})); -} - -TEST(partial_shape, partial_shape_reduce_rank_dynamic) { - PartialShape s1{PartialShape::dynamic()}; - PartialShape s2 = ngraph::reduce(s1, AxisSet{284, 0, 103}, false); - - ASSERT_TRUE(s2.rank().is_dynamic()); -} - -TEST(partial_shape, partial_shape_reduce_rank_static_dynamic) { - PartialShape s1{Dimension::dynamic(), 2, Dimension::dynamic(), 3}; - PartialShape s2 = ngraph::reduce(s1, AxisSet{0, 3}, false); - - ASSERT_TRUE(s2.same_scheme(PartialShape{2, Dimension::dynamic()})); -} - -TEST(partial_shape, partial_shape_inject_pairs_rank_dynamic) { - PartialShape s1{PartialShape::dynamic()}; - PartialShape s2 = - ngraph::inject_pairs(s1, std::vector>{{0, Dimension::dynamic()}, {207, 909}}); - - ASSERT_TRUE(s2.rank().is_dynamic()); -} - -TEST(partial_shape, partial_shape_inject_pairs_rank_static) { - PartialShape s1{1, Dimension::dynamic()}; - PartialShape s2 = ngraph::inject_pairs( - s1, - std::vector>{{0, Dimension::dynamic()}, {2, 909}, {4, Dimension::dynamic()}}); - - ASSERT_TRUE(s2.same_scheme(PartialShape{Dimension::dynamic(), 1, 909, Dimension::dynamic(), 
Dimension::dynamic()})); -} - TEST(partial_shape, merge_rank_dyn_dyn) { PartialShape s{PartialShape::dynamic()}; From d843a3355158f87407543f273b9d5b046bd1a347 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Thu, 11 Jan 2024 20:14:04 +0100 Subject: [PATCH 07/43] [PyOV] clean up after legacy python api removal (#22101) --- .github/workflows/job_python_unit_tests.yml | 2 +- .github/workflows/py_checks.yml | 12 ++++++------ .github/workflows/windows.yml | 2 +- src/bindings/python/CMakeLists.txt | 4 +--- src/bindings/python/docs/test_examples.md | 17 +++-------------- 5 files changed, 12 insertions(+), 25 deletions(-) diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index e740525a7f2542..274aaaacea3223 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -121,7 +121,7 @@ jobs: # Tests # - - name: Python API 2.0 Tests + - name: Python API Tests # if: ${{ fromJSON(inputs.affected-components).Python_API.test && runner.arch != 'ARM64' }} # Ticket: 126380, 127101 run: | # for 'template' extension diff --git a/.github/workflows/py_checks.yml b/.github/workflows/py_checks.yml index 56b58faacf0c16..7dbe07752766db 100644 --- a/.github/workflows/py_checks.yml +++ b/.github/workflows/py_checks.yml @@ -53,12 +53,12 @@ jobs: name: samples_diff path: samples_diff.diff - # Python API 2.0 Flake code-style - - name: Run flake8 on Python API 2.0 + # Python API Flake code-style + - name: Run flake8 on Python API run: python -m flake8 ./src/openvino --config=setup.cfg working-directory: src/bindings/python - - name: Create code style diff for Python API 2.0 + - name: Create code style diff for Python API if: failure() run: | python -m black -l 160 -S ./ @@ -89,14 +89,14 @@ jobs: name: wheel_diff path: wheel_diff.diff - # Python API 2.0 tests Flake code-style + # Python API tests Flake code-style - name: Run flake8 on python tests # ignore lack of docs in tests run: python -m flake8 tests/ --config=setup.cfg working-directory: src/bindings/python - # Python API 2.0 mypy check - - name: Run mypy on Python API 2.0 + # Python API mypy check + - name: Run mypy on Python API run: python -m mypy ./src/openvino --config-file ./setup.cfg working-directory: src/bindings/python diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 06b36f7725d1ca..e25ff48ca31128 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -428,7 +428,7 @@ jobs: # TODO: replace with Python API tests requirements python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt - - name: Python API 2.0 Tests + - name: Python API Tests #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd run: | diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt index 2e2d6786bfa476..efd9c2ea664502 100644 --- a/src/bindings/python/CMakeLists.txt +++ b/src/bindings/python/CMakeLists.txt @@ -267,9 +267,7 @@ endmacro() macro(ov_define_setup_py_dependencies) foreach(_target # Python API dependencies - _pyngraph pyopenvino py_ov_frontends - # legacy Python API 1.0 dependencies (remove before 2024.0 release) - ie_api constants + _pyopenvino py_ov_frontends # plugins ov_plugins # frontends diff --git a/src/bindings/python/docs/test_examples.md b/src/bindings/python/docs/test_examples.md index c7078c08cc8bd1..661db4c738a89f 100644 --- a/src/bindings/python/docs/test_examples.md +++ 
b/src/bindings/python/docs/test_examples.md @@ -18,7 +18,7 @@ export PYTHONPATH=PYTHONPATH:/bin/intel64/Release/python cd .../openvino/src/bindings/python/ ``` -To run OpenVINO Python API 2.0 tests: +To run OpenVINO Python API tests: ```shell pytest tests/ ``` @@ -54,22 +54,12 @@ tox There are two packages used in the project to check the codestyle of python code: *mypy* and *flake8*. Besides, OpenVINO™ uses a [custom configuration file](./../setup.cfg) to exclude some strict rules. -To check the codestyle of the Python API 2.0, run the following commands: +To check the codestyle of the Python API, run the following commands: ``` python -m flake8 ./src/openvino/ --config=setup.cfg python -m mypy ./src/openvino --config-file ./setup.cfg ``` -To check the codestyle of the nGraph Python API, run the following commands: -``` -python -m flake8 ./src/compatibility/ngraph/ --config=setup.cfg -python -m mypy ./src/compatibility/ngraph --config-file ./setup.cfg -``` -To check the codestyle of the InferenceEngine Python API, run the following commands: -``` -cd src/compatibility/openvino -python -m flake8 ./ --config=setup.cfg -python -m mypy ./ --config-file ./setup.cfg -``` + It's recommended to run the mentioned codestyle check whenever new tests are added. This check should be executed from the main Python API folder: ``` @@ -94,7 +84,6 @@ Note that name of the file is connected to the class/module to be tested. This i tests/test_frontend <-- frontend manager and extensions tests/test_runtime <-- runtime classes such as Core and Tensor tests/test_graph <-- operators and their implementation - tests/test_onnx <-- ONNX Frontend tests and validation tests/test_transformations <-- optimization passes for OV Models ### Writing of the test itself From 11bce96c61aba516fb1f3f1828aca3d7ab115d37 Mon Sep 17 00:00:00 2001 From: Andrew Kwangwoong Park Date: Fri, 12 Jan 2024 05:17:44 +0900 Subject: [PATCH 08/43] [GPU] Skip redundant permute in stateful model (#21978) * Skip redundant permute in stateful model Signed-off-by: Andrew Park * clean up code * Fix failed TCs for ov_gpu_unit_tests * minor update for realloc mem Signed-off-by: Andrew Park * Apply comment and fixed failed TCs Signed-off-by: Andrew Park * add exception case for build time optimization * Apply comments Signed-off-by: Andrew Park --------- Signed-off-by: Andrew Park --- .../dynamic_shape_gather_opts.cpp | 40 ------------ .../mark_runtime_skippable_nodes.cpp | 61 +++++++++++++++++++ .../src/graph/impls/ocl/primitive_base.hpp | 4 +- .../src/graph/include/pass_manager.h | 4 +- .../src/graph/include/primitive_inst.h | 1 + src/plugins/intel_gpu/src/graph/permute.cpp | 4 +- .../intel_gpu/src/graph/primitive_inst.cpp | 60 ++++++++++++++++-- src/plugins/intel_gpu/src/graph/program.cpp | 4 +- 8 files changed, 126 insertions(+), 52 deletions(-) delete mode 100644 src/plugins/intel_gpu/src/graph/graph_optimizer/dynamic_shape_gather_opts.cpp create mode 100644 src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/dynamic_shape_gather_opts.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/dynamic_shape_gather_opts.cpp deleted file mode 100644 index 8d340fc575e407..00000000000000 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/dynamic_shape_gather_opts.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pass_manager.h" -#include "gather_inst.h" -#include "program_helpers.h" - 
-using namespace cldnn; - -void dynamic_shape_gather_opts::run(program& p) { - auto itr = p.get_processing_order().begin(); - // Set gathers that might be skipped at runtime as can_be_optimized. - // If not set, memory dependency will not work for the nodes that are skipped at runtime - while (itr != p.get_processing_order().end()) { - auto& node = *itr++; - if (!node->is_type()) - continue; - auto& gather_node = node->as(); - // Check pattern - auto impl_params = gather_node.get_kernel_impl_params(); - if (gather_node.has_fused_primitives() || - (impl_params->get_input_layout(0).data_type != impl_params->get_output_layout().data_type) || - gather_node.get_dependency(1).is_constant() || gather_node.get_dependency(1).is_type()) - continue; - auto idx_rank = impl_params->get_input_layout(1).get_partial_shape().size(); - - if (idx_rank != 1) { - continue; - } - auto axis = impl_params->typed_desc()->axis; - if (impl_params->get_input_layout(0).get_partial_shape()[axis] == -1 - || impl_params->get_input_layout(1).get_partial_shape()[0] == -1 - || impl_params->get_input_layout(0).get_partial_shape()[axis] == impl_params->get_input_layout(1).get_partial_shape()[0]) { - // May be skipepd - gather_node.can_be_optimized(true); - GPU_DEBUG_TRACE_DETAIL << "[dynamic_shape_gather_opts] : " << gather_node.id() << "can_be_optimized" << std::endl; - } - } -} diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp new file mode 100644 index 00000000000000..81d7ee3a39bc21 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp @@ -0,0 +1,61 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pass_manager.h" +#include "gather_inst.h" +#include "permute_inst.h" +#include "kv_cache_inst.h" +#include "gemm_inst.h" +#include "program_helpers.h" + +using namespace cldnn; + +void mark_runtime_skippable_nodes::run(program& p) { + auto itr = p.get_processing_order().begin(); + while (itr != p.get_processing_order().end()) { + auto& node = *itr++; + // Set gathers that might be skipped at runtime as can_be_optimized. 
+    // If not set, memory dependency will not work for the nodes that are skipped at runtime
+        program_helpers::do_for_types<gather>(*node, [](gather_node& node){
+            // Check pattern
+            auto impl_params = node.get_kernel_impl_params();
+            if (node.has_fused_primitives() ||
+                (impl_params->get_input_layout(0).data_type != impl_params->get_output_layout().data_type) ||
+                node.get_dependency(1).is_constant() || node.get_dependency(1).is_type<data>())
+                return;
+            auto idx_rank = impl_params->get_input_layout(1).get_partial_shape().size();
+
+            if (idx_rank != 1) {
+                return;
+            }
+            auto axis = impl_params->typed_desc<gather>()->axis;
+            if (impl_params->get_input_layout(0).get_partial_shape()[axis] == -1
+                || impl_params->get_input_layout(1).get_partial_shape()[0] == -1
+                || impl_params->get_input_layout(0).get_partial_shape()[axis] == impl_params->get_input_layout(1).get_partial_shape()[0]) {
+                // May be skipped
+                node.can_be_optimized(true);
+                GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << "can_be_optimized" << std::endl;
+            }
+        });
+        program_helpers::do_for_types<permute>(*node, [](permute_node& node){
+            auto impl_params = node.get_kernel_impl_params();
+            if (node.is_output() ||
+                node.has_fused_primitives() ||
+                (impl_params->get_input_layout(0).data_type != impl_params->get_output_layout().data_type))
+                return;
+
+            // TODO: For now, all permutes with dynamic shape are applied.
+            // A more detailed pattern will need to be applied later
+            if (node.is_dynamic()) {
+                if (node.get_dependency(0).is_type<kv_cache>())
+                    return;
+                // If the user is concatenation, priority should be given to in place concat optimization at runtime
+                if (node.have_user_with_type<concatenation>() && node.get_users().size() == 1)
+                    return;
+                node.can_be_optimized(true);
+                GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << "can_be_optimized" << std::endl;
+            }
+        });
+    }
+}
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp
index 31c289e8940027..6909681c99aaa2 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/primitive_base.hpp
@@ -19,6 +19,7 @@
 #include "implementation_map.hpp"
 #include "concatenation_inst.h"
 #include "gather_inst.h"
+#include "permute_inst.h"
 
 #include
 #include
 
@@ -82,7 +83,8 @@ struct typed_primitive_impl_ocl : public typed_primitive_impl<PType> {
 
     template <typename ImplType>
     static std::unique_ptr<primitive_impl> create(const typed_program_node<PType>& arg, const kernel_impl_params& impl_param) {
         // concat buffer fusing for dynamic shape is adaptively applied at runtime. So we need to build dynamic impl at build time.
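        // Illustrative note on the condition below: for a dynamic concatenation, gather or
        // permute the skip decision is only known once runtime shapes arrive, so a real
        // dynamic kernel still has to be compiled here as a fallback; any other node that is
        // already known to be optimized out at build time takes the empty kernel_data stub.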
-        if (impl_param.can_be_optimized() && !((impl_param.is_type<concatenation>() || impl_param.is_type<gather>()) && impl_param.is_dynamic())) {
+        if (impl_param.can_be_optimized() &&
+            !((impl_param.is_type<concatenation>() || impl_param.is_type<gather>() || impl_param.is_type<permute>()) && impl_param.is_dynamic())) {
             return make_unique<ImplType>(kernel_selector::kernel_data{});
         }
         auto kernel_params = ImplType::get_kernel_params(ImplType::static_canonicalize_shapes(impl_param));
diff --git a/src/plugins/intel_gpu/src/graph/include/pass_manager.h b/src/plugins/intel_gpu/src/graph/include/pass_manager.h
index b38176fc61ac5f..0020eee07c6233 100644
--- a/src/plugins/intel_gpu/src/graph/include/pass_manager.h
+++ b/src/plugins/intel_gpu/src/graph/include/pass_manager.h
@@ -414,9 +414,9 @@ class reorder_transfer : public base_pass {
     void run(program& p) override;
 };
 
-class dynamic_shape_gather_opts : public base_pass {
+class mark_runtime_skippable_nodes : public base_pass {
 public:
-    dynamic_shape_gather_opts() : base_pass("dynamic_shape_gather_opts") {}
+    mark_runtime_skippable_nodes() : base_pass("mark_runtime_skippable_nodes") {}
 
 private:
     void run(program& p) override;
diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h
index 156b063409fc1f..68ec642008c7a7 100644
--- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h
+++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h
@@ -231,6 +231,7 @@ class primitive_inst {
     void build_deps();
     void do_runtime_skip_reorder();
     void do_runtime_skip_gather();
+    void do_runtime_skip_permute();
     void do_runtime_in_place_concat();
     void do_runtime_in_place_kv_cache();
     void configure_shape_of_dependencies();
diff --git a/src/plugins/intel_gpu/src/graph/permute.cpp b/src/plugins/intel_gpu/src/graph/permute.cpp
index 55469130573e31..a2b8ac53591773 100644
--- a/src/plugins/intel_gpu/src/graph/permute.cpp
+++ b/src/plugins/intel_gpu/src/graph/permute.cpp
@@ -127,7 +127,7 @@ void permute_inst::on_execute() {
 
 void permute_inst::update_output_memory() {
-    if (!can_be_optimized())
+    if (!can_be_optimized() || _impl_params->is_dynamic())
         return;
 
     if (_outputs.size() > 0 && static_cast<bool>(_outputs[0])
@@ -137,6 +137,8 @@ void permute_inst::update_output_memory() {
     if (_node != nullptr)
         build_deps();
 
+    GPU_DEBUG_TRACE_DETAIL << id() << " : update_output_memory with mem of input " << get_node().get_dependency(0).id()
+                           << " : " << input_memory_ptr()->buffer_ptr() << std::endl;
     _outputs = {_network.get_engine().reinterpret_buffer(input_memory(), _impl_params->get_output_layout())};
     _mem_allocated = false;
 }
diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 4bf1383ec64f26..6f0e94b197b13b 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -507,11 +507,17 @@ event::ptr primitive_inst::realloc_if_needed() {
         }
     }
 
-    // Clear out memory if if was previously reused, but now primitive can't be optimized
-    if (_node->is_type<gather>() && !can_be_optimized() && _outputs[0]
-        && dep_memory_ptr(0) && _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) {
-        _outputs[0] = nullptr;
-        max_output_layout_size = 0;
+    if (_node->is_type<gather>() || _node->is_type<permute>()) {
+        // For the nodes which can be optimized at runtime, input memory is used as output memory
+        // So there is no need to reallocate output memory
+        if (can_be_optimized())
+            return ev;
+        // Clear out memory if it was previously reused, but now primitive can't be optimized
+        if (!can_be_optimized() && _outputs[0] && dep_memory_ptr(0)
+            && _network.get_engine().is_the_same_buffer(dep_memory(0), output_memory(0))) {
+            _outputs[0] = nullptr;
+            max_output_layout_size = 0;
+        }
     }
 
     // update layout to ensure that it repsects paddings for correct allocation size
@@ -967,7 +973,7 @@ void primitive_inst::do_runtime_skip_gather() {
     mem_lock<int32_t, mem_lock_type::read> idx_data(dep_memory_ptr(1), _network.get_stream());
     for (int64_t i = 0; i < static_cast<int64_t>(idx_shape[0]); ++i) {
         if (idx_data[i] != i) {
-            GPU_DEBUG_TRACE_DETAIL << "--- Cannot optimize becuase idx_data [" << i << "] (" << idx_data[i] << ") != " << i << std::endl;
+            GPU_DEBUG_TRACE_DETAIL << "--- Cannot optimize because idx_data [" << i << "] (" << idx_data[i] << ") != " << i << std::endl;
             set_can_be_optimized(false);
             return;
         }
@@ -981,6 +987,47 @@ void primitive_inst::do_runtime_skip_gather() {
     set_can_be_optimized(true);
 }
 
+void primitive_inst::do_runtime_skip_permute() {
+    OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_skip_permute: " + id()));
+    // Check pattern
+    if (!get_node().is_type<permute>()
+        || is_output()
+        || !get_node().can_be_optimized()
+        || _impl_params->has_fused_primitives()
+        || _impl_params->get_input_layout(0).data_type != _impl_params->get_output_layout().data_type)
+        return;
+
+    GPU_DEBUG_TRACE_DETAIL << "[do_runtime_skip_permute] " << id() << " : check optimizability" << std::endl;
+    auto desc = _node->as<permute>().get_primitive();
+    auto input_shape = _impl_params->get_input_layout(0).get_shape();
+    const auto& permute_order = desc->permute_order;
+
+
+    // Check runtime shape
+    // Optimize only when the largest dim value among the dims whose position changes in the permute order
+    // equals the product of all such dim values (i.e., every other moved dim is 1)
+    int32_t size = 1;
+    int32_t max_value = 0;
+    for (int32_t i = 0; i < static_cast<int32_t>(permute_order.size()); ++i) {
+        int32_t order = static_cast<int32_t>(permute_order[i]);
+        int32_t dim = static_cast<int32_t>(input_shape[order]);
+        if (i != order) {
+            if (dim > max_value)
+                max_value = dim;
+            size *= dim;
+        }
+    }
+    // If the largest value and total size are different, can_be_optimized needs to be reset
+    if (size != max_value) {
+        GPU_DEBUG_TRACE_DETAIL << "--- Cannot optimize because size(" << size << ") and max_value(" << max_value << ") are different" << std::endl;
+        set_can_be_optimized(false);
+        return;
+    }
+    GPU_DEBUG_TRACE_DETAIL << "[do_runtime_skip_permute] " << id() << " : can_be_optimized" << std::endl;
+    GPU_DEBUG_TRACE_DETAIL << " - Input layout : " << _impl_params->get_input_layout(0).to_short_string() << std::endl;
+    GPU_DEBUG_TRACE_DETAIL << " - Output layout : " << _impl_params->get_output_layout().to_short_string() << std::endl;
+    set_can_be_optimized(true);
+}
+
 void primitive_inst::do_runtime_in_place_concat() {
     OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("do_runtime_in_place_concat: " + id()));
     GPU_DEBUG_GET_INSTANCE(debug_config);
@@ -1094,6 +1141,7 @@ event::ptr primitive_inst::execute(const std::vector<event::ptr>& events) {
         // if the user is can_be_optimized and output node then current nodes' output should be allocated to host.
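        // Worked example for do_runtime_skip_permute() (hypothetical shapes): with
        // input_shape = {1, 8, 1, 16} and permute_order = {0, 2, 1, 3}, the dims that change
        // position are input_shape[2] = 1 and input_shape[1] = 8, so size = 1 * 8 = 8 and
        // max_value = 8; size == max_value means the underlying data layout is unchanged, and
        // the call below marks the permute as skippable. With input_shape = {1, 8, 2, 16},
        // size = 16 != max_value = 8, so the permute must actually execute.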
do_runtime_skip_reorder(); do_runtime_skip_gather(); + do_runtime_skip_permute(); if (!is_valid_fusion()) { OV_ITT_SCOPED_TASK(ov::intel_gpu::itt::domains::intel_gpu_plugin, openvino::itt::handle("unfused_subgraph_exec: " + id())); diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index a191c5c0504deb..fcf8982f45b20c 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -602,8 +602,8 @@ void program::pre_optimize_graph(bool is_internal) { // optimization passes apply_opt_pass(true); - // Set gathers that might be skipped at runtime as can_be_optimized. - apply_opt_pass(); + // Mark operations that might be skipped at runtime as can_be_optimized. + apply_opt_pass(); } void program::post_optimize_graph(bool is_internal) { From 0b39f586b3a3698f1292fd236539156786723319 Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Fri, 12 Jan 2024 03:52:39 +0000 Subject: [PATCH 09/43] [GPU] Refactor (#22077) * strided_slice * add strided_slice * Update strided_slice.cpp --------- Co-authored-by: Pavel Durandin --- .../single_layer_tests/strided_slice.cpp | 202 +++++++++++------- 1 file changed, 125 insertions(+), 77 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp index b9cc742705184c..93ea0f2448d57b 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp @@ -2,103 +2,151 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - -#include "single_layer_tests/strided_slice.hpp" +#include "single_op_tests/strided_slice.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::StridedSliceLayerTest; +using ov::test::StridedSliceSpecificParams; +using ov::test::StridedSliceParams; + std::vector ss_only_test_cases_fp32 = { - StridedSliceSpecificParams{ { 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, - { 0, 1, 1 }, { 0, 1, 1 }, { 1, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 128, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1}, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 1, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, -1, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 1, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 9, 0 }, { 0, 11, 0 }, { 1, 1, 1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 9, 0 }, { 0, 7, 0 }, { -1, -1, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 7, 0 }, { 0, 9, 0 }, { -1, 1, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 4, 0 }, { 0, 9, 0 }, { -1, 2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 4, 0 }, { 0, 10, 0 }, { -1, 2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - 
StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 9, 0 }, { 0, 4, 0 }, { -1, -2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 10, 0 }, { 0, 4, 0 }, { -1, -2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, 11, 0 }, { 0, 0, 0 }, { -1, -2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100 }, { 0, -6, 0 }, { 0, -8, 0 }, { -1, -2, -1 }, - { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, - StridedSliceSpecificParams{ { 1, 12, 100, 1, 1 }, { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 }, - { 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {} }, - StridedSliceSpecificParams{ { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, - {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, - {0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 1, 2, 128, 2 }, { 0, 0, 0, 1 }, { 0, 1, 0, 2 }, { 1, 1, 1, 1 }, - {1, 0, 1, 0}, {1, 0, 1, 0}, {0, 0, 0, 0}, {0, 1, 0, 1}, {0, 0, 0, 0} }, - StridedSliceSpecificParams{ { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, - {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, - {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 2, 4, 2 }, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 }, - {0, 1, 1, 0}, {1, 1, 0, 0}, {}, {}, {} }, - StridedSliceSpecificParams{ { 1, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 }, - {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 }, - {0, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 3, 4, 5, 6 }, { 0, 1, 0, 0, 0 }, { 2, 3, 4, 5, 6 }, { 1, 1, 1, 1, 1 }, - {1, 0, 1, 1, 1}, {1, 0, 1, 1, 1}, {}, {0, 1, 0, 0, 0}, {} }, - StridedSliceSpecificParams{ { 1, 5, 30, 30, 30 }, { 0, 0, 0, 0, 0 }, { 0, 0, 29, 29, 29 }, { 1, 1, 1, 1, 1 }, - {1, 1, 1, 1, 1}, {1, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, - StridedSliceSpecificParams{ { 10, 12 }, { -1, 1 }, { -9999, 0 }, { -1, 1 }, - { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, - StridedSliceSpecificParams{ { 5, 5, 5, 5 }, { -1, 0, -1, 0 }, { -50, 0, -60, 0 }, { -1, 1, -1, 1 }, - { 0, 0, 0, 0 }, { 0, 1, 0, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 } }, - StridedSliceSpecificParams{ { 128, 1, 1024 }, { -1, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, - { 0, 1, 1 }, { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 128, 1 }})), + { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, { 0, 1, 1 }, + { 0, 1, 1 }, { 1, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 128, 1 }})), + { 0, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1}, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 1, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, -1, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 1, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 
9, 0 }, { 0, 11, 0 }, { 1, 1, 1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 1, 0 }, { 0, -1, 0 }, { 1, 1, 1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 9, 0 }, { 0, 7, 0 }, { -1, -1, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 7, 0 }, { 0, 9, 0 }, { -1, 1, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 4, 0 }, { 0, 9, 0 }, { -1, 2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 4, 0 }, { 0, 10, 0 }, { -1, 2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 9, 0 }, { 0, 4, 0 }, { -1, -2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 10, 0 }, { 0, 4, 0 }, { -1, -2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 11, 0 }, { 0, 0, 0 }, { -1, -2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, -6, 0 }, { 0, -8, 0 }, { -1, -2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100, 1, 1 }})), + { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 }, + { 1, 0, 1, 0 }, { 1, 0, 1, 0 }, { }, { 0, 1, 0, 1 }, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 2, 2 }})), + { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, + {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 2, 2 }})), + { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, + {0, 0, 0, 0}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 2, 128, 2 }})), + { 0, 0, 0, 1 }, { 0, 1, 0, 2 }, { 1, 1, 1, 1 }, + {1, 0, 1, 0}, {1, 0, 1, 0}, {0, 0, 0, 0}, {0, 1, 0, 1}, {0, 0, 0, 0} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 2, 2 }})), + { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, + {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 4, 3 }})), + { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, + {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 4, 2 }})), + { 1, 0, 0, 1 }, { 
2, 2, 4, 2 }, { 1, 1, 2, 1 }, + {0, 1, 1, 0}, {1, 1, 0, 0}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 2, 4, 2 }})), + { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 }, + {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 4, 2 }})), + { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 }, + {0, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 3, 4, 5, 6 }})), + { 0, 1, 0, 0, 0 }, { 2, 3, 4, 5, 6 }, { 1, 1, 1, 1, 1 }, + {1, 0, 1, 1, 1}, {1, 0, 1, 1, 1}, {}, {0, 1, 0, 0, 0}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 5, 30, 30, 30 }})), + { 0, 0, 0, 0, 0 }, { 0, 0, 29, 29, 29 }, { 1, 1, 1, 1, 1 }, + {1, 1, 1, 1, 1}, {1, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 12 }})), + { -1, 1 }, { -9999, 0 }, { -1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 5, 5, 5, 5 }})), + { -1, 0, -1, 0 }, { -50, 0, -60, 0 }, { -1, 1, -1, 1 }, + { 0, 0, 0, 0 }, { 0, 1, 0, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 128, 1, 1024 }})), + { -1, 0, 0 }, { 0, 0, 0 }, { 1, 1, 1 }, + { 0, 1, 1 }, { 0, 1, 1 }, { 0, 0, 0 }, { 1, 0, 0 }, { 0, 0, 0 } }, }; std::vector ss_only_test_cases_i64 = { - StridedSliceSpecificParams{ { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, - {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, - StridedSliceSpecificParams{ { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, - {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 2, 2 }})), + { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, + {1, 1, 1, 1}, {1, 1, 1, 1}, {}, {}, {} }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 2, 2, 2, 2 }})), + { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, + {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {} }, }; INSTANTIATE_TEST_SUITE_P( smoke_CLDNN_FP32, StridedSliceLayerTest, ::testing::Combine( ::testing::ValuesIn(ss_only_test_cases_fp32), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::map())), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::DEVICE_GPU)), StridedSliceLayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P( smoke_CLDNN_I64, StridedSliceLayerTest, ::testing::Combine( ::testing::ValuesIn(ss_only_test_cases_i64), - ::testing::Values(InferenceEngine::Precision::I64), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(std::map())), + 
::testing::Values(ov::element::i64), + ::testing::Values(ov::test::utils::DEVICE_GPU)), StridedSliceLayerTest::getTestCaseName); } // namespace From 285b000b55ba1cc613c06c5970430bb233358dbe Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Fri, 12 Jan 2024 07:03:39 +0100 Subject: [PATCH 10/43] Update dc link (#22091) --- .github/ISSUE_TEMPLATE/good_first_issue.yml | 2 +- CONTRIBUTING.md | 4 ++-- README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/good_first_issue.yml b/.github/ISSUE_TEMPLATE/good_first_issue.yml index de6e092736a2b3..52f42508a621bf 100644 --- a/.github/ISSUE_TEMPLATE/good_first_issue.yml +++ b/.github/ISSUE_TEMPLATE/good_first_issue.yml @@ -42,7 +42,7 @@ body: discussions, guides. value: | - [Contribution guide - start here!](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md) - - [Intel DevHub Discord channel](https://discord.gg/wPuqAujS) - engage in discussions, ask questions and talk to OpenVINO developers + - [Intel DevHub Discord channel](https://discord.gg/7pVRxUwdWG) - engage in discussions, ask questions and talk to OpenVINO developers validations: required: true diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 126cd4d2d5fe53..f91846d8f0c78a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,7 +101,7 @@ You can start with the following links: - [User documentation](https://docs.openvino.ai/) - [Blog post on contributing to OpenVINO](https://medium.com/openvino-toolkit/how-to-contribute-to-an-ai-open-source-project-c741f48e009e) - [Pick up a Good First Issue](https://github.com/orgs/openvinotoolkit/projects/3) -- Check out [Intel DevHub Discord server](https://discord.gg/wPuqAujS) - engage in discussions, ask questions and talk to OpenVINO developers +- Check out [Intel DevHub Discord server](https://discord.gg/7pVRxUwdWG) - engage in discussions, ask questions and talk to OpenVINO developers ### 2. Building the project @@ -139,7 +139,7 @@ Choose the component your Good First Issue is related to. You can run tests to m ### 3. Start working on your Good First Issue -Use the issue description and locally built OpenVINO to complete the task. Remember that you can always ask users tagged in the "Contact points" section for help! You can also visit [Intel DevHub Discord server](https://discord.gg/wPuqAujS) and ask questions in the channel dedicated to Good First Issue support. +Use the issue description and locally built OpenVINO to complete the task. Remember that you can always ask users tagged in the "Contact points" section for help! You can also visit [Intel DevHub Discord server](https://discord.gg/7pVRxUwdWG) and ask questions in the channel dedicated to Good First Issue support. ### 4. Submit a PR with your changes diff --git a/README.md b/README.md index 81b7f0a65e7900..2e9bdbf937483d 100644 --- a/README.md +++ b/README.md @@ -174,7 +174,7 @@ See [Contributions Welcome](https://github.com/openvinotoolkit/openvino/issues/1 See [CONTRIBUTING](./CONTRIBUTING.md) for contribution details. Thank you! -Visit [Intel DevHub Discord server](https://discord.gg/wPuqAujS) if you need help or wish to talk to OpenVINO developers. You can go to the channel dedicated to Good First Issue support if you are working on a task. +Visit [Intel DevHub Discord server](https://discord.gg/7pVRxUwdWG) if you need help or wish to talk to OpenVINO developers. You can go to the channel dedicated to Good First Issue support if you are working on a task. 
## Take the issue If you wish to be assigned to an issue please add a comment with `.take` command. From 14d5d18c48bd9ab4bf469ba7f11e12430ecd7582 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Fri, 12 Jan 2024 07:23:30 +0100 Subject: [PATCH 11/43] [Ref] Remove ngraph leftovers (#22066) * Remove proxy legacy functions * Remove CoordinateTransform --- .../src/pruning/init_const_mask.cpp | 17 +- .../src/pruning/propagate_masks.cpp | 47 ++- .../offline_transformations/pruning_test.cpp | 27 +- .../runtime/reference/autobroadcast_binop.hpp | 46 --- .../ngraph/runtime/reference/power.hpp | 31 -- .../ngraph/runtime/reference/softmax.hpp | 19 -- .../reference/utils/coordinate_transform.hpp | 90 ----- src/core/reference/src/op/function.cpp | 1 - .../src/utils/coordinate_transform.cpp | 315 ------------------ src/core/tests/coordinate.cpp | 214 ------------ 10 files changed, 41 insertions(+), 766 deletions(-) delete mode 100644 src/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp delete mode 100644 src/core/reference/include/ngraph/runtime/reference/power.hpp delete mode 100644 src/core/reference/include/ngraph/runtime/reference/softmax.hpp delete mode 100644 src/core/tests/coordinate.cpp diff --git a/src/common/offline_transformations/src/pruning/init_const_mask.cpp b/src/common/offline_transformations/src/pruning/init_const_mask.cpp index f2086fb05fc68f..84863f9cbe3cb8 100644 --- a/src/common/offline_transformations/src/pruning/init_const_mask.cpp +++ b/src/common/offline_transformations/src/pruning/init_const_mask.cpp @@ -8,6 +8,7 @@ #include "openvino/op/log.hpp" #include "openvino/opsets/opset6.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" #include "pruning.hpp" @@ -38,19 +39,19 @@ ov::pass::InitConstMask::InitConstMask(const ov::AxisSet& dims, Coordinate begin(shape.size(), 0); Coordinate end(shape); - begin[dim] = value; - end[dim] = value + 1; - bool skip_dim_value = false; - OPENVINO_SUPPRESS_DEPRECATED_START - ov::CoordinateTransform iter(shape, begin, end); - for (const Coordinate& coord : iter) { - if (!condition(values.at(iter.index(coord)))) { + + auto narrow_shape = shape; + narrow_shape[dim] = 1; + ov::CoordinateTransformBasic iter(narrow_shape); + for (auto coord : iter) { + coord[dim] = value; + if (!condition(values.at(coordinate_index(coord, shape)))) { skip_dim_value = true; break; } } - OPENVINO_SUPPRESS_DEPRECATED_END + if (!skip_dim_value) { mask->at(dim).insert(value); } diff --git a/src/common/offline_transformations/src/pruning/propagate_masks.cpp b/src/common/offline_transformations/src/pruning/propagate_masks.cpp index 7837f563dbe7ee..bc9d6a7f8c7d99 100644 --- a/src/common/offline_transformations/src/pruning/propagate_masks.cpp +++ b/src/common/offline_transformations/src/pruning/propagate_masks.cpp @@ -15,6 +15,7 @@ #include "openvino/op/util/pad_base.hpp" #include "openvino/opsets/opset10.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" #include "openvino/util/log.hpp" #include "pruning.hpp" @@ -1012,18 +1013,12 @@ struct ChannelsMap { /* Returns coordinate iterator through all values of given channel * on unsquized_shape_dim dimension according to unsquized_shape shape. 
*/ -OPENVINO_SUPPRESS_DEPRECATED_START -static ov::CoordinateTransform get_channel_iter(const ov::Shape unsquized_shape, - const size_t unsquized_shape_dim, - const size_t channel) { - auto begin = ov::Coordinate(unsquized_shape.size(), 0); - auto end = ov::Coordinate(unsquized_shape); - begin[unsquized_shape_dim] = channel; - end[unsquized_shape_dim] = channel + 1; - ov::CoordinateTransform iter(unsquized_shape, begin, end); - return iter; +static ov::CoordinateTransformBasic get_channel_iter(const ov::Shape& unsquized_shape, + const size_t unsquized_shape_dim) { + auto iter_shape = unsquized_shape; + iter_shape[unsquized_shape_dim] = 1; + return ov::CoordinateTransformBasic{iter_shape}; } -OPENVINO_SUPPRESS_DEPRECATED_END /* Maps squzed_mask_dim mask dimension to vector of masks for unsquized_dims. * Using dims_attrs and unsquized_shape for channel iteration. @@ -1054,16 +1049,16 @@ static ChannelsMap map_channels(const std::set squized_mask_dim, ch %= dims_attrs[unsquized_dim].dim; // Start iterating through chanel - OPENVINO_SUPPRESS_DEPRECATED_START - auto iter = get_channel_iter(unsquized_shape, unsquized_shift, ch); - for (const auto& coord : iter) { - const auto idx = iter.index(coord); + auto iter = get_channel_iter(unsquized_shape, unsquized_shift); + for (auto coord : iter) { + coord[unsquized_shift] = ch; + const auto idx = coordinate_index(coord, unsquized_shape); if (squized_mask_dim_copy.find(idx) != squized_mask_dim_copy.end()) { cur_ch_elems.insert(idx); squized_mask_dim_copy.erase(idx); } } - OPENVINO_SUPPRESS_DEPRECATED_END + if (cur_ch_elems.size() != dims_attrs[unsquized_dim].elems_inner_dims * dims_attrs[unsquized_dim].elems_outer_dims) { suspicious_elems.insert(cur_ch_elems.begin(), cur_ch_elems.end()); @@ -1245,11 +1240,11 @@ class ov::pass::mask_propagation::Reshape : public MatcherPass { for (const auto out_dim : dims_map[in_dim]) { const auto unsquized_shift = out_dim - dims_map[in_dim][0]; for (const auto ch : weights_mask_row->at(out_dim)) { - OPENVINO_SUPPRESS_DEPRECATED_START - auto iter = get_channel_iter(dims_shape[in_dim], unsquized_shift, ch); - for (const auto& coord : iter) - cur_mask->at(in_dim).insert(iter.index(coord)); - OPENVINO_SUPPRESS_DEPRECATED_END + auto iter = get_channel_iter(dims_shape[in_dim], unsquized_shift); + for (auto coord : iter) { + coord[unsquized_shift] = ch; + cur_mask->at(in_dim).insert(coordinate_index(coord, dims_shape[in_dim])); + } } } } @@ -1318,11 +1313,11 @@ class ov::pass::mask_propagation::Reshape : public MatcherPass { for (const auto in_dim : dims_map[out_dim]) { const auto unsquized_shift = in_dim - dims_map[out_dim][0]; for (const auto ch : input_mask_row->at(in_dim)) { - OPENVINO_SUPPRESS_DEPRECATED_START - auto iter = get_channel_iter(dims_shape[out_dim], unsquized_shift, ch); - for (const auto& coord : iter) - cur_mask->at(out_dim).insert(iter.index(coord)); - OPENVINO_SUPPRESS_DEPRECATED_END + auto iter = get_channel_iter(dims_shape[out_dim], unsquized_shift); + for (auto coord : iter) { + coord[unsquized_shift] = ch; + cur_mask->at(out_dim).insert(coordinate_index(coord, dims_shape[out_dim])); + } } } } diff --git a/src/common/transformations/tests/offline_transformations/pruning_test.cpp b/src/common/transformations/tests/offline_transformations/pruning_test.cpp index 8cb95249a4456e..485bcc28d4c462 100644 --- a/src/common/transformations/tests/offline_transformations/pruning_test.cpp +++ b/src/common/transformations/tests/offline_transformations/pruning_test.cpp @@ -21,6 +21,7 @@ #include 
"openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" #include "openvino/pass/visualize_tree.hpp" +#include "openvino/reference/utils/coordinate_index.hpp" #include "openvino/reference/utils/coordinate_transform.hpp" #include "openvino/util/env_util.hpp" #include "transformations/init_node_info.hpp" @@ -45,18 +46,13 @@ Output create_constant_with_zeros(const Shape& shape, const Mask& mask) { std::vector values(shape_size(shape), 1); for (size_t dim = 0; dim < mask.size(); ++dim) { for (const auto& dim_value : mask.at(dim)) { - Coordinate coord_begin(shape.size(), 0); - coord_begin[dim] = dim_value; - - Coordinate coord_end(shape); - coord_end[dim] = dim_value + 1; - - OPENVINO_SUPPRESS_DEPRECATED_START - ov::CoordinateTransform iter(shape, coord_begin, coord_end); - for (const Coordinate& coord : iter) { - values[iter.index(coord)] = 0; + auto narrow_shape = shape; + narrow_shape[dim] = 1; + ov::CoordinateTransformBasic iter(narrow_shape); + for (auto coord : iter) { + coord[dim] = dim_value; + values[coordinate_index(coord, shape)] = 0; } - OPENVINO_SUPPRESS_DEPRECATED_END } } return std::make_shared(element::f32, shape, values); @@ -137,12 +133,11 @@ TEST(TransformationTests, InitMasksOutputChannel) { Shape input_shape{1, 3, 64, 64}; Shape weights_shape{6, 3, 3, 3}; std::vector values(shape_size(weights_shape), 1); - OPENVINO_SUPPRESS_DEPRECATED_START - ov::CoordinateTransform iter(weights_shape, {0, 1, 0, 0}, {6, 2, 3, 3}); - for (const Coordinate& coord : iter) { - values[iter.index(coord)] = 0; + ov::CoordinateTransformBasic iter({6, 1, 3, 3}); + for (auto coord : iter) { + coord[1] = 1; + values[coordinate_index(coord, weights_shape)] = 0; } - OPENVINO_SUPPRESS_DEPRECATED_END auto weights = std::make_shared(element::f32, weights_shape, values); ov::pass::InitConstMask({1}).apply(weights); diff --git a/src/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp b/src/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp deleted file mode 100644 index d0b7d704dd89b8..00000000000000 --- a/src/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/reference/autobroadcast_binop.hpp" - -// Proxy calls for dependant components transition to ov::reference namespace -namespace ngraph { -namespace runtime { -namespace reference { -template -void autobroadcast_binop(const T* arg0, - const T* arg1, - U* out, - const Shape& arg0_shape, - const Shape& arg1_shape, - const op::AutoBroadcastSpec& broadcast_spec, - Functor elementwise_functor) { - ov::reference::autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, elementwise_functor); -} - -template -void autobroadcast_select(const U* arg0, - const T* arg1, - const T* arg2, - T* out, - const Shape& arg0_shape, - const Shape& arg1_shape, - const Shape& arg2_shape, - const op::AutoBroadcastSpec& broadcast_spec, - Functor elementwise_functor) { - ov::reference::autobroadcast_select(arg0, - arg1, - arg2, - out, - arg0_shape, - arg1_shape, - arg2_shape, - broadcast_spec, - elementwise_functor); -} -} // namespace reference -} // namespace runtime -} // namespace ngraph diff --git a/src/core/reference/include/ngraph/runtime/reference/power.hpp b/src/core/reference/include/ngraph/runtime/reference/power.hpp deleted file mode 100644 index 9a8e0d89612dcb..00000000000000 --- 
a/src/core/reference/include/ngraph/runtime/reference/power.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/reference/power.hpp" - -// Proxy calls for dependant components transition to ov::reference namespace -namespace ngraph { -namespace runtime { -namespace reference { - -template -void power(const T* arg0, const T* arg1, T* out, size_t count) { - ov::reference::power(arg0, arg1, out, count); -} - -template -void power(const T* arg0, - const T* arg1, - T* out, - const Shape& arg0_shape, - const Shape& arg1_shape, - const op::AutoBroadcastSpec& broadcast_spec) { - ov::reference::power(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec); -} - -} // namespace reference -} // namespace runtime -} // namespace ngraph diff --git a/src/core/reference/include/ngraph/runtime/reference/softmax.hpp b/src/core/reference/include/ngraph/runtime/reference/softmax.hpp deleted file mode 100644 index 559ab8f94d615b..00000000000000 --- a/src/core/reference/include/ngraph/runtime/reference/softmax.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/reference/softmax.hpp" - -// Proxy call for dependant components transition to ov::reference namespace -namespace ngraph { -namespace runtime { -namespace reference { -template -void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) { - ov::reference::softmax(arg, out, shape, axes); -} -} // namespace reference -} // namespace runtime -} // namespace ngraph diff --git a/src/core/reference/include/openvino/reference/utils/coordinate_transform.hpp b/src/core/reference/include/openvino/reference/utils/coordinate_transform.hpp index d94f8022e65a2b..692de2f2e2f071 100644 --- a/src/core/reference/include/openvino/reference/utils/coordinate_transform.hpp +++ b/src/core/reference/include/openvino/reference/utils/coordinate_transform.hpp @@ -7,7 +7,6 @@ #include "openvino/core/axis_vector.hpp" #include "openvino/core/coordinate.hpp" #include "openvino/core/coordinate_diff.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/strides.hpp" @@ -17,7 +16,6 @@ namespace ov { /// produces the following coordinates: /// {0,0}, {0,1}, {0,2}, /// {1,0}, {1,1}, {2,2} -/// \deprecated class CoordinateIterator { /// \brief Coordinates iterator constructor /// \param target_shape The target shape for coordinates iteration @@ -73,12 +71,6 @@ class CoordinateTransformBasic { CoordinateTransformBasic(const Shape& source_shape); - /// \brief The tensor element index calculation by given coordinate. - /// \param c tensor element coordinate - /// \deprecated - OPENVINO_DEPRECATED("This method is deprecated and will be removed soon.") - size_t index(const Coordinate& c) const; - /// \brief Returns an iterator to the first coordinate of the tensor. CoordinateIterator begin() const noexcept; @@ -88,86 +80,4 @@ class CoordinateTransformBasic { protected: Shape m_source_shape; }; - -/// \brief Class which allows to calculate item index with given coordinates in tensor -/// and helps to iterate over the subset of coordinates. -/// Tensor items should be placed in memory in row-major order. 
-/// \deprecated -class OPENVINO_DEPRECATED("This class is deprecated and will be removed soon.") CoordinateTransform - : protected CoordinateTransformBasic { -public: - using Iterator = CoordinateIterator; - - CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order, - const CoordinateDiff& target_padding_below, - const CoordinateDiff& target_padding_above, - const Strides& source_dilation_strides); - - CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order, - const CoordinateDiff& target_padding_below, - const CoordinateDiff& target_padding_above); - - CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order); - - CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides); - - CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner); - - CoordinateTransform(const Shape& source_shape); - - /// \brief The tensor element index calculation by given coordinate. - /// \param c tensor element coordinate - size_t index(const Coordinate& c) const; - - /// \brief Checks that coordinate belongs to given coordinates subset. - /// \param c tensor element coordinate - bool has_source_coordinate(const Coordinate& c) const; - - /// \brief Convert a target-space coordinate to a source-space coordinate. - /// \param c tensor element coordinate - Coordinate to_source_coordinate(const Coordinate& c) const; - - const Shape& get_source_shape() const noexcept; - const Shape& get_target_shape() const noexcept; - const Coordinate& get_source_start_corner() const noexcept; - const Coordinate& get_source_end_corner() const noexcept; - const Strides& get_source_strides() const noexcept; - const AxisVector& get_source_axis_order() const noexcept; - const Strides& get_target_dilation_strides() const noexcept; - - /// \brief Returns an iterator to the first coordinate of the tensor. - CoordinateIterator begin() const noexcept; - - /// \brief Returns an iterator to the coordinate following the last element of the tensor. 
- const CoordinateIterator& end() const noexcept; - -private: - Coordinate m_source_start_corner; - Coordinate m_source_end_corner; - Strides m_source_strides; - AxisVector m_source_axis_order; - CoordinateDiff m_target_padding_below; - CoordinateDiff m_target_padding_above; - Strides m_target_dilation_strides; - - Shape m_target_shape; - size_t m_n_axes; -}; } // namespace ov diff --git a/src/core/reference/src/op/function.cpp b/src/core/reference/src/op/function.cpp index 37378d831f3ca7..7212615e58e31c 100644 --- a/src/core/reference/src/op/function.cpp +++ b/src/core/reference/src/op/function.cpp @@ -6,7 +6,6 @@ #include -#include "openvino/core/deprecated.hpp" #include "openvino/core/shape_util.hpp" namespace ov { diff --git a/src/core/reference/src/utils/coordinate_transform.cpp b/src/core/reference/src/utils/coordinate_transform.cpp index cd97834e6d0245..bd8995a1008146 100644 --- a/src/core/reference/src/utils/coordinate_transform.cpp +++ b/src/core/reference/src/utils/coordinate_transform.cpp @@ -11,7 +11,6 @@ #include #include -#include "ngraph/util.hpp" #include "openvino/core/axis_vector.hpp" #include "openvino/core/coordinate_diff.hpp" #include "openvino/core/shape.hpp" @@ -20,36 +19,8 @@ using namespace ov; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace { -Strides default_strides(size_t n_axes) { - return Strides(n_axes, 1); -} -CoordinateDiff default_padding(size_t n_axes) { - return CoordinateDiff(n_axes, 0); -} -AxisVector default_axis_order(size_t n_axes) { - AxisVector result(n_axes); - std::iota(result.begin(), result.end(), 0); - return result; -} - -Coordinate default_source_start_corner(size_t n_axes) { - return Coordinate(n_axes, 0); -} -Coordinate default_source_end_corner(const Shape& source_shape) { - return source_shape; -} -} // namespace -OPENVINO_SUPPRESS_DEPRECATED_END - CoordinateTransformBasic::CoordinateTransformBasic(const Shape& source_shape) : m_source_shape(source_shape) {} -// Compute the index of a source-space coordinate in the buffer. 
-size_t CoordinateTransformBasic::index(const Coordinate& c) const { - return coordinate_index(c, m_source_shape); -} - CoordinateIterator CoordinateTransformBasic::begin() const noexcept { return CoordinateIterator(m_source_shape); } @@ -58,292 +29,6 @@ const CoordinateIterator& CoordinateTransformBasic::end() const noexcept { return CoordinateIterator::end(); } -OPENVINO_SUPPRESS_DEPRECATED_START -CoordinateTransform::CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order, - const CoordinateDiff& target_padding_below, - const CoordinateDiff& target_padding_above, - const Strides& target_dilation_strides) - : CoordinateTransformBasic(source_shape), - m_source_start_corner(source_start_corner), - m_source_end_corner(source_end_corner), - m_source_strides(source_strides), - m_source_axis_order(source_axis_order), - m_target_padding_below(target_padding_below), - m_target_padding_above(target_padding_above), - m_target_dilation_strides(target_dilation_strides) { - m_n_axes = source_shape.size(); - - if (m_n_axes != source_start_corner.size()) { - throw std::domain_error("Source start corner does not have the same number of axes as the source space shape"); - } - if (m_n_axes != source_end_corner.size()) { - throw std::domain_error("Source end corner does not have the same number of axes as the source space shape"); - } - if (m_n_axes != source_strides.size()) { - throw std::domain_error("Source strides do not have the same number of axes as the source space shape"); - } - if (m_n_axes != source_axis_order.size()) { - // Note: this check is NOT redundant with the is_permutation check below, though you might - // think it is. If the lengths don't match then is_permutation won't catch that; it'll - // either stop short or walk off the end of source_axis_order. 
- throw std::domain_error("Source axis order does not have the same number of axes as the source space shape"); - } - if (m_n_axes != target_padding_below.size()) { - throw std::domain_error("Padding-below shape does not have the same number of axes as the source space shape"); - } - if (m_n_axes != target_padding_above.size()) { - throw std::domain_error("Padding-above shape does not have the same number of axes as the source space shape"); - } - if (m_n_axes != target_dilation_strides.size()) { - throw std::domain_error("Target dilation strides do not have the same number of axes as the source shape"); - } - - AxisVector all_axes(m_n_axes); - for (size_t i = 0; i < all_axes.size(); i++) { - all_axes[i] = i; - } - - if (!std::is_permutation(all_axes.begin(), all_axes.end(), source_axis_order.begin())) { - throw std::domain_error("Source axis order is not a permutation of {0,...,n-1} where n is the number of axes " - "in the source space shape"); - } - - for (size_t i = 0; i < m_n_axes; i++) { - if (target_dilation_strides[i] == 0) { - std::stringstream ss; - - ss << "The target dilation stride is 0 at axis " << i; - throw std::domain_error(ss.str()); - } - } - - std::vector padded_upper_bounds; - - for (size_t i = 0; i < m_n_axes; i++) { - std::ptrdiff_t padded_upper_bound = - ngraph::subtract_or_zero(source_shape[i], size_t(1)) * target_dilation_strides[i] + 1 + - target_padding_below[i] + target_padding_above[i]; - - if (padded_upper_bound < 0) { - std::stringstream ss; - - ss << "The end corner is out of bounds at axis " << i; - throw std::domain_error(ss.str()); - } - - padded_upper_bounds.push_back(padded_upper_bound); - } - - for (size_t i = 0; i < m_n_axes; i++) { - if (static_cast(source_start_corner[i]) >= padded_upper_bounds[i] && - source_start_corner[i] != source_shape[i]) { - std::stringstream ss; - - ss << "The start corner is out of bounds at axis " << i; - throw std::domain_error(ss.str()); - } - - if (static_cast(source_end_corner[i]) > padded_upper_bounds[i]) { - std::stringstream ss; - - ss << "The end corner is out of bounds at axis " << i; - throw std::domain_error(ss.str()); - } - } - - for (size_t i = 0; i < m_n_axes; i++) { - if (source_strides[i] == 0) { - std::stringstream ss; - - ss << "The source stride is 0 at axis " << i; - throw std::domain_error(ss.str()); - } - } - - for (size_t axis = 0; axis < m_n_axes; axis++) { - m_target_shape.push_back( - ngraph::ceil_div(source_end_corner[source_axis_order[axis]] - source_start_corner[source_axis_order[axis]], - source_strides[source_axis_order[axis]])); - } -} - -CoordinateTransform::CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order, - const CoordinateDiff& target_padding_below, - const CoordinateDiff& target_padding_above) - : CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - source_strides, - source_axis_order, - target_padding_below, - target_padding_above, - default_strides(source_shape.size())) {} - -CoordinateTransform::CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides, - const AxisVector& source_axis_order) - : CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - source_strides, - source_axis_order, - default_padding(source_shape.size()), - default_padding(source_shape.size()), - 
default_strides(source_shape.size())) {} - -CoordinateTransform::CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner, - const Strides& source_strides) - : CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - source_strides, - default_axis_order(source_shape.size()), - default_padding(source_shape.size()), - default_padding(source_shape.size()), - default_strides(source_shape.size())) {} - -CoordinateTransform::CoordinateTransform(const Shape& source_shape, - const Coordinate& source_start_corner, - const Coordinate& source_end_corner) - : CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - default_strides(source_shape.size()), - default_axis_order(source_shape.size()), - default_padding(source_shape.size()), - default_padding(source_shape.size()), - default_strides(source_shape.size())) {} - -CoordinateTransform::CoordinateTransform(const Shape& source_shape) - : CoordinateTransform(source_shape, - default_source_start_corner(source_shape.size()), - default_source_end_corner(source_shape), - default_strides(source_shape.size()), - default_axis_order(source_shape.size()), - default_padding(source_shape.size()), - default_padding(source_shape.size()), - default_strides(source_shape.size())) {} - -// Compute the index of a target-space coordinate in thebuffer. -size_t CoordinateTransform::index(const Coordinate& c) const { - return CoordinateTransformBasic::index(to_source_coordinate(c)); -} - -// Convert a target-space coordinate to a source-space coordinate. -Coordinate CoordinateTransform::to_source_coordinate(const Coordinate& c_target) const { - if (c_target.size() != m_n_axes) { - throw std::domain_error("Target coordinate rank does not match the coordinate transform rank"); - } - - Coordinate c_source(c_target.size()); - - for (size_t target_axis = 0; target_axis < m_n_axes; target_axis++) { - size_t source_axis = m_source_axis_order[target_axis]; - - size_t target_pos = c_target[target_axis]; - size_t pos_destrided = target_pos * m_source_strides[source_axis]; - size_t pos_deshifted = pos_destrided + m_source_start_corner[source_axis]; - size_t pos_depadded = pos_deshifted - m_target_padding_below[target_axis]; - size_t pos_dedilated = pos_depadded / m_target_dilation_strides[target_axis]; - c_source[source_axis] = pos_dedilated; - } - - return c_source; -} - -// A point in the target space is considered not to have a source coordinate if it was inserted due -// to padding or dilation, or if it is out of the bounds of the target space. -bool CoordinateTransform::has_source_coordinate(const Coordinate& c_target) const { - if (c_target.size() != m_n_axes) { - throw std::domain_error("Target coordinate rank does not match the coordinate transform rank"); - } - - for (size_t target_axis = 0; target_axis < m_n_axes; target_axis++) { - // Is this coordinate out of bounds of the target space? - if (c_target[target_axis] >= m_target_shape[target_axis]) { - return false; - } - - // The rest of this is a replay of the corresponding logic in `to_source_coordinate`, with - // bounds and divisibility checking. - std::ptrdiff_t source_axis = m_source_axis_order[target_axis]; - - std::ptrdiff_t target_pos = c_target[target_axis]; - std::ptrdiff_t pos_destrided = target_pos * m_source_strides[source_axis]; - std::ptrdiff_t pos_deshifted = pos_destrided + m_source_start_corner[source_axis]; - - // If we are in the below-padding or the above-padding. 
- if (pos_deshifted < m_target_padding_below[target_axis]) { - return false; - } - std::ptrdiff_t pos_depadded = pos_deshifted - m_target_padding_below[target_axis]; - - // If we are in the above-padding, we have no source coordinate. - if (m_source_shape[source_axis] == 0 || - (pos_depadded >= ((static_cast(m_source_shape[source_axis]) - 1) * - static_cast(m_target_dilation_strides[target_axis])) + - 1)) { - return false; - } - - // If we are in a dilation gap, we have no source coordinate. - if (pos_depadded % m_target_dilation_strides[target_axis] != 0) { - return false; - } - } - - return true; -} - -const Shape& CoordinateTransform::get_source_shape() const noexcept { - return m_source_shape; -} - -const Shape& CoordinateTransform::get_target_shape() const noexcept { - return m_target_shape; -} - -const Coordinate& CoordinateTransform::get_source_start_corner() const noexcept { - return m_source_start_corner; -} - -const Coordinate& CoordinateTransform::get_source_end_corner() const noexcept { - return m_source_end_corner; -} - -const Strides& CoordinateTransform::get_source_strides() const noexcept { - return m_source_strides; -} - -const AxisVector& CoordinateTransform::get_source_axis_order() const noexcept { - return m_source_axis_order; -} - -const Strides& CoordinateTransform::get_target_dilation_strides() const noexcept { - return m_target_dilation_strides; -} - -CoordinateIterator CoordinateTransform::begin() const noexcept { - return CoordinateIterator(m_target_shape); -} - -const CoordinateIterator& CoordinateTransform::end() const noexcept { - return CoordinateIterator::end(); -} -OPENVINO_SUPPRESS_DEPRECATED_END - // The "is_end" parameter is true if we want the "end()" iterator. CoordinateIterator::CoordinateIterator(const Shape& target_shape, bool is_end) : m_target_shape(target_shape), diff --git a/src/core/tests/coordinate.cpp b/src/core/tests/coordinate.cpp deleted file mode 100644 index f7cc7e18b8a0a8..00000000000000 --- a/src/core/tests/coordinate.cpp +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/coordinate.hpp" - -#include -#include -#include - -#include "common_test_utils/ndarray.hpp" -#include "common_test_utils/test_tools.hpp" -#include "gtest/gtest.h" -#include "openvino/reference/utils/coordinate_transform.hpp" - -using namespace std; -using namespace ov; - -OPENVINO_SUPPRESS_DEPRECATED_START -TEST(coordinate, shape0d) { - auto ct = ov::CoordinateTransform({}); - ASSERT_EQ(shape_size(ct.get_target_shape()), 1); - auto it = ct.begin(); - EXPECT_EQ(*it++, ov::Coordinate({})); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, shape1d) { - auto ct = ov::CoordinateTransform({3}); - ASSERT_EQ(shape_size(ct.get_target_shape()), 3); - auto it = ct.begin(); - EXPECT_EQ(*it++, Coordinate({0})); - EXPECT_EQ(*it++, Coordinate({1})); - EXPECT_EQ(*it++, Coordinate({2})); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, shape2d) { - auto ct = ov::CoordinateTransform({2, 3}); - ASSERT_EQ(shape_size(ct.get_target_shape()), 6); - auto it = ct.begin(); - EXPECT_EQ(*it++, Coordinate({0, 0})); - EXPECT_EQ(*it++, Coordinate({0, 1})); - EXPECT_EQ(*it++, Coordinate({0, 2})); - EXPECT_EQ(*it++, Coordinate({1, 0})); - EXPECT_EQ(*it++, Coordinate({1, 1})); - EXPECT_EQ(*it++, Coordinate({1, 2})); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, shape3d) { - auto ct = ov::CoordinateTransform({2, 3, 4}); - ASSERT_EQ(shape_size(ct.get_target_shape()), 24); - auto it = 
ct.begin(); - EXPECT_EQ(*it++, Coordinate({0, 0, 0})); - EXPECT_EQ(*it++, Coordinate({0, 0, 1})); - EXPECT_EQ(*it++, Coordinate({0, 0, 2})); - EXPECT_EQ(*it++, Coordinate({0, 0, 3})); - EXPECT_EQ(*it++, Coordinate({0, 1, 0})); - EXPECT_EQ(*it++, Coordinate({0, 1, 1})); - EXPECT_EQ(*it++, Coordinate({0, 1, 2})); - EXPECT_EQ(*it++, Coordinate({0, 1, 3})); - EXPECT_EQ(*it++, Coordinate({0, 2, 0})); - EXPECT_EQ(*it++, Coordinate({0, 2, 1})); - EXPECT_EQ(*it++, Coordinate({0, 2, 2})); - EXPECT_EQ(*it++, Coordinate({0, 2, 3})); - EXPECT_EQ(*it++, Coordinate({1, 0, 0})); - EXPECT_EQ(*it++, Coordinate({1, 0, 1})); - EXPECT_EQ(*it++, Coordinate({1, 0, 2})); - EXPECT_EQ(*it++, Coordinate({1, 0, 3})); - EXPECT_EQ(*it++, Coordinate({1, 1, 0})); - EXPECT_EQ(*it++, Coordinate({1, 1, 1})); - EXPECT_EQ(*it++, Coordinate({1, 1, 2})); - EXPECT_EQ(*it++, Coordinate({1, 1, 3})); - EXPECT_EQ(*it++, Coordinate({1, 2, 0})); - EXPECT_EQ(*it++, Coordinate({1, 2, 1})); - EXPECT_EQ(*it++, Coordinate({1, 2, 2})); - EXPECT_EQ(*it++, Coordinate({1, 2, 3})); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, zero_sized_axis) { - auto ct = ov::CoordinateTransform({2, 0, 4}); - ASSERT_EQ(shape_size(ct.get_target_shape()), 0); - auto it = ct.begin(); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, corner) { - Shape source_shape{10, 10}; - Coordinate source_start_corner = Coordinate{3, 3}; - Coordinate source_end_corner{6, 6}; - Strides source_strides = Strides(source_shape.size(), 1); - AxisVector source_axis_order(source_shape.size()); - iota(source_axis_order.begin(), source_axis_order.end(), 0); - CoordinateDiff target_padding_below = CoordinateDiff(source_shape.size(), 0); - CoordinateDiff target_padding_above = CoordinateDiff(source_shape.size(), 0); - Strides source_dilation_strides = Strides(source_shape.size(), 1); - - auto ct = ov::CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - source_strides, - source_axis_order, - target_padding_below, - target_padding_above, - source_dilation_strides); - - ASSERT_EQ(shape_size(ct.get_target_shape()), 9); - auto it = ct.begin(); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({3, 3})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({3, 4})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({3, 5})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 3})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 4})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 5})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({5, 3})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({5, 4})); - EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({5, 5})); - EXPECT_TRUE(it == ct.end()); -} - -TEST(coordinate, strides) { - Shape source_shape{10, 10}; - Coordinate source_start_corner = Coordinate{0, 0}; - Coordinate source_end_corner{source_shape}; - Strides source_strides = Strides({2, 3}); - AxisVector source_axis_order(source_shape.size()); - iota(source_axis_order.begin(), source_axis_order.end(), 0); - CoordinateDiff target_padding_below = CoordinateDiff(source_shape.size(), 0); - CoordinateDiff target_padding_above = CoordinateDiff(source_shape.size(), 0); - Strides source_dilation_strides = Strides(source_shape.size(), 1); - - auto ct = ov::CoordinateTransform(source_shape, - source_start_corner, - source_end_corner, - source_strides, - source_axis_order, - target_padding_below, - target_padding_above, - source_dilation_strides); - - ASSERT_EQ(shape_size(ct.get_target_shape()), 20); - 
auto it = ct.begin();
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 6}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 9}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 6}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 9}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 6}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({4, 9}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({6, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({6, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({6, 6}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({6, 9}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({8, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({8, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({8, 6}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({8, 9}));
- EXPECT_TRUE(it == ct.end());
-}
-
-TEST(coordinate, axis_order) {
- Shape source_shape{3, 2, 4};
- Coordinate source_start_corner = Coordinate{0, 0, 0};
- Coordinate source_end_corner{source_shape};
- Strides source_strides = Strides(source_shape.size(), 1);
- AxisVector source_axis_order({1, 2, 0});
- CoordinateDiff target_padding_below = CoordinateDiff(source_shape.size(), 0);
- CoordinateDiff target_padding_above = CoordinateDiff(source_shape.size(), 0);
- Strides source_dilation_strides = Strides(source_shape.size(), 1);
-
- auto ct = ov::CoordinateTransform(source_shape,
- source_start_corner,
- source_end_corner,
- source_strides,
- source_axis_order,
- target_padding_below,
- target_padding_above,
- source_dilation_strides);
-
- ASSERT_EQ(shape_size(ct.get_target_shape()), 24);
- auto it = ct.begin();
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 0, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 0, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 0, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 0, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 0, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 0, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 0, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 0, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 0, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 0, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 0, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 0, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 1, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 1, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 1, 0}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 1, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 1, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 1, 1}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 1, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 1, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 1, 2}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({0, 1, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({1, 1, 3}));
- EXPECT_EQ(ct.to_source_coordinate(*it++), Coordinate({2, 1, 3}));
- EXPECT_TRUE(it == ct.end());
-}
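The replacement idiom used across the pruning sources and tests in this patch deserves one self-contained illustration: instead of constructing the removed `CoordinateTransform` with begin/end corners that pin a single axis, callers now iterate a copy of the shape narrowed to 1 on that axis, pin the coordinate by hand, and compute the flat index against the full shape with `coordinate_index`. A minimal sketch mirroring the patched code (the buffer, axis, and channel are illustrative):

    #include <vector>
    #include "openvino/core/shape.hpp"
    #include "openvino/reference/utils/coordinate_index.hpp"
    #include "openvino/reference/utils/coordinate_transform.hpp"

    // Zero out channel `ch` along axis `dim` of a row-major buffer `values`.
    void zero_channel(std::vector<float>& values, const ov::Shape& shape, size_t dim, size_t ch) {
        auto narrow_shape = shape;
        narrow_shape[dim] = 1;  // visit every coordinate with axis `dim` collapsed
        ov::CoordinateTransformBasic iter(narrow_shape);
        for (auto coord : iter) {   // iterate by value so the coordinate can be patched
            coord[dim] = ch;        // pin the channel being visited
            values[ov::coordinate_index(coord, shape)] = 0.0f;  // index against the full shape
        }
    }

This keeps only the trivial row-major iterator and drops the begin/end-corner, stride, padding and dilation bookkeeping that the deprecated class carried.
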
From 994438b898cd27537cd211d1d79bff7e06324b4e Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Fri, 12 Jan 2024 10:32:56 +0400
Subject: [PATCH 12/43] [TF Hub] Move Notebooks tests to Kaggle links (#22108)

Signed-off-by: Kazantsev, Roman
---
 .../tf_hub_tests/test_tf_hub_api_notebooks.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_api_notebooks.py b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_api_notebooks.py
index 50c6f54f7c6c40..80b31329ed38d3 100644
--- a/tests/model_hub_tests/tf_hub_tests/test_tf_hub_api_notebooks.py
+++ b/tests/model_hub_tests/tf_hub_tests/test_tf_hub_api_notebooks.py
@@ -12,15 +12,17 @@ class TestTFHubApiNotebooks(TestConvertModel):
 def load_model(self, model_name, model_link):
 if model_name == 'mobilenet_v2_100_224_dict':
 image = tf.keras.layers.Input(shape=(224, 224, 3), dtype=tf.float32, name="image")
- feature_vector = hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/5",
- trainable=False)(image)
+ feature_vector = hub.KerasLayer(
+ "https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-feature-vector/versions/2",
+ trainable=False)(image)
 softmax = tf.keras.layers.Dense(20, activation='softmax')(feature_vector)
 classification_model = tf.keras.Model(inputs={'image': image}, outputs={'softmax': softmax})
 return classification_model
 elif model_name == 'mobilenet_v2_100_224_list':
 image = tf.keras.layers.Input(shape=(224, 224, 3), dtype=tf.float32, name="image")
- feature_vector = hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/5",
- trainable=False)(image)
+ feature_vector = hub.KerasLayer(
+ "https://www.kaggle.com/models/google/mobilenet-v2/frameworks/tensorFlow2/variations/100-224-feature-vector/versions/2",
+ trainable=False)(image)
 softmax = tf.keras.layers.Dense(20, activation='softmax')(feature_vector)
 classification_model = tf.keras.Model(inputs=[image], outputs=[softmax])
 return classification_model
@@ -30,7 +32,8 @@ def load_model(self, model_name, model_link):
 x1=tf.keras.layers.Input(shape=(200, 200, 3)),
 time=tf.keras.layers.Input(shape=(1)),
 )
- film_layer = hub.KerasLayer("https://tfhub.dev/google/film/1")(inputs)
+ film_layer = hub.KerasLayer(
+ "https://www.kaggle.com/models/google/film/frameworks/tensorFlow2/variations/film/versions/1")(inputs)
 film_model = tf.keras.Model(inputs=inputs, outputs=list(film_layer.values())[0])
 return film_model
 else:

From 9d8088338da28a51a847aa49a91ed7bfdcc6bc84 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Jan 2024 10:33:42 +0400
Subject: [PATCH 13/43] Bump jinja2 (#22111)

Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.2 to 3.1.3.
- [Release notes](https://github.com/pallets/jinja/releases)
- [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst)
- [Commits](https://github.com/pallets/jinja/compare/3.1.2...3.1.3)

---
updated-dependencies:
- dependency-name: jinja2
  dependency-type: direct:production
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../functional_test_utils/layer_tests_summary/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt b/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt index 685bfe2d7d2c8f..d90682d5abca85 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt @@ -1,3 +1,3 @@ -jinja2==3.1.2 +jinja2==3.1.3 defusedxml>=0.7.1 openpyxl==3.0.10 \ No newline at end of file From 7b4787cd8e81500f8a4efa77cb7ccb70c20b3eb8 Mon Sep 17 00:00:00 2001 From: Andrei Gorbachev Date: Fri, 12 Jan 2024 06:34:31 +0000 Subject: [PATCH 14/43] [GPU] Refactor (#22110) * add rnn_sequence * add scatter_elements_update V12 * fix << for v12 --- .../single_layer_tests/rnn_sequence.cpp | 30 +++++----- .../scatter_elements_update.cpp | 43 ++++++++++----- .../scatter_elements_update.hpp | 4 ++ .../single_op/scatter_elements_update.hpp | 19 +++++++ .../src/single_op/scatter_elements_update.cpp | 55 +++++++++++++++++++ 5 files changed, 120 insertions(+), 31 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp index f1ba2a1eaf2df3..7dcba48116e167 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp @@ -2,20 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include "single_layer_tests/rnn_sequence.hpp" +#include "single_op_tests/rnn_sequence.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { -std::vector mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ}; +using ov::test::RNNSequenceTest; + +std::vector mode{ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, + ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, + ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, + ov::test::utils::SequenceTestsMode::PURE_SEQ}; // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; @@ -26,10 +24,10 @@ std::vector> activations = {{"relu"}, {"sigmoid"}, {"ta std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, - ov::op::RecurrentSequenceDirection::REVERSE, - ov::op::RecurrentSequenceDirection::BIDIRECTIONAL, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL, }; -std::vector netPrecisions = {InferenceEngine::Precision::FP32}; 
+std::vector netPrecisions = {ov::element::f32}; INSTANTIATE_TEST_SUITE_P(RNNSequenceCommonZeroClip, RNNSequenceTest, ::testing::Combine( @@ -41,7 +39,7 @@ INSTANTIATE_TEST_SUITE_P(RNNSequenceCommonZeroClip, RNNSequenceTest, ::testing::ValuesIn(activations), ::testing::ValuesIn(clip), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), RNNSequenceTest::getTestCaseName); @@ -56,7 +54,7 @@ INSTANTIATE_TEST_SUITE_P(RNNSequenceCommonClip, RNNSequenceTest, ::testing::ValuesIn(activations), ::testing::ValuesIn(clip_non_zeros), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_GPU)), RNNSequenceTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp index ba6c4b7e0d773a..a306e8f7ce6594 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp @@ -2,15 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - -#include "single_layer_tests/scatter_elements_update.hpp" +#include "single_op_tests/scatter_elements_update.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { +using ov::test::ScatterElementsUpdateLayerTest; +using ov::test::ScatterElementsUpdate12LayerTest; + // map> std::map, std::map, std::vector>> axesShapeInShape { {{10, 12, 15}, {{{1, 2, 4}, {0, 1, 2}}, {{2, 2, 2}, {-1, -2, -3}}}}, @@ -23,21 +21,36 @@ const std::vector> idxValue = { {1, 0, 4, 6, 2, 3, 7, 5} }; -const std::vector inputPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I32, +const std::vector inputPrecisions = { + ov::element::f32, + ov::element::f16, + ov::element::i32, }; -const std::vector idxPrecisions = { - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I64, +const std::vector idxPrecisions = { + ov::element::i32, + ov::element::i64, }; +std::vector combine_shapes( + const std::map, std::map, std::vector>>& input_shapes) { + std::vector res_vec; + for (auto& input_shape : input_shapes) { + for (auto& item : input_shape.second) { + for (auto& elt : item.second) { + res_vec.push_back(ov::test::axisShapeInShape{ + ov::test::static_shapes_to_test_representation({input_shape.first, item.first}), + elt}); + } + } + } + return res_vec; +} + INSTANTIATE_TEST_SUITE_P( smoke_ScatterEltsUpdate, ScatterElementsUpdateLayerTest, - ::testing::Combine(::testing::ValuesIn(ScatterElementsUpdateLayerTest::combineShapes(axesShapeInShape)), + ::testing::Combine(::testing::ValuesIn(combine_shapes(axesShapeInShape)), ::testing::ValuesIn(idxValue), ::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(idxPrecisions), @@ -65,7 +78,7 @@ const std::vector> idxWithNegativeValues = { INSTANTIATE_TEST_SUITE_P( smoke_ScatterEltsUpdate12, ScatterElementsUpdate12LayerTest, - 
::testing::Combine(::testing::ValuesIn(ScatterElementsUpdateLayerTest::combineShapes(axesShapeInShape)),
+ ::testing::Combine(::testing::ValuesIn(combine_shapes(axesShapeInShape)),
 ::testing::ValuesIn(idxWithNegativeValues),
 ::testing::ValuesIn(reduceModes),
 ::testing::ValuesIn({true, false}),
 ::testing::ValuesIn(inputPrecisions),
 ::testing::ValuesIn(idxPrecisions),
 ::testing::Values(ov::test::utils::DEVICE_GPU)),
 ScatterElementsUpdate12LayerTest::getTestCaseName);

diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/scatter_elements_update.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/scatter_elements_update.hpp
index 66e7badad7f0b9..113aa46af3b4e4 100644
--- a/src/tests/functional/plugin/shared/include/single_op_tests/scatter_elements_update.hpp
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/scatter_elements_update.hpp
@@ -11,5 +11,9 @@ namespace test {
 TEST_P(ScatterElementsUpdateLayerTest, Inference) {
 run();
 }
+
+TEST_P(ScatterElementsUpdate12LayerTest, Inference) {
+ run();
+}
 } // namespace test
 } // namespace ov

diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/scatter_elements_update.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/scatter_elements_update.hpp
index 0a7ebea7a20809..757d6c64bb93ca 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/scatter_elements_update.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/scatter_elements_update.hpp
@@ -25,11 +25,30 @@ using scatterElementsUpdateParamsTuple = typename std::tuple<
 ov::test::TargetDevice // Device name
>;

+using scatterElementsUpdate12ParamsTuple = typename std::tuple<
+ axisShapeInShape, // Shape description
+ std::vector<int64_t>, // Indices value
+ ov::op::v12::ScatterElementsUpdate::Reduction, // Reduce mode
+ bool, // Use init value
+ ov::element::Type, // Model type
+ ov::element::Type, // Indices type
+ ov::test::TargetDevice // Device name
+>;
+
 class ScatterElementsUpdateLayerTest : public testing::WithParamInterface<scatterElementsUpdateParamsTuple>,
 virtual public ov::test::SubgraphBaseTest {
 public:
 static std::string getTestCaseName(const testing::TestParamInfo<scatterElementsUpdateParamsTuple> &obj);

+protected:
+ void SetUp() override;
+};
+
+class ScatterElementsUpdate12LayerTest : public testing::WithParamInterface<scatterElementsUpdate12ParamsTuple>,
+ virtual public ov::test::SubgraphBaseTest {
+public:
+ static std::string getTestCaseName(const testing::TestParamInfo<scatterElementsUpdate12ParamsTuple> &obj);
+
 protected:
 void SetUp() override;
 };

diff --git a/src/tests/functional/shared_test_classes/src/single_op/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_op/scatter_elements_update.cpp
index 9a2947820bb35e..ad64def009f5c5 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/scatter_elements_update.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/scatter_elements_update.cpp
@@ -53,5 +53,60 @@ void ScatterElementsUpdateLayerTest::SetUp() {
 auto scatter_elements_update = std::make_shared<ov::op::v3::ScatterElementsUpdate>(param, indices_const, update_param, axis_const);
 function = std::make_shared<ov::Model>(scatter_elements_update->outputs(), ov::ParameterVector{param, update_param}, "ScatterElementsUpdate");
 }
+
+std::string ScatterElementsUpdate12LayerTest::getTestCaseName(const testing::TestParamInfo<scatterElementsUpdate12ParamsTuple> &obj) {
+ auto shapes_ss = [](const InputShape& shape) {
+ std::stringstream ss;
+ ss << "_IS=(" << ov::test::utils::partialShape2str({shape.first}) << ")_TS=";
+ for (size_t j = 0lu; j < shape.second.size(); j++)
+ ss << "{" << ov::test::utils::vec2str(shape.second[j]) << "}";
+ return ss;
+ };
+
+ axisShapeInShape shapes_desc;
+ std::vector<InputShape> input_shapes;
+ int axis;
+ std::vector<int64_t> indices_value;
+ ov::op::v12::ScatterElementsUpdate::Reduction reduceMode;
+ bool useInitVal;
+ ov::element::Type model_type, indices_type;
+ std::string target_device;
+ std::tie(shapes_desc, indices_value, reduceMode, useInitVal, model_type, indices_type, target_device) = obj.param;
+ std::tie(input_shapes, axis) = shapes_desc;
+ std::ostringstream result;
+ result << "InputShape=" << shapes_ss(input_shapes.at(0)).str() << "_";
+ result << "IndicesShape=" << ov::test::utils::vec2str(input_shapes.at(1).second) << "_";
+ result << "Axis=" << axis << "_";
+ result << "ReduceMode=" << as_string(reduceMode) << "_";
+ result << "UseInitVal=" << useInitVal << "_";
+ result << "Indices=" << ov::test::utils::vec2str(indices_value) << "_";
+ result << "modelType=" << model_type.to_string() << "_";
+ result << "idxType=" << indices_type.to_string() << "_";
+ result << "trgDev=" << target_device;
+ return result.str();
+}
+
+void ScatterElementsUpdate12LayerTest::SetUp() {
+ axisShapeInShape shapes_desc;
+ std::vector<InputShape> input_shapes;
+ int axis;
+ std::vector<int64_t> indices_value;
+ ov::op::v12::ScatterElementsUpdate::Reduction reduceMode;
+ bool useInitVal;
+ ov::element::Type model_type, indices_type;
+ std::string target_device;
+ std::tie(shapes_desc, indices_value, reduceMode, useInitVal, model_type, indices_type, targetDevice) = this->GetParam();
+ std::tie(input_shapes, axis) = shapes_desc;
+
+ init_input_shapes(input_shapes);
+
+ auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.at(0));
+ auto update_param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.at(1));
+ auto indices_const = std::make_shared<ov::op::v0::Constant>(indices_type, targetStaticShapes.at(0).at(1), indices_value);
+ auto axis_const =
+ std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, std::vector<int32_t>{axis});
+ auto scatter_elements_update = std::make_shared<ov::op::v12::ScatterElementsUpdate>(param, indices_const, update_param, axis_const, reduceMode, useInitVal);
+ function = std::make_shared<ov::Model>(scatter_elements_update->outputs(), ov::ParameterVector{param, update_param}, "ScatterElementsUpdate");
+}
 } // namespace test
 } // namespace ov
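For readers unfamiliar with the opset-12 operation these new tests target: `ScatterElementsUpdate-12` adds a `Reduction` mode that resolves colliding indices by reducing all updates aimed at the same element, and a `use_init_val` flag that controls whether the original data value takes part in that reduction. A small worked example, with values chosen purely for illustration:

    // 1-D case, axis = 0:
    //   data    = {1, 2, 3, 4}
    //   indices = {0, 0}         (both updates target element 0)
    //   updates = {10, 20}
    // Reduction::SUM, use_init_val = true  -> out = {1 + 10 + 20, 2, 3, 4} = {31, 2, 3, 4}
    // Reduction::SUM, use_init_val = false -> out = {10 + 20,     2, 3, 4} = {30, 2, 3, 4}

This is why the v12 instantiation above crosses the reduce modes with `::testing::ValuesIn({true, false})` for the init-value flag.
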
From 2a2e6899d0ef92720e3d0ff0f662b88b01634a7b Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Fri, 12 Jan 2024 11:13:03 +0400
Subject: [PATCH 15/43] [Discord] Update the left Discord link to permanent one (#22117)

Signed-off-by: Kazantsev, Roman
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 2e9bdbf937483d..0df51e968941a7 100644
--- a/README.md
+++ b/README.md
@@ -186,7 +186,7 @@ Report questions, issues and suggestions, using:
 * [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
 * The [`openvino`](https://stackoverflow.com/questions/tagged/openvino) tag on StackOverflow\*
 * [Forum](https://software.intel.com/en-us/forums/computer-vision)
-* OpenVINO channels on the [Intel DevHub Discord server](https://discord.gg/wPuqAujS)
+* OpenVINO channels on the [Intel DevHub Discord server](https://discord.gg/7pVRxUwdWG)

 ## Additional Resources

From c8a1a700006c36bd8bce26c3aa4154500a60b0df Mon Sep 17 00:00:00 2001
From: Sebastian Golebiewski
Date: Fri, 12 Jan 2024 10:28:51 +0100
Subject: [PATCH 16/43] Fix images for notebooks (#22116)

---
 docs/articles_en/learn_openvino/tutorials.rst | 4 ++--
 docs/nbdoc/consts.py | 4 ++--
 .../openvino_sphinx_theme/directives/code.py | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/articles_en/learn_openvino/tutorials.rst b/docs/articles_en/learn_openvino/tutorials.rst
index f6ada6fa201175..ee0c83616f0f91 100644
---
a/docs/articles_en/learn_openvino/tutorials.rst +++ b/docs/articles_en/learn_openvino/tutorials.rst @@ -74,7 +74,7 @@ Below you will find a selection of recommended tutorials that demonstrate infere .. showcase:: :title: 270-sound-generation-audioldm2 :img: https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/270-sound-generation-audioldm2/270-sound-generation-audioldm2.png - + Sound Generation with AudioLDM2 and OpenVINO. .. showcase:: @@ -135,7 +135,7 @@ Additional Resources .. |binder logo| image:: https://mybinder.org/badge_logo.svg :alt: Binder button -.. |colab logo| image:: https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667 +.. |colab logo| image:: https://colab.research.google.com/assets/colab-badge.svg :width: 109 :alt: Google Colab button diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 46f8c9480258bf..22a1fda6418f49 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -51,7 +51,7 @@ .. |colab_link| raw:: html - Google Colab + Google Colab .. |github_link| raw:: html @@ -76,7 +76,7 @@ .. |colab_link| raw:: html - Google Colab + Google Colab .. |github_link| raw:: html diff --git a/docs/openvino_sphinx_theme/openvino_sphinx_theme/directives/code.py b/docs/openvino_sphinx_theme/openvino_sphinx_theme/directives/code.py index 8fe35b75f2d587..a613bfad8f1c9a 100644 --- a/docs/openvino_sphinx_theme/openvino_sphinx_theme/directives/code.py +++ b/docs/openvino_sphinx_theme/openvino_sphinx_theme/directives/code.py @@ -154,7 +154,7 @@ def visit_showcase(self, node): self.body.append("") self.body.append(""+os.path.basename(node["img"]) + "") if "img" in node is not None else "" - + self.body.append("
") @@ -164,7 +164,7 @@ def depart_showcase(self, node): notebooks_colab = "https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/main/" git_badge = "Github" binder_badge = "Binder" - colab_badge = "Colab" + colab_badge = "Colab" binder_list_file = Path('../../../docs/notebooks/notebooks_with_binder_buttons.txt').resolve(strict=True) colab_list_file = Path('../../../docs/notebooks/notebooks_with_colab_buttons.txt').resolve(strict=True) openvino_notebooks_repo_listing = Path('../../../docs/notebooks/all_notebooks_paths.txt').resolve(strict=True) From bd5b3434e7531d0775e214630de603c15e84348a Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Fri, 12 Jan 2024 14:11:26 +0400 Subject: [PATCH 17/43] Added missing cache_mode declarations for python and c API (#22094) --- src/bindings/c/include/openvino/c/ov_property.h | 9 +++++++++ src/bindings/c/src/ov_property.cpp | 1 + .../python/src/openvino/properties/__init__.py | 1 + .../src/openvino/runtime/properties/__init__.py | 2 ++ .../src/pyopenvino/core/properties/properties.cpp | 4 ++++ .../python/src/pyopenvino/utils/utils.cpp | 2 ++ .../python/tests/test_runtime/test_properties.py | 15 +++++++++++++++ 7 files changed, 34 insertions(+) diff --git a/src/bindings/c/include/openvino/c/ov_property.h b/src/bindings/c/include/openvino/c/ov_property.h index b291f73c028678..dbefcbb366a0e5 100644 --- a/src/bindings/c/include/openvino/c/ov_property.h +++ b/src/bindings/c/include/openvino/c/ov_property.h @@ -97,6 +97,15 @@ ov_property_key_max_batch_size; OPENVINO_C_VAR(const char*) ov_property_key_cache_dir; +/** + * @brief Read-write property to select the cache mode between optimize_size and optimize_speed. + * If optimize_size is selected, smaller cache files will be created. + * And if optimize_speed is selected, loading time will decrease but the cache file size will increase. + * @ingroup ov_property_c_api + */ +OPENVINO_C_VAR(const char*) +ov_property_key_cache_mode; + /** * @brief Read-write property to set/get the number of executor logical partitions. 
 * @ingroup ov_property_c_api
diff --git a/src/bindings/c/src/ov_property.cpp b/src/bindings/c/src/ov_property.cpp
index 5aaea4c614ba94..8a083e2afd8c41 100644
--- a/src/bindings/c/src/ov_property.cpp
+++ b/src/bindings/c/src/ov_property.cpp
@@ -19,6 +19,7 @@ const char* ov_property_key_max_batch_size = "MAX_BATCH_SIZE";

 // Read-write property key
 const char* ov_property_key_cache_dir = "CACHE_DIR";
+const char* ov_property_key_cache_mode = "CACHE_MODE";
 const char* ov_property_key_num_streams = "NUM_STREAMS";
 const char* ov_property_key_affinity = "AFFINITY";
 const char* ov_property_key_inference_num_threads = "INFERENCE_NUM_THREADS";
diff --git a/src/bindings/python/src/openvino/properties/__init__.py b/src/bindings/python/src/openvino/properties/__init__.py
index b611cf3a085b0d..237d7c0d7dd8e3 100644
--- a/src/bindings/python/src/openvino/properties/__init__.py
+++ b/src/bindings/python/src/openvino/properties/__init__.py
@@ -4,6 +4,7 @@

 # Enums
 from openvino._pyopenvino.properties import Affinity
+from openvino._pyopenvino.properties import CacheMode

 # Properties
 import openvino._pyopenvino.properties as __properties
diff --git a/src/bindings/python/src/openvino/runtime/properties/__init__.py b/src/bindings/python/src/openvino/runtime/properties/__init__.py
index fc3ac92f8c5e4a..c70b24882849d4 100644
--- a/src/bindings/python/src/openvino/runtime/properties/__init__.py
+++ b/src/bindings/python/src/openvino/runtime/properties/__init__.py
@@ -4,10 +4,12 @@

 # Enums
 from openvino._pyopenvino.properties import Affinity
+from openvino._pyopenvino.properties import CacheMode

 # Properties
 from openvino._pyopenvino.properties import enable_profiling
 from openvino._pyopenvino.properties import cache_dir
+from openvino._pyopenvino.properties import cache_mode
 from openvino._pyopenvino.properties import auto_batch_timeout
 from openvino._pyopenvino.properties import num_streams
 from openvino._pyopenvino.properties import inference_num_threads
diff --git a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp
index d2e82022068160..2491afcf606ba6 100644
--- a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp
+++ b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp
@@ -21,6 +21,10 @@ void regmodule_properties(py::module m) {
         .value("NUMA", ov::Affinity::NUMA)
         .value("HYBRID_AWARE", ov::Affinity::HYBRID_AWARE);

+    py::enum_<ov::CacheMode>(m_properties, "CacheMode", py::arithmetic())
+        .value("OPTIMIZE_SIZE", ov::CacheMode::OPTIMIZE_SIZE)
+        .value("OPTIMIZE_SPEED", ov::CacheMode::OPTIMIZE_SPEED);
+
     // Submodule properties - properties
     wrap_property_RW(m_properties, ov::enable_profiling, "enable_profiling");
     wrap_property_RW(m_properties, ov::cache_dir, "cache_dir");
diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp
index 2d94ea21f3092e..7f5cf58b8ff45c 100644
--- a/src/bindings/python/src/pyopenvino/utils/utils.cpp
+++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp
@@ -186,6 +186,8 @@ py::object from_ov_any(const ov::Any& any) {
         return py::cast(any.as());
     } else if (any.is<ov::Affinity>()) {
         return py::cast(any.as<ov::Affinity>());
+    } else if (any.is<ov::CacheMode>()) {
+        return py::cast(any.as<ov::CacheMode>());
     } else if (any.is<ov::device::UUID>()) {
         std::stringstream uuid_stream;
         uuid_stream << any.as<ov::device::UUID>();
diff --git a/src/bindings/python/tests/test_runtime/test_properties.py b/src/bindings/python/tests/test_runtime/test_properties.py
index eb42b59743dea6..ca280bd33b7144 100644
--- a/src/bindings/python/tests/test_runtime/test_properties.py
+++ b/src/bindings/python/tests/test_runtime/test_properties.py
@@ -52,6 +52,13 @@ def test_properties_rw_base():
             (props.Affinity.HYBRID_AWARE, "Affinity.HYBRID_AWARE", 2),
         ),
     ),
+    (
+        props.CacheMode,
+        (
+            (props.CacheMode.OPTIMIZE_SIZE, "CacheMode.OPTIMIZE_SIZE", 0),
+            (props.CacheMode.OPTIMIZE_SPEED, "CacheMode.OPTIMIZE_SPEED", 1),
+        ),
+    ),
     (
         hints.Priority,
         (
@@ -206,6 +213,14 @@ def test_properties_ro(ov_property_ro, expected_value):
         "CACHE_DIR",
         (("./test_cache", "./test_cache"),),
     ),
+    (
+        props.cache_mode,
+        "CACHE_MODE",
+        (
+            (props.CacheMode.OPTIMIZE_SIZE, props.CacheMode.OPTIMIZE_SIZE),
+            (props.CacheMode.OPTIMIZE_SPEED, props.CacheMode.OPTIMIZE_SPEED),
+        ),
+    ),
     (
         props.auto_batch_timeout,
         "AUTO_BATCH_TIMEOUT",
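Since the patch above only exposes an already-existing core property through the Python and C bindings, it may help to see the intended usage. The following is an illustrative sketch against the C++ core API; the device name and cache directory are assumptions for the example, not values mandated by the patch:

    // Hedged sketch: enabling the model cache and selecting a cache mode.
    // "GPU" and "model_cache" are example values.
    #include "openvino/openvino.hpp"

    int main() {
        ov::Core core;
        core.set_property(ov::cache_dir("model_cache"));
        // OPTIMIZE_SIZE yields smaller cache files; OPTIMIZE_SPEED loads faster
        // at the cost of larger files (see the property documentation above).
        core.set_property("GPU", ov::cache_mode(ov::CacheMode::OPTIMIZE_SIZE));
        return 0;
    }
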
From 9b52795a631dd2d462c57f46d2abcadb0a6b0270 Mon Sep 17 00:00:00 2001
From: Mingyu Kim
Date: Fri, 12 Jan 2024 19:26:06 +0900
Subject: [PATCH 18/43] [GPU] New debug config ImplsCacheCapacity (#22098)

---
 .../include/intel_gpu/runtime/debug_configuration.hpp | 1 +
 src/plugins/intel_gpu/src/graph/primitive_inst.cpp    | 4 +++-
 src/plugins/intel_gpu/src/graph/program.cpp           | 9 ++++++++-
 .../intel_gpu/src/runtime/debug_configuration.cpp     | 3 +++
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp
index 0a5fb7513ee379..73b9a0c7fdd7d6 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/debug_configuration.hpp
@@ -123,6 +123,7 @@ class debug_configuration {
     int serialize_compile;                      // Serialize creating primitives and compiling kernels
     std::vector<std::string> forced_impl_types; // Force implementation type either ocl or onednn
     int max_kernels_per_batch;                  // Maximum number of kernels in a batch during compiling kernels
+    int impls_cache_capacity;                   // The maximum number of entries in the kernel impl cache
     int disable_async_compilation;              // Disable async compilation
     int disable_winograd_conv;                  // Disable Winograd conv
     int disable_dynamic_impl;                   // Disable dynamic implementation
diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 6f0e94b197b13b..2abae896317657 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -549,7 +549,9 @@ event::ptr primitive_inst::realloc_if_needed() {
     updated_params.output_layouts[0] = updated_layout;

     if (can_reuse_buffer) {
-        GPU_DEBUG_TRACE_DETAIL << id() << ": reuse previously allocated output buffer" << std::endl;
+        GPU_DEBUG_TRACE_DETAIL << id() << ": reuse previously allocated output buffer - "
+                               << actual_layout.count() << "/" << max_output_layout_size
+                               << std::endl;
         if (_outputs[0]->get_layout() != actual_layout) {
             _outputs[0] = _network.get_engine().reinterpret_buffer(*_outputs[0], actual_layout);
         }
diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp
index fcf8982f45b20c..43d0efc3dce4cd 100644
--- a/src/plugins/intel_gpu/src/graph/program.cpp
+++ b/src/plugins/intel_gpu/src/graph/program.cpp
@@ -216,6 +216,7 @@ program::~program() {
 }

 void program::init_program() {
+    GPU_DEBUG_GET_INSTANCE(debug_config);
     set_options();

     pm = std::unique_ptr<pass_manager>(new pass_manager(*this));
@@ -228,7 +229,13 @@ void program::init_program() {
     if (!_compilation_context)
         _compilation_context = program::make_compilation_context(_config);
-    _impls_cache = cldnn::make_unique<ImplementationsCache>(_impls_cache_capacity);
+
+    size_t impls_cache_capacity = _impls_cache_capacity;
+    GPU_DEBUG_IF(debug_config->impls_cache_capacity >= 0) {
+        impls_cache_capacity = debug_config->impls_cache_capacity;
+    }
+
+    _impls_cache = cldnn::make_unique<ImplementationsCache>(impls_cache_capacity);
     // Remove items of compilation context's internal queue when some impl is popped in kernels_cache
     // compilation context's queue check duplication of inserted task
     _impls_cache->set_remove_item_callback([this](ImplementationsCache::ItemType& item) {
diff --git a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
index 6866e85220a611..5fad5093d94cd7 100644
--- a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
+++ b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
@@ -136,6 +136,7 @@ static void print_help_messages() {
                               " For example fc:onednn gemm:onednn reduce:ocl do:cpu"
                               " For primitives fc, gemm, do, reduce, concat are supported. Separated by space.");
     message_list.emplace_back("OV_GPU_MaxKernelsPerBatch", "Maximum number of kernels in a batch during compiling kernels");
+    message_list.emplace_back("OV_GPU_ImplsCacheCapacity", "The maximum number of entries in the kernel impl cache");
     message_list.emplace_back("OV_GPU_DisableAsyncCompilation", "Disable async compilation");
     message_list.emplace_back("OV_GPU_DisableWinogradConv", "Disable Winograd convolution");
     message_list.emplace_back("OV_GPU_DisableDynamicImpl", "Disable dynamic implementation");
@@ -196,6 +197,7 @@ debug_configuration::debug_configuration()
     , base_batch_for_memory_estimation(-1)
     , serialize_compile(0)
     , max_kernels_per_batch(0)
+    , impls_cache_capacity(-1)
     , disable_async_compilation(0)
     , disable_winograd_conv(0)
     , disable_dynamic_impl(0)
@@ -236,6 +238,7 @@ debug_configuration::debug_configuration()
     std::string forced_impl_types_str;
     get_gpu_debug_env_var("ForceImplTypes", forced_impl_types_str);
     get_gpu_debug_env_var("MaxKernelsPerBatch", max_kernels_per_batch);
+    get_gpu_debug_env_var("ImplsCacheCapacity", impls_cache_capacity);
     get_gpu_debug_env_var("DisableAsyncCompilation", disable_async_compilation);
     get_gpu_debug_env_var("DisableWinogradConv", disable_winograd_conv);
     get_gpu_debug_env_var("DisableDynamicImpl", disable_dynamic_impl);
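Because the new knob is read from an environment variable rather than a public API, a quick sketch of how it would be exercised may be useful. Note that OV_GPU_* debug variables generally require a build with GPU debug capabilities enabled, and the model path, device, and capacity value below are assumptions for the example:

    // Hedged sketch: steering the GPU impls cache capacity via the new
    // OV_GPU_ImplsCacheCapacity variable before the plugin is created.
    #include <cstdlib>
    #include "openvino/openvino.hpp"

    int main() {
        // POSIX; on Windows use _putenv_s. Per the diff above, a negative
        // value (the -1 default) keeps the built-in cache capacity.
        setenv("OV_GPU_ImplsCacheCapacity", "64", 1);
        ov::Core core;
        auto compiled = core.compile_model("model.xml", "GPU");
        return 0;
    }
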
From 44823dd59e9338b6e3d3a18051f8c52610db9556 Mon Sep 17 00:00:00 2001
From: Georgy Krivoruchko
Date: Fri, 12 Jan 2024 02:43:19 -0800
Subject: [PATCH 19/43] Updated org.openvinotoolkit/* (#22121)

---
 .../deformable_conv_2d.cpp                    | 35 ++++++++-----
 .../deformable_conv_2d.hpp                    |  1 -
 .../org.openvinotoolkit/detection_output.cpp  | 21 ++++----
 .../org.openvinotoolkit/detection_output.hpp  |  1 -
 .../op/org.openvinotoolkit/fake_quantize.cpp  |  8 +--
 .../op/org.openvinotoolkit/fake_quantize.hpp  |  1 -
 .../generate_proposals.cpp                    | 27 ++++++----
 .../src/op/org.openvinotoolkit/group_norm.cpp | 16 +++---
 .../src/op/org.openvinotoolkit/group_norm.hpp |  1 -
 .../src/op/org.openvinotoolkit/normalize.cpp  | 25 +++++----
 .../src/op/org.openvinotoolkit/normalize.hpp  |  1 -
 .../src/op/org.openvinotoolkit/prior_box.cpp  | 51 ++++++++++---------
 .../src/op/org.openvinotoolkit/prior_box.hpp  |  1 -
 .../src/op/org.openvinotoolkit/swish.cpp      | 13 ++---
 .../src/op/org.openvinotoolkit/swish.hpp      |  1 -
 15 files changed, 109 insertions(+), 94 deletions(-)

diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp
index 72b3a902fffae9..94ac1b8b12e9a4 100644
--- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp
+++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp
@@ -16,14 +16,16 @@

 #include "op/org.openvinotoolkit/deformable_conv_2d.hpp"

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/deformable_convolution.hpp"
+#include "openvino/op/deformable_convolution.hpp"
 #include "utils/convpool.hpp"

+using namespace ov::op;
+
 namespace ngraph {
 namespace onnx_import {
-OutputVector op::set_1::deformable_conv_2d(const Node& node) {
+namespace op {
+namespace set_1 {
+OutputVector deformable_conv_2d(const Node& node) {
     const OutputVector& inputs = node.get_ng_inputs();
     const auto strides = convpool::get_strides(node);
     const auto dilations = convpool::get_dilations(node);
@@ -33,16 +35,21 @@ OutputVector op::set_1::deformable_conv_2d(const Node& node) {
     const auto deformable_groups = node.get_attribute_value<int64_t>("deformable_groups", 1);
     const auto auto_pad_type = convpool::get_auto_pad(node);

-    return {std::make_shared<default_opset::DeformableConvolution>(inputs.at(0),
-                                                                   inputs.at(1),
-                                                                   inputs.at(2),
-                                                                   strides,
-                                                                   paddings.first,
-                                                                   paddings.second,
-                                                                   dilations,
-                                                                   auto_pad_type,
-                                                                   group,
-                                                                   deformable_groups)};
+    return {std::make_shared<v8::DeformableConvolution>(inputs.at(0),
+                                                        inputs.at(1),
+                                                        inputs.at(2),
+                                                        strides,
+                                                        paddings.first,
+                                                        paddings.second,
+                                                        dilations,
+                                                        auto_pad_type,
+                                                        group,
+                                                        deformable_groups)};
 }
+}  // namespace set_1
+
+}  // namespace op
+
 }  // namespace onnx_import
+
 }  // namespace ngraph
diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp
index 98d4988b3564c8..5de5a21c8d3854 100644
--- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp
+++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp
@@ -19,7 +19,6 @@
 #include "openvino/core/deprecated.hpp"
 OPENVINO_SUPPRESS_DEPRECATED_START

-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"

 namespace ngraph {
diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
index ce40105451edd4..1eef678c6ec2b0 100644
--- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
+++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp
@@ -4,11 +4,12 @@

 #include "ngraph/op/detection_output.hpp"

-#include "default_opset.hpp"
-#include "ngraph/node.hpp"
 #include "onnx_import/core/node.hpp"
 #include "op/org.openvinotoolkit/detection_output.hpp"
 #include "openvino/frontend/exception.hpp"
+#include "openvino/op/detection_output.hpp"
+
+using namespace ov::op;

 namespace ngraph {
 namespace onnx_import {
@@ -21,7 +22,7 @@ OutputVector detection_output(const Node& node) {
     auto class_preds = inputs[1];
     auto proposals = inputs[2];

-    ov::op::v8::DetectionOutput::Attributes attrs;
+    v8::DetectionOutput::Attributes attrs;
     attrs.background_label_id = static_cast<int>(node.get_attribute_value<int64_t>("background_label_id", 0));
     attrs.top_k = static_cast<int>(node.get_attribute_value<int64_t>("top_k", -1));
     attrs.variance_encoded_in_target = node.get_attribute_value<int64_t>("variance_encoded_in_target", 0);
@@ -51,16 +52,16 @@ OutputVector detection_output(const Node& node) {
     attrs.objectness_score = node.get_attribute_value<float>("objectness_score", 0);

     if (inputs.size()
== 3) { - return {std::make_shared(box_logits, class_preds, proposals, attrs)}; + return {std::make_shared(box_logits, class_preds, proposals, attrs)}; } else if (inputs.size() == 5) { auto aux_class_preds = inputs[3]; auto aux_box_preds = inputs[4]; - return {std::make_shared(box_logits, - class_preds, - proposals, - aux_class_preds, - aux_box_preds, - attrs)}; + return {std::make_shared(box_logits, + class_preds, + proposals, + aux_class_preds, + aux_box_preds, + attrs)}; } else { FRONT_END_GENERAL_CHECK(false, "Invalid number of inputs"); } diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp index e20810e2b4e0a0..28edc4e737ada8 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp index 692e7589dd720f..b88f2c0b850107 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp @@ -6,13 +6,15 @@ #include -#include "default_opset.hpp" +#include "openvino/op/fake_quantize.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace set_1 { -OutputVector fake_quantize(const onnx_import::Node& node) { +OutputVector fake_quantize(const Node& node) { const auto inputs = node.get_ng_inputs(); const auto X = inputs.at(0); const auto input_low = inputs.at(1); @@ -22,7 +24,7 @@ OutputVector fake_quantize(const onnx_import::Node& node) { const auto levels = node.get_attribute_value("levels"); - return {std::make_shared(X, input_low, input_high, output_low, output_high, levels)}; + return {std::make_shared(X, input_low, input_high, output_low, output_high, levels)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp index 497b7b430482cc..6f26d64cb48de5 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp index ed7782cbef63e8..9cae371d68bdfe 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp @@ -4,7 +4,14 @@ #include "generate_proposals.hpp" -#include "default_opset.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/generate_proposals.hpp" +#include "openvino/op/shape_of.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -36,7 +43,7 @@ OutputVector 
generate_proposals(const Node& node) { const auto& im_info = inputs[2]; // shape [N, 3] or [N, 4] const auto& anchors = inputs[3]; // shape [A, 4] - ov::op::v9::GenerateProposals::Attributes attrs; + v9::GenerateProposals::Attributes attrs; attrs.min_size = node.get_attribute_value("min_size", 1.f); attrs.nms_threshold = node.get_attribute_value("nms_thresh", 0.7f); attrs.pre_nms_count = node.get_attribute_value("pre_nms_topN", 6000); @@ -44,16 +51,16 @@ OutputVector generate_proposals(const Node& node) { attrs.normalized = !node.get_attribute_value("legacy_plus_one", true); // Broadcast anchors from [A, 4] to [H, W, A, 4] where [H, W] is taken from scores shape. - const auto zero = default_opset::Constant::create(element::i64, Shape{1}, {0}); - const auto scores_shape = std::make_shared(scores); - const auto anchors_shape = std::make_shared(anchors); - const auto scores_shape_tail = default_opset::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto new_anchors_shape_front = std::make_shared(scores_shape, scores_shape_tail, zero); + const auto zero = v0::Constant::create(element::i64, Shape{1}, {0}); + const auto scores_shape = std::make_shared(scores); + const auto anchors_shape = std::make_shared(anchors); + const auto scores_shape_tail = v0::Constant::create(element::i64, Shape{2}, {2, 3}); + const auto new_anchors_shape_front = std::make_shared(scores_shape, scores_shape_tail, zero); const auto new_anchors_shape = - std::make_shared(OutputVector{new_anchors_shape_front, anchors_shape}, 0); - const auto new_anchors = std::make_shared(anchors, new_anchors_shape); + std::make_shared(OutputVector{new_anchors_shape_front, anchors_shape}, 0); + const auto new_anchors = std::make_shared(anchors, new_anchors_shape); - const auto proposals = std::make_shared(im_info, new_anchors, deltas, scores, attrs); + const auto proposals = std::make_shared(im_info, new_anchors, deltas, scores, attrs); return proposals->outputs(); } diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp index 585084e9409ba9..c59dc2b2f36885 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp @@ -4,14 +4,12 @@ #include "op/org.openvinotoolkit/group_norm.hpp" -#include "default_opset.hpp" -#include "ngraph/node.hpp" -#include "ngraph/opsets/opset5.hpp" #include "onnx_import/core/node.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/opsets/opset12.hpp" -#include "utils/common.hpp" -#include "utils/reshape.hpp" +#include "openvino/op/group_normalization.hpp" +#include "openvino/op/squeeze.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -30,13 +28,13 @@ OutputVector group_norm(const Node& node) { float eps = node.get_attribute_value("eps", 1e-6f); if (!scale.get_partial_shape().rank().compatible(1)) { - scale = std::make_shared(scale); + scale = std::make_shared(scale); } if (!bias.get_partial_shape().rank().compatible(1)) { - bias = std::make_shared(bias); + bias = std::make_shared(bias); } - return {std::make_shared(data, scale, bias, num_groups, eps)}; + return {std::make_shared(data, scale, bias, num_groups, eps)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp index 46ab07f27fa077..a7bb55652db67a 100644 --- 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp index dd82e4cc68d501..7c54595a27141b 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp @@ -4,12 +4,16 @@ #include "op/org.openvinotoolkit/normalize.hpp" -#include "default_opset.hpp" -#include "ngraph/op/normalize_l2.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/normalize_l2.hpp" +#include "openvino/op/reshape.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/common.hpp" +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace op { @@ -23,7 +27,7 @@ OutputVector normalize(const Node& node) { int64_t across_spatial = node.get_attribute_value("across_spatial", 0); int64_t channel_shared = node.get_attribute_value("channel_shared", 0); - std::shared_ptr weights; + std::shared_ptr weights; if (channel_shared) { FRONT_END_GENERAL_CHECK(ov::op::util::is_constant(inputs[1].get_node()), "Weights input must be a constant if channel_shared is set to 1"); @@ -42,24 +46,23 @@ OutputVector normalize(const Node& node) { for (int64_t i = 2; i < data_shape.rank().get_length(); ++i) { weights_shape.push_back(1); } - auto new_shape = - std::make_shared(element::i64, Shape{weights_shape.size()}, weights_shape); - weights = std::make_shared(inputs[1], new_shape, true); + auto new_shape = std::make_shared(element::i64, Shape{weights_shape.size()}, weights_shape); + weights = std::make_shared(inputs[1], new_shape, true); } - std::shared_ptr axes; + std::shared_ptr axes; if (!across_spatial) { - axes = std::make_shared(element::i64, Shape{1}, std::vector{1}); + axes = std::make_shared(element::i64, Shape{1}, std::vector{1}); } else { axes = common::get_monotonic_range_along_node_rank(data, 1); } - return {std::make_shared( - std::make_shared(data, axes, eps, ngraph::op::EpsMode::ADD), - weights)}; + return {std::make_shared(std::make_shared(data, axes, eps, ov::op::EpsMode::ADD), + weights)}; } } // namespace set_1 + } // namespace op } // namespace onnx_import diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp index a265980052bd02..f47929a97b636d 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp index d5a0fe303b1b5f..63c26e61c9f4fd 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp @@ -4,27 +4,30 @@ #include "ngraph/op/prior_box.hpp" -#include "default_opset.hpp" #include 
"exceptions.hpp" -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" #include "op/org.openvinotoolkit/prior_box.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/prior_box.hpp" +#include "openvino/op/prior_box_clustered.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/unsqueeze.hpp" + +using namespace ov::op; namespace ngraph { namespace onnx_import { namespace op { namespace detail { namespace { -std::shared_ptr make_slice(std::shared_ptr node, - int64_t start, - int64_t end) { - return std::make_shared( - node, - default_opset::Constant::create(element::i64, Shape{1}, std::vector{start}), - default_opset::Constant::create(element::i64, Shape{1}, std::vector{end}), - std::vector{0}, // begin mask - std::vector{0}); // end mask +std::shared_ptr make_slice(std::shared_ptr node, int64_t start, int64_t end) { + return std::make_shared(node, + v0::Constant::create(element::i64, Shape{1}, std::vector{start}), + v0::Constant::create(element::i64, Shape{1}, std::vector{end}), + std::vector{0}, // begin mask + std::vector{0}); // end mask } } // namespace } // namespace detail @@ -34,12 +37,12 @@ OutputVector prior_box(const Node& node) { auto inputs = node.get_ng_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); - auto output_shape = std::make_shared(inputs[0]); - auto image_shape = std::make_shared(inputs[1]); + auto output_shape = std::make_shared(inputs[0]); + auto image_shape = std::make_shared(inputs[1]); auto output_shape_slice = detail::make_slice(output_shape, 2, 4); auto image_shape_slice = detail::make_slice(image_shape, 2, 4); - ngraph::op::v8::PriorBox::Attributes attrs; + ov::op::v8::PriorBox::Attributes attrs; attrs.min_size = node.get_attribute_value>("min_size", {}); attrs.max_size = node.get_attribute_value>("max_size", {}); attrs.aspect_ratio = node.get_attribute_value>("aspect_ratio", {}); @@ -54,11 +57,11 @@ OutputVector prior_box(const Node& node) { attrs.density = node.get_attribute_value>("density", {}); attrs.min_max_aspect_ratios_order = node.get_attribute_value("min_max_aspect_ratios_order", 1); - auto axes = default_opset::Constant::create(element::i64, Shape{1}, std::vector{0}); + auto axes = v0::Constant::create(element::i64, Shape{1}, std::vector{0}); - return {std::make_shared( - std::make_shared(output_shape_slice, image_shape_slice, attrs), - axes)}; + return { + std::make_shared(std::make_shared(output_shape_slice, image_shape_slice, attrs), + axes)}; } OutputVector prior_box_clustered(const Node& node) { @@ -78,12 +81,12 @@ OutputVector prior_box_clustered(const Node& node) { image_shape_rank, " (should be 4)"); - auto output_shape = std::make_shared(inputs[0]); - auto image_shape = std::make_shared(inputs[1]); + auto output_shape = std::make_shared(inputs[0]); + auto image_shape = std::make_shared(inputs[1]); auto output_shape_slice = detail::make_slice(output_shape, 2, 4); auto image_shape_slice = detail::make_slice(image_shape, 2, 4); - default_opset::PriorBoxClustered::Attributes attrs{}; + v0::PriorBoxClustered::Attributes attrs{}; attrs.widths = node.get_attribute_value>("width"); attrs.heights = node.get_attribute_value>("height"); attrs.clip = static_cast(node.get_attribute_value("clip", 0)); @@ -93,10 +96,10 @@ OutputVector prior_box_clustered(const Node& node) { attrs.step = node.get_attribute_value("step", 0.0f); attrs.offset = node.get_attribute_value("offset", 0.0f); - auto axes = 
default_opset::Constant::create(element::i64, Shape{1}, std::vector{0}); + auto axes = v0::Constant::create(element::i64, Shape{1}, std::vector{0}); - return {std::make_shared( - std::make_shared(output_shape_slice, image_shape_slice, attrs), + return {std::make_shared( + std::make_shared(output_shape_slice, image_shape_slice, attrs), axes)}; } diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp index 9bf1db0d9bf276..3e24fb25b3d8d5 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp @@ -7,7 +7,6 @@ #include "openvino/core/deprecated.hpp" OPENVINO_SUPPRESS_DEPRECATED_START -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp index 6aefa4c6032e68..da94cd72810b20 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp @@ -4,12 +4,13 @@ #include "op/org.openvinotoolkit/swish.hpp" -#include "default_opset.hpp" -#include "ngraph/op/normalize_l2.hpp" -#include "op/org.openvinotoolkit/normalize.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/swish.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace op { @@ -17,14 +18,14 @@ namespace set_1 { OutputVector swish(const Node& node) { OutputVector ng_inputs{node.get_ng_inputs()}; - Output beta; + Output beta; if (ng_inputs.size() > 1) { beta = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(1)); } else { - beta = default_opset::Constant::create(element::f32, Shape{}, {1.0}); + beta = v0::Constant::create(element::f32, Shape{}, {1.0}); } - return {std::make_shared(ng_inputs.at(0), beta)}; + return {std::make_shared(ng_inputs.at(0), beta)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp index 0b89434bed01f5..4288e8162032dc 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp @@ -9,7 +9,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include -#include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" namespace ngraph { From 8319230515f29f04a3c98b4dbe0bc9e3c404c37b Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Fri, 12 Jan 2024 03:08:40 -0800 Subject: [PATCH 20/43] [ONNX] Frontend refactoring (#22122) * Updated convpool.cpp/hpp * Updated dft.cpp * Updated onnx_internal.cpp/hpp * Updated polling_factory.cpp/hpp * Updated recurrent.cpp/hpp * Updated reshape.cpp/hpp --- src/frontends/onnx/frontend/src/frontend.cpp | 2 +- .../onnx/frontend/src/utils/convpool.cpp | 65 ++++++++-------- .../onnx/frontend/src/utils/convpool.hpp | 18 ++--- src/frontends/onnx/frontend/src/utils/dft.cpp | 41 +++++----- .../onnx/frontend/src/utils/onnx_internal.cpp | 56 +++++++------- .../onnx/frontend/src/utils/onnx_internal.hpp | 34 ++++----- .../frontend/src/utils/pooling_factory.cpp | 62 +++++++-------- .../frontend/src/utils/pooling_factory.hpp | 16 ++-- .../onnx/frontend/src/utils/recurrent.cpp | 76 +++++++++---------- .../onnx/frontend/src/utils/recurrent.hpp | 12 +-- 
.../onnx/frontend/src/utils/reshape.cpp | 38 ++++++---- 11 files changed, 214 insertions(+), 206 deletions(-) diff --git a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index c3355159c27f94..d239aab2ba90ff 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -130,7 +130,7 @@ std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const } void FrontEnd::convert(const std::shared_ptr& partially_converted) const { - ngraph::onnx_import::detail::convert_decoded_function(partially_converted); + ngraph::onnx_import::detail::convert_decoded_model(partially_converted); normalize(partially_converted); } diff --git a/src/frontends/onnx/frontend/src/utils/convpool.cpp b/src/frontends/onnx/frontend/src/utils/convpool.cpp index 25f1364b9abf60..f789eb58ecf4fa 100644 --- a/src/frontends/onnx/frontend/src/utils/convpool.cpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.cpp @@ -6,11 +6,18 @@ #include -#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/strides.hpp" -#include "ngraph/validation_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/util/attr_types.hpp" +#include "openvino/op/variadic_split.hpp" + +using namespace ov; +using namespace ov::op; OPENVINO_SUPPRESS_DEPRECATED_START @@ -71,20 +78,20 @@ Strides get_dilations(const Node& node, const std::size_t kernel_rank) { return get_attribute_value(node, "dilations", kernel_rank); } -ngraph::op::RoundingType get_rounding_type(const Node& node) { - return static_cast(node.get_attribute_value("ceil_mode", 0)); +ov::op::RoundingType get_rounding_type(const Node& node) { + return static_cast(node.get_attribute_value("ceil_mode", 0)); } -ngraph::op::PadType get_auto_pad(const Node& node) { +ov::op::PadType get_auto_pad(const Node& node) { // Default value means use explicitly provided padding values. 
- ngraph::op::PadType pad_type{ngraph::op::PadType::NOTSET}; + ov::op::PadType pad_type{ov::op::PadType::NOTSET}; if (node.has_attribute("auto_pad")) { - static std::unordered_multimap auto_pad_values{ - {"VALID", ngraph::op::PadType::VALID}, - {"SAME_UPPER", ngraph::op::PadType::SAME_UPPER}, - {"SAME_LOWER", ngraph::op::PadType::SAME_LOWER}, - {"NOTSET", ngraph::op::PadType::NOTSET}, - {"", ngraph::op::PadType::NOTSET}, // empty string considered as undefined attribute + static std::unordered_multimap auto_pad_values{ + {"VALID", ov::op::PadType::VALID}, + {"SAME_UPPER", ov::op::PadType::SAME_UPPER}, + {"SAME_LOWER", ov::op::PadType::SAME_LOWER}, + {"NOTSET", ov::op::PadType::NOTSET}, + {"", ov::op::PadType::NOTSET}, // empty string considered as undefined attribute }; const std::string& pad_str{node.get_attribute_value("auto_pad", "NOTSET")}; @@ -131,39 +138,33 @@ void calculate_auto_pads(const Shape& data_shape, const Shape& filter_shape, const Strides& strides, const Strides& dilations, - const ngraph::op::PadType& pad_type, + const ov::op::PadType& pad_type, CoordinateDiff& padding_below, CoordinateDiff& padding_above) { - if (pad_type == ngraph::op::PadType::SAME_UPPER || pad_type == ngraph::op::PadType::SAME_LOWER) { + if (pad_type == ov::op::PadType::SAME_UPPER || pad_type == ov::op::PadType::SAME_LOWER) { padding_below.clear(); padding_above.clear(); // Extract kernel shape - remove (N,C) channels Shape kernel_shape(std::next(std::begin(filter_shape), 2), std::end(filter_shape)); OPENVINO_SUPPRESS_DEPRECATED_START - ngraph::infer_auto_padding(data_shape, - kernel_shape, - strides, - dilations, - pad_type, - padding_above, - padding_below); + ov::infer_auto_padding(data_shape, kernel_shape, strides, dilations, pad_type, padding_above, padding_below); OPENVINO_SUPPRESS_DEPRECATED_END } } -Output get_reshaped_filters(const Output& filters, int64_t groups) { - const auto zero_node = default_opset::Constant::create(element::i64, Shape(), {0}); - const auto split_lengths = default_opset::Constant::create(element::i64, Shape{2}, {1, -1}); - const auto groups_node = default_opset::Constant::create(element::i64, Shape{1}, {groups}); +Output get_reshaped_filters(const Output& filters, int64_t groups) { + const auto zero_node = v0::Constant::create(element::i64, Shape(), {0}); + const auto split_lengths = v0::Constant::create(element::i64, Shape{2}, {1, -1}); + const auto groups_node = v0::Constant::create(element::i64, Shape{1}, {groups}); - const auto filters_shape = std::make_shared(filters); - const auto splitted_shape = std::make_shared(filters_shape, zero_node, split_lengths); + const auto filters_shape = std::make_shared(filters); + const auto splitted_shape = std::make_shared(filters_shape, zero_node, split_lengths); - const auto first_dim = std::make_shared(splitted_shape->output(0), groups_node); + const auto first_dim = std::make_shared(splitted_shape->output(0), groups_node); const auto new_filters_shape = - std::make_shared(OutputVector{groups_node, first_dim, splitted_shape->output(1)}, 0); + std::make_shared(OutputVector{groups_node, first_dim, splitted_shape->output(1)}, 0); - const auto reshaped_filters = std::make_shared(filters, new_filters_shape, false); + const auto reshaped_filters = std::make_shared(filters, new_filters_shape, false); return reshaped_filters; } diff --git a/src/frontends/onnx/frontend/src/utils/convpool.hpp b/src/frontends/onnx/frontend/src/utils/convpool.hpp index e5523dc9b6ea7e..36d880abcbffe9 100644 --- 
a/src/frontends/onnx/frontend/src/utils/convpool.hpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.hpp @@ -5,10 +5,10 @@ #pragma once #include "ngraph/coordinate_diff.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/strides.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/strides.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -44,8 +44,8 @@ Strides get_dilations(const Node& node, const std::size_t kernel_rank = 0UL); /// /// \param[in] node The ONNX node we query for attribute. /// -/// \return The nGraph RoundingType object representing 'ceil_mode' attribute value. -ngraph::op::RoundingType get_rounding_type(const Node& node); +/// \return The OV RoundingType object representing 'ceil_mode' attribute value. +ov::op::RoundingType get_rounding_type(const Node& node); /// \brief Get padding values for the operation described by an ONNX node. /// \details Values are taken from the `pads` attribute. @@ -81,12 +81,12 @@ std::pair get_pads(const Node& node); /// \param[in,out] padding_below The paddings below axis. /// \param[in,out] padding_above The paddings above axis. /// -/// \see ngraph::op::PadType +/// \see ov::op::PadType void calculate_auto_pads(const Shape& data_shape, const Shape& filter_shape, const Strides& strides, const Strides& dilations, - const ngraph::op::PadType& pad_type, + const ov::op::PadType& pad_type, CoordinateDiff& padding_below, CoordinateDiff& padding_above); @@ -94,9 +94,9 @@ void calculate_auto_pads(const Shape& data_shape, /// /// \param[in] node The ONNX node we query for attribute. /// -/// \return The nGraph PadType object representing 'auto_pad' attribute value. +/// \return The OV PadType object representing 'auto_pad' attribute value. /// -ngraph::op::PadType get_auto_pad(const Node& node); +ov::op::PadType get_auto_pad(const Node& node); /// \brief Reshape group convolution filters to match desired shape: /// from [C_INPUT x C_OUTPUT/groups x k1 x k2 x ... x kn] @@ -106,7 +106,7 @@ ngraph::op::PadType get_auto_pad(const Node& node); /// \param[in] groups Number of groups /// /// \return Reshaped filters input. 
-Output get_reshaped_filters(const Output& filters, int64_t groups); +Output get_reshaped_filters(const Output& filters, int64_t groups); } // namespace convpool } // namespace onnx_import diff --git a/src/frontends/onnx/frontend/src/utils/dft.cpp b/src/frontends/onnx/frontend/src/utils/dft.cpp index d0b4f2277428f8..d432fd81dc10d1 100644 --- a/src/frontends/onnx/frontend/src/utils/dft.cpp +++ b/src/frontends/onnx/frontend/src/utils/dft.cpp @@ -4,11 +4,19 @@ #include "dft.hpp" -#include "default_opset.hpp" #include "onnx_import/core/null_node.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/dft.hpp" +#include "openvino/op/idft.hpp" +#include "openvino/op/irdft.hpp" +#include "openvino/op/rdft.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/unsqueeze.hpp" -using namespace ngraph::onnx_import; +using namespace ov::op; namespace ngraph { namespace onnx_import { @@ -23,10 +31,9 @@ bool try_convert_real_to_complex(ov::Output& signal) { const auto last_axis_pos = length - 1; const auto last_dim = signal.get_partial_shape()[last_axis_pos]; if (last_dim.is_static() && last_dim.get_length() == 1) { - ov::Output imag_part = default_opset::Constant::create(signal.get_element_type(), {}, {0}); - imag_part = - std::make_shared(imag_part, std::make_shared(signal)); - signal = std::make_shared(OutputVector{signal, imag_part}, last_axis_pos); + ov::Output imag_part = v0::Constant::create(signal.get_element_type(), {}, {0}); + imag_part = std::make_shared(imag_part, std::make_shared(signal)); + signal = std::make_shared(OutputVector{signal, imag_part}, last_axis_pos); return true; } } @@ -42,7 +49,7 @@ ov::Output make_dft(const ov::Output& signal, bool is_inversed, bool is_onesided) { auto processed_signal = signal; - const auto axis_const = default_opset::Constant::create(element::i64, {1}, {axis}); + const auto axis_const = v0::Constant::create(element::i64, {1}, {axis}); bool conversion_to_complex_applied = false; if (is_inversed || !is_onesided) { // skip for RDFT case conversion_to_complex_applied = try_convert_real_to_complex(processed_signal); @@ -54,23 +61,23 @@ ov::Output make_dft(const ov::Output& signal, ov::Output result; if (is_inversed) { if (is_onesided) { - result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) - : std::make_shared(processed_signal, axis_const); + result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) + : std::make_shared(processed_signal, axis_const); if (conversion_to_complex_applied) { // align the output shape with a real numbers representation - const auto unsqueeze_axis = default_opset::Constant::create(element::i64, {}, {-1}); - result = std::make_shared(result, unsqueeze_axis); + const auto unsqueeze_axis = v0::Constant::create(element::i64, {}, {-1}); + result = std::make_shared(result, unsqueeze_axis); } } else { - result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) - : std::make_shared(processed_signal, axis_const); + result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) + : std::make_shared(processed_signal, axis_const); } } else { if (is_onesided) { - result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) - : std::make_shared(processed_signal, axis_const); + result = dft_length_provided ? 
std::make_shared(processed_signal, axis_const, length) + : std::make_shared(processed_signal, axis_const); } else { - result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) - : std::make_shared(processed_signal, axis_const); + result = dft_length_provided ? std::make_shared(processed_signal, axis_const, length) + : std::make_shared(processed_signal, axis_const); } } return {result}; diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp index 349b9e8294e256..a478eef9f92f49 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp @@ -7,18 +7,20 @@ #include #include "core/graph.hpp" -#include "core/model.hpp" #include "core/transform.hpp" #include "onnx_framework_node.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/core/model.hpp" #include "openvino/util/file_util.hpp" +using namespace ov; + namespace ngraph { namespace onnx_import { namespace detail { namespace { -void remove_dangling_parameters(std::shared_ptr& function) { - const auto parameters = function->get_parameters(); +void remove_dangling_parameters(std::shared_ptr& model) { + const auto parameters = model->get_parameters(); for (auto parameter : parameters) { const auto parameter_users = parameter->get_users(); // if a Parameter is connected to a ONNXFrameworkNode that was not converted @@ -27,32 +29,32 @@ void remove_dangling_parameters(std::shared_ptr& function) { const bool is_dangling_parameter = std::all_of(parameter_users.begin(), parameter_users.end(), - [](const std::shared_ptr& node) -> bool { + [](const std::shared_ptr& node) -> bool { return std::dynamic_pointer_cast(node) != nullptr; }); if (is_dangling_parameter) { - function->remove_parameter(parameter); + model->remove_parameter(parameter); } } } -OPENVINO_SUPPRESS_DEPRECATED_START -void remove_dangling_results(std::shared_ptr& function) { - const auto results = function->get_results(); +void remove_dangling_results(std::shared_ptr& model) { + const auto results = model->get_results(); for (auto result : results) { // we can remove Result from function if after function conversion, // Result is connected to NullNode only const auto result_inputs = result->input_values(); const bool is_dangling_result = - std::all_of(result_inputs.begin(), result_inputs.end(), [](const Output& node) -> bool { + std::all_of(result_inputs.begin(), result_inputs.end(), [](const Output& node) -> bool { + OPENVINO_SUPPRESS_DEPRECATED_START return ov::op::util::is_null(node); + OPENVINO_SUPPRESS_DEPRECATED_END }); if (is_dangling_result) { - function->remove_result(result); + model->remove_result(result); } } } -OPENVINO_SUPPRESS_DEPRECATED_END void apply_transformations(ONNX_NAMESPACE::ModelProto& model_proto) { transform::fixup_legacy_operators(model_proto); @@ -60,19 +62,19 @@ void apply_transformations(ONNX_NAMESPACE::ModelProto& model_proto) { } // namespace -void convert_decoded_function(std::shared_ptr function) { - auto& rt_info = function->get_rt_info(); +void convert_decoded_model(std::shared_ptr model) { + auto& rt_info = model->get_rt_info(); auto it = rt_info.find(ONNX_GRAPH_RT_ATTRIBUTE); OPENVINO_ASSERT(it != rt_info.end(), "Could not find '" + std::string(ONNX_GRAPH_RT_ATTRIBUTE) + "' attribute in decoded model. 
Model probably wasn't created by FrontEnd::decode function."); auto onnx_graph = it->second.as>(); - for (const auto& node : function->get_ordered_ops()) { + for (const auto& node : model->get_ordered_ops()) { if (auto raw_node = std::dynamic_pointer_cast(node)) { if (auto subgraph_node = std::dynamic_pointer_cast(node)) { subgraph_node->infer_inputs_from_parent(); for (auto& model : subgraph_node->get_subgraph_models()) { - convert_decoded_function(model); + convert_decoded_model(model); } } auto ov_nodes = raw_node->get_ov_nodes(onnx_graph); @@ -84,35 +86,31 @@ void convert_decoded_function(std::shared_ptr function) { } } rt_info.erase(it); - detail::remove_dangling_parameters(function); - detail::remove_dangling_results(function); + detail::remove_dangling_parameters(model); + detail::remove_dangling_results(model); } -std::shared_ptr import_onnx_model(std::shared_ptr model_proto, - const std::string& model_path, - detail::MappedMemoryHandles mmap_cache, - ov::frontend::ExtensionHolder extensions) { +std::shared_ptr import_onnx_model(std::shared_ptr model_proto, + const std::string& model_path, + detail::MappedMemoryHandles mmap_cache, + ov::frontend::ExtensionHolder extensions) { apply_transformations(*model_proto); - NGRAPH_SUPPRESS_DEPRECATED_START Graph graph{ov::util::get_directory(ov::util::get_absolute_file_path(model_path)), model_proto, mmap_cache, std::move(extensions)}; - NGRAPH_SUPPRESS_DEPRECATED_END return graph.convert(); } -std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, - const std::string& model_path, - detail::MappedMemoryHandles mmap_cache, - ov::frontend::ExtensionHolder extensions) { +std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, + const std::string& model_path, + detail::MappedMemoryHandles mmap_cache, + ov::frontend::ExtensionHolder extensions) { apply_transformations(*model_proto); - NGRAPH_SUPPRESS_DEPRECATED_START auto graph = std::make_shared(ov::util::get_directory(ov::util::get_absolute_file_path(model_path)), model_proto, mmap_cache, extensions); - NGRAPH_SUPPRESS_DEPRECATED_END return graph->decode(); } } // namespace detail diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp index 4e2377947f3e48..8348313ca3f112 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph/function.hpp" +#include "openvino/core/model.hpp" #include "openvino/frontend/extension/holder.hpp" #include "utils/legacy_conversion_extension.hpp" #include "utils/tensor_external_data.hpp" @@ -20,43 +20,41 @@ namespace ngraph { namespace onnx_import { namespace detail { /// \brief Imports and converts an serialized ONNX model from a ModelProto -/// to an nGraph Function representation. +/// to an ov::Model representation. /// /// \note The function can be used only internally by OV components! /// Passing ModelProto between componets which use different protobuf /// library can cause segfaults. If stream parsing fails or the ONNX model -/// contains unsupported ops, the function throws an ngraph_error exception. +/// contains unsupported ops, the function throws an ov::Exception. /// /// \param model_proto Reference to a GraphProto object. /// \param model_path The path to the imported onnx model. /// It is required if the imported model uses data saved in external files. 
/// \param enable_mmap Enable mapping files with external weights instead of reading. /// \param extensions An object containing a collection of frontend extensions to use during the import process -/// \return An nGraph function that represents a single output from the created +/// \return An ov::Model that represents a single output from the created /// graph. -std::shared_ptr import_onnx_model(std::shared_ptr model_proto, - const std::string& model_path, - detail::MappedMemoryHandles mmap_cache, - ov::frontend::ExtensionHolder extensions = {}); +std::shared_ptr import_onnx_model(std::shared_ptr model_proto, + const std::string& model_path, + detail::MappedMemoryHandles mmap_cache, + ov::frontend::ExtensionHolder extensions = {}); -/// \brief Decode ONNX model to nGraph function with ONNXFrameworkNode(s) +/// \brief Decode ONNX model to ov::Model with ONNXFrameworkNode(s) /// /// \param model_proto Reference to a GraphProto object. /// \param model_path The path to the imported onnx model. /// It is required if the imported model uses data saved in external files. /// \param enable_mmap Enable mapping files with external weights instead of reading. /// \param extensions An object containing a collection of frontend extensions to use during the import process -/// \return A nGraph function with ONNXFrameworkNodes -std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, - const std::string& model_path, - detail::MappedMemoryHandles mmap_cache, - ov::frontend::ExtensionHolder extensions = {}); +/// \return A ov::Model with ONNXFrameworkNodes +std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, + const std::string& model_path, + detail::MappedMemoryHandles mmap_cache, + ov::frontend::ExtensionHolder extensions = {}); -/// \brief Converts a nGraph function (onnx model decoded to function with ONNXFrameworkNode(s)) +/// \brief Converts a ov::Model (onnx model decoded to function with ONNXFrameworkNode(s)) /// to a complete function with actual compute operations -/// -/// \return A nGraph function. -void convert_decoded_function(std::shared_ptr function); +void convert_decoded_model(std::shared_ptr model); /// \brief Get the legacy conversion extension. 
/// diff --git a/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp b/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp index d1c4d24071debf..cec8a8f1f30599 100644 --- a/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp @@ -6,18 +6,21 @@ #include -#include "default_opset.hpp" -#include "exceptions.hpp" -#include "ngraph/coordinate_diff.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/avg_pool.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/transpose.hpp" #include "utils/convpool.hpp" +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace pooling { namespace { -std::shared_ptr transposition_axis_order(const Rank& input_rank) { +std::shared_ptr transposition_axis_order(const Rank& input_rank) { FRONT_END_GENERAL_CHECK(input_rank.is_static(), "Generating column-major MaxPool results is supported only for inputs with static rank."); @@ -27,7 +30,7 @@ std::shared_ptr transposition_axis_order(const Rank& in std::iota(axes.begin(), axes.end(), 0); std::reverse(axes.begin() + 2, axes.end()); - return std::make_shared(element::i32, Shape{rank}, axes); + return std::make_shared(element::i32, Shape{rank}, axes); } } // namespace @@ -50,40 +53,39 @@ PoolingFactory::PoolingFactory(const Node& node) OutputVector PoolingFactory::make_avg_pool() const { const bool count_include_pad = m_onnx_node.get_attribute_value("count_include_pad", 0); - return {std::make_shared(m_inputs.at(0), - m_strides, - m_padding_below, - m_padding_above, - m_kernel_shape, - !count_include_pad, - m_rounding_type, - m_auto_pad)}; + return {std::make_shared(m_inputs.at(0), + m_strides, + m_padding_below, + m_padding_above, + m_kernel_shape, + !count_include_pad, + m_rounding_type, + m_auto_pad)}; } OPENVINO_SUPPRESS_DEPRECATED_END OutputVector PoolingFactory::make_max_pool() const { - return {std::make_shared(m_inputs.at(0), - m_strides, - m_padding_below, - m_padding_above, - m_kernel_shape, - m_rounding_type, - m_auto_pad)}; + return {std::make_shared(m_inputs.at(0), + m_strides, + m_padding_below, + m_padding_above, + m_kernel_shape, + m_rounding_type, + m_auto_pad)}; } OutputVector PoolingFactory::make_max_pool_with_indices() const { - const auto max_pool = std::make_shared(m_inputs.at(0), - m_strides, - m_dilations, - m_padding_below, - m_padding_above, - m_kernel_shape, - m_rounding_type, - m_auto_pad); + const auto max_pool = std::make_shared(m_inputs.at(0), + m_strides, + m_dilations, + m_padding_below, + m_padding_above, + m_kernel_shape, + m_rounding_type, + m_auto_pad); if (m_storage_order == StorageOrder::COLUMN_MAJOR) { const auto transposition_axes = transposition_axis_order(m_inputs.at(0).get_partial_shape().rank()); - const auto transposed_indices = - std::make_shared(max_pool->output(1), transposition_axes); + const auto transposed_indices = std::make_shared(max_pool->output(1), transposition_axes); return {max_pool->output(0), transposed_indices}; } else { diff --git a/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp b/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp index d4daef7075550c..8fb6ad51cedf9e 100644 --- a/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp +++ b/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp @@ -7,15 +7,13 @@ #include #include -#include "ngraph/node.hpp" -#include "ngraph/op/avg_pool.hpp" -#include "ngraph/op/max_pool.hpp" -#include "ngraph/op/op.hpp" -#include 
"ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" -#include "ngraph/strides.hpp" #include "onnx_import/core/node.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/strides.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" namespace ngraph { namespace onnx_import { @@ -62,8 +60,8 @@ class PoolingFactory { Strides m_dilations; Shape m_padding_below; Shape m_padding_above; - ngraph::op::PadType m_auto_pad; - ngraph::op::RoundingType m_rounding_type; + ov::op::PadType m_auto_pad; + ov::op::RoundingType m_rounding_type; enum class StorageOrder : int64_t { ROW_MAJOR = 0, COLUMN_MAJOR = 1 }; diff --git a/src/frontends/onnx/frontend/src/utils/recurrent.cpp b/src/frontends/onnx/frontend/src/utils/recurrent.cpp index df114b35fd61c6..35344d8f64adb7 100644 --- a/src/frontends/onnx/frontend/src/utils/recurrent.cpp +++ b/src/frontends/onnx/frontend/src/utils/recurrent.cpp @@ -8,12 +8,21 @@ #include #include -#include "default_opset.hpp" -#include "ngraph/enum_names.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/core/enum_names.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/util/common_util.hpp" #include "ov_models/ov_builders/reshape.hpp" #include "ov_models/ov_builders/split.hpp" +using namespace ov::op; + OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { @@ -30,67 +39,58 @@ OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { const auto r_pshape = m_map[OpInput::R].get_partial_shape(); // Get dimensions needed for default inputs creation - auto shape_of_x = std::make_shared(m_map[OpInput::X]); - auto axes = default_opset::Constant::create(element::i32, Shape{1}, {0}); + auto shape_of_x = std::make_shared(m_map[OpInput::X]); + auto axes = v0::Constant::create(element::i32, Shape{1}, {0}); auto batch_size_node = - std::make_shared(shape_of_x, - default_opset::Constant::create(element::i32, Shape{1}, {0}), - axes); + std::make_shared(shape_of_x, v0::Constant::create(element::i32, Shape{1}, {0}), axes); auto seq_length_node = - std::make_shared(shape_of_x, - default_opset::Constant::create(element::i32, Shape{1}, {1}), - axes); + std::make_shared(shape_of_x, v0::Constant::create(element::i32, Shape{1}, {1}), axes); - auto shape_of_r = std::make_shared(m_map[OpInput::R]); + auto shape_of_r = std::make_shared(m_map[OpInput::R]); auto num_directions_node = - std::make_shared(shape_of_r, - default_opset::Constant::create(element::i32, Shape{1}, {0}), - axes); + std::make_shared(shape_of_r, v0::Constant::create(element::i32, Shape{1}, {0}), axes); auto hidden_size_node = - std::make_shared(shape_of_r, - default_opset::Constant::create(element::i32, Shape{1}, {2}), - axes); + std::make_shared(shape_of_r, v0::Constant::create(element::i32, Shape{1}, {2}), axes); // ------ Optional inputs ------ if (ng_inputs.size() > 3 && !ov::op::util::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); auto split_bias = ov::op::util::split(bias, 2, 1); - m_map[OpInput::B] = std::make_shared(split_bias.at(0), split_bias.at(1)); + m_map[OpInput::B] = std::make_shared(split_bias.at(0), split_bias.at(1)); } else { - auto b_shape = std::make_shared( - OutputVector{num_directions_node, - 
std::make_shared<default_opset::Multiply>(
-                             default_opset::Constant::create(element::Type_t::i64, Shape{1}, {gates_count}),
-                             hidden_size_node)},
+        auto b_shape = std::make_shared<v0::Concat>(
+            OutputVector{
+                num_directions_node,
+                std::make_shared<v1::Multiply>(v0::Constant::create(element::Type_t::i64, Shape{1}, {gates_count}),
+                                               hidden_size_node)},
             0);
-        m_map[OpInput::B] = std::make_shared<default_opset::Broadcast>(
-            default_opset::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}),
-            b_shape);
+        m_map[OpInput::B] =
+            std::make_shared<v3::Broadcast>(v0::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}),
+                                            b_shape);
     }
     if (ng_inputs.size() > 4 && !ov::op::util::is_null(ng_inputs.at(4))) {
         m_map[OpInput::SEQ_LENGTHS] = ng_inputs.at(4);
     } else {
-        m_map[OpInput::SEQ_LENGTHS] = std::make_shared<default_opset::Broadcast>(seq_length_node, batch_size_node);
+        m_map[OpInput::SEQ_LENGTHS] = std::make_shared<v3::Broadcast>(seq_length_node, batch_size_node);
     }
     // The initial value of the hidden.
     if (ng_inputs.size() > 5 && !ov::op::util::is_null(ng_inputs.at(5))) {
         m_map[OpInput::INIT_H] = ov::op::util::reorder_axes(ng_inputs.at(5), {1, 0, 2});
     } else {
-        auto init_h_shape = std::make_shared<default_opset::Concat>(
-            OutputVector{batch_size_node, num_directions_node, hidden_size_node},
-            0);
-        m_map[OpInput::INIT_H] = std::make_shared<default_opset::Broadcast>(
-            default_opset::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}),
-            init_h_shape);
+        auto init_h_shape =
+            std::make_shared<v0::Concat>(OutputVector{batch_size_node, num_directions_node, hidden_size_node}, 0);
+        m_map[OpInput::INIT_H] =
+            std::make_shared<v3::Broadcast>(v0::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}),
+                                            init_h_shape);
     }
 }

 OpInputMap::OpInputMap(container_type&& map) : m_map(std::move(map)) {}

-Output<ngraph::Node>& OpInputMap::at(const OpInput& key) {
+Output<ov::Node>& OpInputMap::at(const OpInput& key) {
     return m_map.at(key);
 }

-const Output<ngraph::Node>& OpInputMap::at(const OpInput& key) const {
+const Output<ov::Node>& OpInputMap::at(const OpInput& key) const {
     return m_map.at(key);
 }

@@ -109,10 +109,8 @@ OpAttributes::OpAttributes(const Node& node)
       m_activations_alpha{node.get_attribute_value<std::vector<float>>("activation_alpha", std::vector<float>{})},
       m_activations_beta{node.get_attribute_value<std::vector<float>>("activation_beta", std::vector<float>{})} {
     m_clip_threshold = std::abs(m_clip_threshold);
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    std::string direction = ngraph::to_lower(node.get_attribute_value<std::string>("direction", "forward"));
-    OPENVINO_SUPPRESS_DEPRECATED_END
-    m_direction = ngraph::as_enum<ngraph::op::RecurrentSequenceDirection>(direction);
+    std::string direction = ov::util::to_lower(node.get_attribute_value<std::string>("direction", "forward"));
+    m_direction = ov::as_enum<ov::op::RecurrentSequenceDirection>(direction);
 }

 }  // namespace recurrent
diff --git a/src/frontends/onnx/frontend/src/utils/recurrent.hpp b/src/frontends/onnx/frontend/src/utils/recurrent.hpp
index 02e04a78177b2c..a5cab40cf266ac 100644
--- a/src/frontends/onnx/frontend/src/utils/recurrent.hpp
+++ b/src/frontends/onnx/frontend/src/utils/recurrent.hpp
@@ -8,10 +8,10 @@
 #include
 #include

-#include "ngraph/node.hpp"
-#include "ngraph/op/util/attr_types.hpp"
 #include "onnx_import/core/node.hpp"
 #include "openvino/core/deprecated.hpp"
+#include "openvino/core/node.hpp"
+#include "openvino/op/util/attr_types.hpp"

 namespace ngraph {
 namespace onnx_import {
@@ -39,7 +39,7 @@ enum class OpInput {
 /// \brief This structure aggregates operator's inputs in a key-value map.
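// ---------------------------------------------------------------------------
// Editor's note (illustration, not part of the patch): the constructor changes
// above all follow one pattern: when an optional recurrent input is absent, a
// scalar zero is broadcast to a target shape assembled at runtime from
// Concat-ed dimension nodes. A self-contained sketch of that pattern; the
// helper name is hypothetical and the v0/v3 op versions are assumed from the
// headers this patch adds.

#include "openvino/op/broadcast.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"

static ov::Output<ov::Node> make_default_recurrent_input(const ov::Output<ov::Node>& num_directions_node,
                                                         const ov::Output<ov::Node>& per_direction_size_node,
                                                         const ov::element::Type& et) {
    using namespace ov::op;
    // Target shape [num_directions, per_direction_size], computed at runtime.
    auto target_shape =
        std::make_shared<v0::Concat>(ov::OutputVector{num_directions_node, per_direction_size_node}, 0);
    // A scalar zero of the payload element type...
    auto zero = v0::Constant::create(et, ov::Shape{}, {0});
    // ...broadcast to the computed shape, like the defaulted B, SEQ_LENGTHS
    // and INIT_H inputs above.
    return std::make_shared<v3::Broadcast>(zero, target_shape);
}
// ---------------------------------------------------------------------------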
/// struct OpInputMap { - using container_type = std::map>; + using container_type = std::map>; OPENVINO_SUPPRESS_DEPRECATED_START explicit OpInputMap(const onnx_import::Node& node, std::size_t gates_count); @@ -47,8 +47,8 @@ struct OpInputMap { OpInputMap(container_type&& map); virtual ~OpInputMap() = default; - Output& at(const OpInput& key); - const Output& at(const OpInput& key) const; + Output& at(const OpInput& key); + const Output& at(const OpInput& key) const; container_type m_map; }; @@ -64,7 +64,7 @@ struct OpAttributes { OPENVINO_SUPPRESS_DEPRECATED_END virtual ~OpAttributes() = default; - ngraph::op::RecurrentSequenceDirection m_direction; + ov::op::RecurrentSequenceDirection m_direction; std::int64_t m_hidden_size; float m_clip_threshold; std::vector m_activations; diff --git a/src/frontends/onnx/frontend/src/utils/reshape.cpp b/src/frontends/onnx/frontend/src/utils/reshape.cpp index 630cc91ffafdad..67e7781d692030 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.cpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.cpp @@ -9,12 +9,18 @@ #include #include -#include "default_opset.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "ngraph/shape.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/util/op_types.hpp" #include "utils/reshape.hpp" +using namespace ov::op; + namespace ngraph { namespace onnx_import { namespace reshape { @@ -64,7 +70,7 @@ std::vector infer_dimensions(const std::string& node_name, return inferred_dims; } -Output interpret_as_scalar(const Output& node) { +Output interpret_as_scalar(const Output& node) { Shape node_shape = node.get_shape(); // If node is already a scalar, return original @@ -77,27 +83,27 @@ Output interpret_as_scalar(const Output& node) { node_shape); // If node is a Constant, recreate as Constant with Shape{} - if (ngraph::op::is_constant(node.get_node())) { - const auto value = ov::as_type_ptr(node.get_node_shared_ptr())->get_data_ptr(); - return std::make_shared(node.get_element_type(), ngraph::Shape{}, value); + if (ov::op::util::is_constant(node.get_node())) { + const auto value = ov::as_type_ptr(node.get_node_shared_ptr())->get_data_ptr(); + return std::make_shared(node.get_element_type(), ov::Shape{}, value); } return ov::op::util::reshape(node, Shape{}); } -Output reshape_channel_shaped_node_to_nchw(const Output& node, - const Output& expected_rank) { +Output reshape_channel_shaped_node_to_nchw(const Output& node, + const Output& expected_rank) { // Prepare tail shape (rank = conv.rank - 2): [1, 1, 1, 1, ... ] - const auto one_const = default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto two_const = default_opset::Constant::create(element::i64, Shape{1}, {2}); - const auto tail_shape_rank = std::make_shared(expected_rank, two_const); - const auto tail_shape = std::make_shared(one_const, tail_shape_rank); + const auto one_const = v0::Constant::create(element::i64, Shape{1}, {1}); + const auto two_const = v0::Constant::create(element::i64, Shape{1}, {2}); + const auto tail_shape_rank = std::make_shared(expected_rank, two_const); + const auto tail_shape = std::make_shared(one_const, tail_shape_rank); // Construct new bias shape: [1, C, 1, 1, ... 
] - const auto C_dim = std::make_shared(node); - const auto new_shape = std::make_shared(OutputVector{one_const, C_dim, tail_shape}, 0); + const auto C_dim = std::make_shared(node); + const auto new_shape = std::make_shared(OutputVector{one_const, C_dim, tail_shape}, 0); - return std::make_shared(node, new_shape, false); + return std::make_shared(node, new_shape, false); } } // namespace reshape From 44e2a85e6bfb9b2cb5b7b67f01e81ebaaaa7507e Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Fri, 12 Jan 2024 15:17:13 +0400 Subject: [PATCH 21/43] [GPU] Fix convert_matmul_to_fc transformation pass for FC weights sharing case (#22107) --- .../transformations/convert_matmul_to_fc.cpp | 30 ++++++++++++++++-- .../convert_matmul_to_fc_test.cpp | 31 +++++++++++++++++++ 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp index 2caf3cd4d69850..0cd5e1090eb2df 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.cpp @@ -50,6 +50,11 @@ ConvertMatMulToFullyConnected::ConvertMatMulToFullyConnected() { fc_input_b = convert_node->get_input_node_shared_ptr(0); } + auto transpose_node = std::dynamic_pointer_cast(fc_input_b.get_node_shared_ptr()); + if (transpose_node) { + fc_input_b = transpose_node->get_input_node_shared_ptr(0); + } + auto shape_a = fc_input_a.get_partial_shape(); auto shape_b = fc_input_b.get_partial_shape(); OPENVINO_ASSERT(shape_b.is_static()); @@ -134,8 +139,23 @@ ConvertMatMulToFullyConnected::ConvertMatMulToFullyConnected() { } // Weights normalization + bool can_reuse_transpose = false; if (!matmul->get_transpose_b()) { - fc_input_b = create_transpose(fc_input_b, matmul->get_friendly_name() + "/transpose_b"); + if (transpose_node && transpose_node->get_input_size() == 2) { + auto order_constant = std::dynamic_pointer_cast(transpose_node->get_input_node_shared_ptr(1)); + if (order_constant) { + std::vector order = order_constant->cast_vector(); + + std::vector expected_order(fc_input_b.get_partial_shape().size()); + std::iota(expected_order.begin(), expected_order.end(), 0); + std::swap(*(expected_order.end() - 1), *(expected_order.end() - 2)); + + can_reuse_transpose = order == expected_order; + } + } + + fc_input_b = can_reuse_transpose ? 
transpose_node + : create_transpose(fc_input_b, matmul->get_friendly_name() + "/transpose_b"); } // Input normalization @@ -144,7 +164,13 @@ ConvertMatMulToFullyConnected::ConvertMatMulToFullyConnected() { } // Connect Convert to new input if needed - if (is_convert) { + if (is_convert && transpose_node && !can_reuse_transpose) { + auto convert = pattern_map.at(weights_m).get_node_shared_ptr(); + auto new_convert = convert->clone_with_new_inputs({fc_input_b}); + new_ops.push_back(new_convert); + new_convert->validate_and_infer_types(); + fc_input_b = new_convert; + } else if (is_convert) { auto convert = pattern_map.at(weights_m).get_node_shared_ptr(); convert->input(0).replace_source_output(fc_input_b); convert->validate_and_infer_types(); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/convert_matmul_to_fc_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/convert_matmul_to_fc_test.cpp index 4dd06da5ff5d0b..2b6dfb4a8f0602 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/convert_matmul_to_fc_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/convert_matmul_to_fc_test.cpp @@ -249,6 +249,37 @@ TEST_F(TransformationTestsF, ConvertMatMulToFullyConnectedTest14) { } } +TEST_F(TransformationTestsF, ConvertMatMulToFullyConnectedTest15) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{1, 10, 64}); + auto input2 = std::make_shared(ov::element::f32, ov::Shape{1, 10, 64}); + auto input3 = ov::opset1::Constant::create(ov::element::f16, ov::Shape{64, 32}, {1}); + + auto convert = std::make_shared(input3, ov::element::f32); + ov::mark_as_decompression(convert); + + auto matmul1 = std::make_shared(input1, convert, false, false); + auto matmul2 = std::make_shared(input2, convert, false, false); + + model = std::make_shared(ov::NodeVector{matmul1, matmul2}, ov::ParameterVector{input1, input2}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{1, 10, 64}); + auto input2 = std::make_shared(ov::element::f32, ov::Shape{1, 10, 64}); + auto input3 = ov::opset1::Constant::create(ov::element::f16, ov::Shape{64, 32}, {1}); + + auto transpose_constant = ov::opset1::Constant::create(ov::element::i32, ov::Shape{2}, {1, 0}); + auto transpose = std::make_shared(input3, transpose_constant); + auto convert = std::make_shared(transpose, ov::element::f32); + + auto matmul1 = std::make_shared(input1, convert); + auto matmul2 = std::make_shared(input2, convert); + + model_ref = std::make_shared(ov::NodeVector{matmul1, matmul2}, ov::ParameterVector{input1, input2}); + } +} + TEST_F(TransformationTestsF, ConvertMatMulToFullyConnectedTest_second_input_rank_adj_1) { { auto input1 = std::make_shared(ov::element::f32, ov::Shape{5, 2, 3}); From eeb2ccc2ecaae4d75ab13d86f71e3635b1556d7e Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 12 Jan 2024 12:22:41 +0100 Subject: [PATCH 22/43] [DOCS] fix npu and install (#22123) --- .../installing-openvino-overview.rst | 44 +++++++++---------- .../Device_Plugins/NPU.rst | 42 +++++++++--------- 2 files changed, 44 insertions(+), 42 deletions(-) diff --git a/docs/articles_en/get_started/installing-openvino-overview.rst b/docs/articles_en/get_started/installing-openvino-overview.rst index 9f42f3de9a1c93..37e8701b7fd5f6 100644 --- a/docs/articles_en/get_started/installing-openvino-overview.rst +++ b/docs/articles_en/get_started/installing-openvino-overview.rst @@ -1,11 +1,11 @@ .. 
{#openvino_docs_install_guides_overview} -Install OpenVINO™ 2023.2 +Install OpenVINO™ 2024.0 ========================== .. meta:: - :description: install OpenVINO Runtime package, using the distribution channel + :description: install OpenVINO Runtime package, using the distribution channel of your choice. @@ -15,7 +15,7 @@ Install OpenVINO™ 2023.2 OpenVINO Runtime on Linux OpenVINO Runtime on Windows - OpenVINO Runtime on macOS + OpenVINO Runtime on macOS Create a Yocto Image @@ -26,8 +26,8 @@ Install OpenVINO™ 2023.2 .. warning:: - - The OpenVINO Development Tools package has been deprecated and removed from the default + + The OpenVINO™ Development Tools package has been deprecated and removed from the default installation options. For new projects, the OpenVINO runtime package now includes all necessary components. @@ -36,30 +36,30 @@ Install OpenVINO™ 2023.2 .. tip:: - - OpenVINO 2023.2, described here, is not a Long-Term-Support version! + + OpenVINO 2024.0, described here, is not a Long-Term-Support version! All currently supported versions are: - * 2023.2 (development) + * 2024.0 (development) + * 2023.3 (LTS) * 2022.3 (LTS) - * 2021.4 (LTS) Moreover, different OpenVINO distributions may support slightly different sets of features. - Read installation guides for particular distributions for more details. - - .. dropdown:: Distribution Comparison for OpenVINO 2023.2 - - =============== ========== ====== ========= ======== ============ ========== ========== - Device Archives PyPI APT/YUM Conda Homebrew vcpkg Conan - =============== ========== ====== ========= ======== ============ ========== ========== - CPU V V V V V V V - GPU V V V V V V V - GNA V n/a n/a n/a n/a n/a n/a - NPU V n/a n/a n/a n/a n/a n/a - =============== ========== ====== ========= ======== ============ ========== ========== + Read installation guides for particular distributions for more details. + + .. dropdown:: Distribution Comparison for OpenVINO 2024.0 + + =============== ========== ====== ========= ======== ============ ========== ========== + Device Archives PyPI APT/YUM Conda Homebrew vcpkg Conan + =============== ========== ====== ========= ======== ============ ========== ========== + CPU V V V V V V V + GPU V V V V V V V + GNA V n/a n/a n/a n/a n/a n/a + NPU V n/a n/a n/a n/a n/a n/a + =============== ========== ====== ========= ======== ============ ========== ========== | **Build OpenVINO from source** -| OpenVINO Toolkit source files are available on GitHub as open source. If you want to build your own version of OpenVINO for your platform, +| OpenVINO Toolkit source files are available on GitHub as open source. If you want to build your own version of OpenVINO for your platform, follow the `OpenVINO Build Instructions `__. diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst index 0b36f07093074e..3c4e09e86a0863 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst @@ -4,7 +4,7 @@ NPU Device ========== .. meta:: - :description: OpenVINO™ supports the Neural Processing Unit, + :description: OpenVINO™ supports the Neural Processing Unit, a low-power processing device dedicated to running AI inference. @@ -13,17 +13,19 @@ Intel® Core™ Ultra generation of CPUs (formerly known as Meteor Lake). 
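Editor's aside (illustration, not part of the patch): before offloading work to
the NPU it is worth confirming that the device, and the driver it requires, are
actually visible to the runtime. A minimal check, assuming only the public
``ov::Core`` API:

.. code-block:: cpp

   #include <iostream>
   #include "openvino/runtime/core.hpp"

   int main() {
       ov::Core core;
       // Prints e.g. "CPU", "GPU" and "NPU" once the NPU driver is installed.
       for (const auto& device : core.get_available_devices())
           std::cout << device << '\n';
   }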
It enables you to offload certain neural network computation tasks from other devices,
for more streamlined resource management.

-For an in-depth description of the NPU plugin, see:
+Note that the NPU plugin is currently available only with the Archive distribution of OpenVINO™
+and you need to :doc:`install a proper NPU driver `
+to use it successfully. For an in-depth description of the NPU plugin, see:

-• `NPU plugin developer documentation `__
-• `OpenVINO Runtime NPU plugin source files `__
+* `NPU plugin developer documentation `__
+* `OpenVINO Runtime NPU plugin source files `__

| **Supported Platforms:**
| Host: Intel® Core™ Ultra (former Meteor Lake)
| NPU device: NPU 3720
| OS: Ubuntu* 20, MS Windows* 11 (both 64-bit)
-
+

| **Supported Inference Data Types**
| The NPU plugin supports the following data types as inference precision of internal primitives:
@@ -32,7 +34,7 @@ For an in-depth description of the NPU plugin, see:
| Computation precision for the HW is fp16.
|
| For more details on how to get a quantized model, refer to the
-  :doc:`Model Optimization guide `, and
+  :doc:`Model Optimization guide `, and
  :doc:`NNCF tool quantization guide `.
@@ -56,13 +58,13 @@ UMD Dynamic Model Caching
+++++++++++++++++++++++++++++
UMD model caching is a solution enabled by default in the current NPU driver.
-It improves time to first inference (FIL) by storing the model in the cache
+It improves time to first inference (FIL) by storing the model in the cache
after the compilation (included in FEIL), based on a hash key. The process
may be summarized in three stages:
1. UMD generates the key from the input IR model and build arguments
2. UMD requests the DirectX Shader cache session to store the model
-   with the computed key.
+   with the computed key.
3. All subsequent requests to compile the same IR model with the same arguments
use the pre-compiled model, reading it from the cache instead of recompiling.
@@ -73,7 +75,7 @@ OpenVINO Model Caching
+++++++++++++++++++++++++++++
OpenVINO Model Caching is a common mechanism for all OpenVINO device plugins
and can be enabled by setting the ``ov::cache_dir`` property. This way, the UMD
model caching is automatically bypassed by the NPU plugin, which means the model
-will only be stored in the OpenVINO cache after compilation. When a cache hit
+will only be stored in the OpenVINO cache after compilation. When a cache hit
occurs for subsequent compilation requests, the plugin will import the model
instead of recompiling it.
@@ -86,8 +88,8 @@ Supported Features and properties
The NPU device is currently supported by AUTO and MULTI inference modes.
-The NPU support in OpenVINO is still under active development and may
-offer a limited set of supported OpenVINO features.
+The NPU support in OpenVINO is still under active development and may
+offer a limited set of supported OpenVINO features.
**Supported Properties:**
@@ -110,11 +112,11 @@ offer a limited set of supported OpenVINO features.
ov::intel_vpux::dpu_groups
ov::intel_vpux::dma_engines
ov::intel_vpux::compilation_mode
-   ov::intel_vpux::compilation_mode_params
-   ov::intel_vpux::print_profiling
-   ov::intel_vpux::profiling_output_file
-   ov::intel_vpux::vpux_platform
-   ov::intel_vpux::use_elf_compiler_backend
+   ov::intel_vpux::compilation_mode_params
+   ov::intel_vpux::print_profiling
+   ov::intel_vpux::profiling_output_file
+   ov::intel_vpux::vpux_platform
+   ov::intel_vpux::use_elf_compiler_backend
.. tab-item:: Read-only properties
@@ -132,7 +134,7 @@ offer a limited set of supported OpenVINO features.
ov::intel_vpux::device_total_mem_size ov::intel_vpux::driver_version -.. note:: +.. note:: The optimum number of inference requests returned by the plugin based on the performance mode is **4 for THROUGHPUT** and **1 for LATENCY**. @@ -142,10 +144,10 @@ offer a limited set of supported OpenVINO features. Limitations ############################# -* Currently, only the models with static shapes are supported on NPU. +* Currently, only the models with static shapes are supported on NPU. * If the path to the model file includes non-Unicode symbols, such as in Chinese, - the model cannot be used for inference on NPU. It will return an error. -* Running the Alexnet model with NPU may result in a drop in accuracy. + the model cannot be used for inference on NPU. It will return an error. +* Running the Alexnet model with NPU may result in a drop in accuracy. At this moment, the googlenet-v4 model is recommended for classification tasks. From e07602720cc71ffd684342430298d01e29579d6d Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Fri, 12 Jan 2024 15:58:41 +0400 Subject: [PATCH 23/43] Move SliceToStridedSlice conversion from MOC to Common transformations (#21556) * codestyle * fix merge conflicts * cleanup * fix unit tests * codestyle --------- Co-authored-by: Andrei Kochin --- .../common_optimizations/common_optimizations.cpp | 5 ++++- .../optimize_strided_slice.cpp | 7 ------- .../optimize_strided_slice_test.cpp | 15 ++++++++++++++- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp index d6c0b5f4cde515..be86c5c4344c80 100644 --- a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -89,6 +89,7 @@ #include "transformations/op_conversions/convert_roi_align_v3_to_v9.hpp" #include "transformations/op_conversions/convert_roi_align_v9_to_v3.hpp" #include "transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp" +#include "transformations/op_conversions/convert_slice_to_strided_slice.hpp" #include "transformations/op_conversions/convert_softmax_downgrade.hpp" #include "transformations/op_conversions/convert_softmax_upgrade.hpp" #include "transformations/op_conversions/convert_space_to_depth.hpp" @@ -122,7 +123,9 @@ bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(StridedSliceOptimization); - - ov::pass::Manager manager(get_pass_config()); - using namespace ov::pass; - REGISTER_PASS(manager, SliceToStridedSlice, m_use_shapes) - manager.run_passes(f); - bool rewritten = false; if (m_use_shapes) { rewritten = UselessSliceEraser().run_on_model(f); diff --git a/src/common/transformations/tests/common_optimizations/optimize_strided_slice_test.cpp b/src/common/transformations/tests/common_optimizations/optimize_strided_slice_test.cpp index a7ab79e36341b5..a01c581f31cfd3 100644 --- a/src/common/transformations/tests/common_optimizations/optimize_strided_slice_test.cpp +++ b/src/common/transformations/tests/common_optimizations/optimize_strided_slice_test.cpp @@ -21,6 +21,7 @@ #include "openvino/opsets/opset3.hpp" #include "openvino/opsets/opset8.hpp" #include "openvino/pass/constant_folding.hpp" +#include "transformations/op_conversions/convert_slice_to_strided_slice.hpp" #include 
"transformations/utils/utils.hpp" using namespace ov; @@ -407,6 +408,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_default_axes) { auto slice = std::make_shared(data, begin, end, step); model = std::make_shared(NodeVector{slice}, ParameterVector{data}); + manager.register_pass(true); manager.register_pass(); } { @@ -438,7 +440,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_axes_const_sorted_full) { auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data}); - manager.register_pass(); + manager.register_pass(true); } { auto data = std::make_shared(element::f32, Shape{2, 4, 3, 5}); @@ -469,6 +471,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_all_const) { auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{}); + manager.register_pass(true); manager.register_pass(); } { @@ -522,6 +525,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_sss_params_axes_const_sorted_le auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -563,6 +567,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_sss_params_axes_const_unsorted) auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -605,6 +610,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_sss_params_axes_const_negative_ auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -637,6 +643,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_sss_params_dyn_shape_axes_const auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -680,6 +687,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_sss_params_static_shape_axes_co auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -722,6 +730,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_dyn_rank_axes_const_positive) { auto slice = std::make_shared(data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin, end, step}); + manager.register_pass(true); manager.register_pass(); } { @@ -796,6 +805,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_begin_param_shape_of_use_shapes auto slice = std::make_shared(shape_of_data, begin, end, step, axes); model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin}); + manager.register_pass(true); manager.register_pass(true); manager.register_pass(); } @@ -838,6 +848,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_begin_param_shape_of_use_shapes model = std::make_shared(NodeVector{slice}, ParameterVector{data, begin}); manager.register_pass(); + manager.register_pass(false); manager.register_pass(false); manager.register_pass(); } @@ -940,6 +951,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_slice_all_use_shapes_true) { auto slice = std::make_shared(relu, begin, end, 
step); model = std::make_shared(NodeVector{slice}, ParameterVector{data}); + manager.register_pass(true); manager.register_pass(true); manager.register_pass(); } @@ -979,6 +991,7 @@ TEST_F(TransformationTestsF, SliceToStridedSlice_slice_all_use_shapes_false) { auto slice = std::make_shared(relu, begin, end, step); model = std::make_shared(NodeVector{slice}, ParameterVector{data}); + manager.register_pass(false); manager.register_pass(false); manager.register_pass(); } From 3ac9085609ae5b1ba296da5d5bf70d31501d77e8 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Fri, 12 Jan 2024 13:13:18 +0100 Subject: [PATCH 24/43] Refactor shared beh tests to API 2.0 (#22112) * Refactor shared beh tests to API 2.0 * Apply comments --- .../hetero_synthetic.cpp | 17 +- .../skip_tests_config.cpp | 2 - .../compiled_model/compiled_model_base.hpp | 13 + .../ov_executable_network/exec_graph_info.hpp | 31 ++ .../behavior/ov_plugin/core_integration.hpp | 12 + .../behavior/ov_plugin/core_threading.hpp | 10 + .../behavior/ov_plugin/hetero_synthetic.hpp | 64 +++ .../ov_executable_network/exec_graph_info.cpp | 431 ++++++++++++++++++ .../behavior/ov_infer_request/io_tensor.cpp | 23 + .../behavior/ov_plugin/hetero_synthetic.cpp | 196 ++++++++ 10 files changed, 789 insertions(+), 10 deletions(-) rename src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/{plugin => ov_plugin}/hetero_synthetic.cpp (56%) create mode 100644 src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp create mode 100644 src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp similarity index 56% rename from src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp rename to src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp index e767a26ca072ba..c72285d1e5a2f6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/hetero_synthetic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp @@ -4,7 +4,7 @@ #include -#include "behavior/plugin/hetero_synthetic.hpp" +#include "behavior/ov_plugin/hetero_synthetic.hpp" #include "ov_models/builders.hpp" #include "ov_models/subgraph_builders.hpp" @@ -12,22 +12,23 @@ extern const char * cpu_plugin_file_name; namespace { -using namespace HeteroTests; +using ov::test::behavior::OVHeteroSyntheticTest; +using ov::test::behavior::PluginParameter; // this tests load plugin by library name: this is not available during static linkage #ifndef OPENVINO_STATIC_LIBRARY -INSTANTIATE_TEST_SUITE_P(smoke_SingleMajorNode, HeteroSyntheticTest, +INSTANTIATE_TEST_SUITE_P(smoke_SingleMajorNode, OVHeteroSyntheticTest, ::testing::Combine( ::testing::Values(std::vector{{"CPU0", cpu_plugin_file_name}, {"CPU1", cpu_plugin_file_name}}), - ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)), - HeteroSyntheticTest::getTestCaseName); + ::testing::ValuesIn(OVHeteroSyntheticTest::_singleMajorNodeFunctions)), + OVHeteroSyntheticTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_RandomMajorNodes, HeteroSyntheticTest, +INSTANTIATE_TEST_SUITE_P(nightly_RandomMajorNodes, OVHeteroSyntheticTest, ::testing::Combine( 
::testing::Values(std::vector{{"CPU0", cpu_plugin_file_name}, {"CPU1", cpu_plugin_file_name}}), - ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)), - HeteroSyntheticTest::getTestCaseName); + ::testing::ValuesIn(OVHeteroSyntheticTest::_randomMajorNodeFunctions)), + OVHeteroSyntheticTest::getTestCaseName); #endif // !OPENVINO_STATIC_LIBRARY diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 524c8fdee99e27..70d0ee093e2b26 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -62,8 +62,6 @@ std::vector disabledTestPatterns() { R"(.*KernelCachingSupportCase.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)", #endif R"(.*CachingSupportCase.*GPU.*CompileModelCacheTestBase.*CompareWithRefImpl.*)", - // Looks like the test is targeting CPU plugin and doesn't respect that execution graph may vary from plugin to plugin - R"(.*ExecGraphSerializationTest.*)", // unsupported metrics R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // Issue: 111437 diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index 6692159f296e29..bfbd7437668efb 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -454,6 +454,19 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { } } +TEST_P(OVCompiledModelBaseTest, CheckExecGraphInfoSerialization) { + auto filePrefix = ov::test::utils::generateTestFilePrefix(); + std::string out_xml_path = filePrefix + ".xml"; + std::string out_bin_path = filePrefix + ".bin"; + + std::shared_ptr runtime_model; + + auto compiled_model = core->compile_model(function, target_device, configuration); + ASSERT_NO_THROW(runtime_model = compiled_model.get_runtime_model()); + ASSERT_NO_THROW(ov::serialize(runtime_model, out_xml_path, out_bin_path)); + ov::test::utils::removeIRFiles(out_xml_path, out_bin_path); +} + TEST_P(OVCompiledModelBaseTest, getInputFromFunctionWithSingleInput) { ov::CompiledModel execNet; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index 4ebcfb1c34beb3..a0e360c596922c 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -8,6 +8,7 @@ #include "exec_graph_info.hpp" #include "base/ov_behavior_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "pugixml.hpp" namespace ov { namespace test { @@ -45,6 +46,36 @@ class OVExecGraphUniqueNodeNames : public testing::WithParamInterface fnPtr; }; +class OVExecGraphSerializationTest : public testing::WithParamInterface, + public OVCompiledNetworkTestBase { +public: + static std::string 
getTestCaseName(testing::TestParamInfo obj); + void SetUp() override; + void TearDown() override; + +private: + // walker traverse (DFS) xml document and store layer & data nodes in + // vector which is later used for comparison + struct exec_graph_walker : pugi::xml_tree_walker { + std::vector nodes; + bool for_each(pugi::xml_node &node) override; + }; + + // compare_docs() helper + std::pair compare_nodes(const pugi::xml_node &node1, + const pugi::xml_node &node2); + +protected: + // checks if two exec graph xml's are equivalent: + // - the same count of and nodes + // - the same count of attributes of each node + // - the same name of each attribute (value is not checked, since it can differ + // beetween different devices) + std::pair compare_docs(const pugi::xml_document &doc1, + const pugi::xml_document &doc2); + + std::string m_out_xml_path, m_out_bin_path; +}; } // namespace behavior } // namespace test } // namespace ov diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index 5f77a7f28d0edc..bc6df2494a9012 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -518,6 +518,18 @@ TEST_P(OVClassBasicTestP, SetConfigAllNoThrow) { OV_ASSERT_NO_THROW(ie.get_versions(target_device)); } +TEST_P(OVClassBasicTestP, SetGetConfigForTbbTerminateThrows) { + ov::Core ie = createCoreWithTemplate(); + bool value = false; + ASSERT_NO_THROW(ie.set_property({ov::force_tbb_terminate(true)})); + ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_TRUE(value); + + ASSERT_NO_THROW(ie.set_property({{ov::force_tbb_terminate(false)}})); + ASSERT_NO_THROW(value = ie.get_property(target_device, ov::force_tbb_terminate)); + ASSERT_FALSE(value); +} + TEST(OVClassBasicTest, smoke_SetConfigHeteroThrows) { ov::Core ie = createCoreWithTemplate(); OV_ASSERT_NO_THROW(ie.set_property(ov::test::utils::DEVICE_HETERO, ov::enable_profiling(true))); diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp index bc9d8e9ace2249..56c4e0f9e6230d 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_threading.hpp @@ -334,6 +334,16 @@ TEST_P(CoreThreadingTest, smoke_GetVersions) { }); } +// tested function: get_property, UnregisterPlugin +TEST_P(CoreThreadingTest, smoke_GetMetric) { + ov::Core core; + + runParallel([&] () { + core.get_property(target_device, ov::internal::supported_properties); + safePluginUnload(core, target_device); + }); +} + // tested function: set_property for already created plugins TEST_P(CoreThreadingTest, smoke_SetProperty_PluginExists) { ov::Core core; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp new file mode 100644 index 00000000000000..53eac08ca97ded --- /dev/null +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp @@ -0,0 +1,64 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include 
"shared_test_classes/base/ov_subgraph.hpp" +#include "ov_models/utils/ov_helpers.hpp" + +namespace ov { +namespace test { +namespace behavior { + +struct PluginParameter { + std::string _name; + std::string _location; +}; + +struct FunctionParameter { + std::unordered_set _majorPluginNodeIds; + std::shared_ptr _function; + bool _dynamic_batch; + uint32_t _seed; +}; + +using OVHeteroSyntheticTestParameters = std::tuple< + std::vector, + FunctionParameter +>; + +class OVHeteroSyntheticTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseStaticTest { +protected: + enum {Plugin, Function}; + + ~OVHeteroSyntheticTest() override = default; + void SetUp() override; + void TearDown() override; + + std::string SetUpAffinity(); + + std::vector _registredPlugins; + +public: + static std::string getTestCaseName(const ::testing::TestParamInfo& obj); + + static std::vector singleMajorNodeFunctions( + const std::vector()>>& builders, bool dynamic_batch = false); + + static std::vector randomMajorNodeFunctions( + const std::vector()>>& builders, bool dynamic_batch = false, uint32_t seed = 0); + + static std::vector _singleMajorNodeFunctions; + static std::vector _randomMajorNodeFunctions; +}; + +} // namespace behavior +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp index 1e1b93baf5d50d..4c4a6e94d5140e 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_executable_network/exec_graph_info.cpp @@ -353,6 +353,437 @@ TEST_P(OVExecGraphUniqueNodeNames, CheckUniqueNodeNames) { } }; + + + +const char serialize_test_model[] = R"V0G0N( + + + + + + + + 1 + + + + + + + + 1 + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + 1 + + + + + + + + + + + + + + + + + + +)V0G0N"; + +const char expected_serialized_model[] = R"V0G0N( + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + + + + + + + + + + + + + + +)V0G0N"; + +const char expected_serialized_model_cpu[] = R"V0G0N( + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 1 + + + 1 + + + 1 + + + 1 + + + + + 1 + + + + + + + + 1 + + + + + + + + + + + + + +)V0G0N"; + + +std::string OVExecGraphSerializationTest::getTestCaseName(testing::TestParamInfo obj) { + std::ostringstream result; + std::string target_device = obj.param; + std::replace(target_device.begin(), target_device.end(), ':', '.'); + result << "TargetDevice=" << target_device; + return result.str(); +} + +void OVExecGraphSerializationTest::SetUp() { + target_device = this->GetParam(); + SKIP_IF_CURRENT_TEST_IS_DISABLED() + APIBaseTest::SetUp(); + + const std::string XML_EXT = ".xml"; + const std::string BIN_EXT = ".bin"; + + std::string filePrefix = ov::test::utils::generateTestFilePrefix(); + + m_out_xml_path = filePrefix + XML_EXT; + m_out_bin_path = filePrefix + BIN_EXT; +} + +void OVExecGraphSerializationTest::TearDown() { + APIBaseTest::TearDown(); + ov::test::utils::removeIRFiles(m_out_xml_path, m_out_bin_path); +} + +bool 
OVExecGraphSerializationTest::exec_graph_walker::for_each(pugi::xml_node &node) { + std::string node_name{node.name()}; + if (node_name == "layer" || node_name == "data") { + nodes.push_back(node); + } + return true; // continue traversal +} + +std::pair OVExecGraphSerializationTest::compare_nodes(const pugi::xml_node &node1, + const pugi::xml_node &node2) { + // node names must be the same + const std::string node1_name{node1.name()}; + const std::string node2_name{node2.name()}; + if (node1_name != node2_name) { + return {false, "Node name is different: " + node1_name + " != " + node2_name}; + } + + // node attribute count must be the same + const auto attr1 = node1.attributes(); + const auto attr2 = node2.attributes(); + const auto attr1_size = std::distance(attr1.begin(), attr1.end()); + const auto attr2_size = std::distance(attr2.begin(), attr2.end()); + if (attr1_size != attr2_size) { + return {false, "Attribute count is different in <" + node1_name + "> :" + + std::to_string(attr1_size) + " != " + + std::to_string(attr2_size)}; + } + + // every node attribute name must be the same + auto a1 = attr1.begin(); + auto a2 = attr2.begin(); + for (int j = 0; j < attr1_size; ++j, ++a1, ++a2) { + const std::string a1_name{a1->name()}; + const std::string a2_name{a2->name()}; + const std::string a1_value{a1->value()}; + const std::string a2_value{a2->value()}; + if (a1_name != a2_name || (a1_name == "type" && a1_value != a2_value)) { + // TODO: Remove temporary w/a later + if (a1_value == "Output" && a2_value == "Result") { + continue; + } + return {false, "Attributes are different in <" + node1_name + "> : " + + a1_name + "=" + a1_value + " != " + a2_name + + "=" + a2_value}; + } + } + return {true, ""}; +} + +std::pair OVExecGraphSerializationTest::compare_docs(const pugi::xml_document &doc1, + const pugi::xml_document &doc2) { + // traverse document and prepare vector of & nodes to compare + exec_graph_walker walker1, walker2; + doc1.child("net").child("layers").traverse(walker1); + doc2.child("net").child("layers").traverse(walker2); + + // nodes count must be the same + const auto &nodes1 = walker1.nodes; + const auto &nodes2 = walker2.nodes; + if (nodes1.size() != nodes2.size()) { + return {false, "Node count differ: " + std::to_string(nodes1.size()) + + " != " + std::to_string(nodes2.size())}; + } + + // every node must be equivalent + for (int i = 0; i < nodes1.size(); i++) { + const auto res = compare_nodes(nodes1[i], nodes2[i]); + if (res.first == false) { + return res; + } + } + return {true, ""}; +} + +TEST_P(OVExecGraphSerializationTest, ExecutionGraph) { + auto core = utils::PluginCache::get().core(); + auto model = core->read_model(serialize_test_model); + auto compiled_model = core->compile_model(model, target_device); + auto runtime_model = compiled_model.get_runtime_model(); + + ov::serialize(runtime_model, m_out_xml_path, m_out_bin_path); + + pugi::xml_document expected; + pugi::xml_document result; + if (target_device == "CPU" || target_device == "AUTO:CPU" || target_device == "MULTI:CPU") { + ASSERT_TRUE(expected.load_string(expected_serialized_model_cpu)); + } else { + ASSERT_TRUE(expected.load_string(expected_serialized_model)); + } + ASSERT_TRUE(result.load_file(m_out_xml_path.c_str())); + + bool status; + std::string message; + std::tie(status, message) = this->compare_docs(expected, result); + + ASSERT_TRUE(status) << message; +} + } // namespace behavior } // namespace test } // namespace ov diff --git 
a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp index c1bfefc2eb2e43..a6e24feebd50a7 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -86,6 +86,29 @@ TEST_P(OVInferRequestIOTensorTest, canSetAndGetOutput) { ASSERT_EQ(output.get_shape(), actual_tensor.get_shape()); } + +TEST_P(OVInferRequestIOTensorTest, getAfterSetInputDoNotChangeInput) { + auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(input, tensor)); + ov::Tensor actual_tensor; + ASSERT_NO_THROW(actual_tensor = req.get_tensor(input)); + + ASSERT_EQ(tensor.data(), actual_tensor.data()); + ASSERT_EQ(tensor.get_shape(), actual_tensor.get_shape()); + ASSERT_EQ(tensor.get_element_type(), actual_tensor.get_element_type()); +} + +TEST_P(OVInferRequestIOTensorTest, getAfterSetOutputDoNotChangeOutput) { + auto tensor = utils::create_and_fill_tensor(output.get_element_type(), output.get_shape()); + OV_ASSERT_NO_THROW(req.set_tensor(output, tensor)); + ov::Tensor actual_tensor; + ASSERT_NO_THROW(actual_tensor = req.get_tensor(output)); + + ASSERT_EQ(tensor.data(), actual_tensor.data()); + ASSERT_EQ(tensor.get_shape(), actual_tensor.get_shape()); + ASSERT_EQ(tensor.get_element_type(), actual_tensor.get_element_type()); +} + TEST_P(OVInferRequestIOTensorTest, failToSetTensorWithIncorrectName) { auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); ASSERT_THROW(req.set_tensor("incorrect_input", tensor), ov::Exception); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp new file mode 100644 index 00000000000000..125735eb476fd1 --- /dev/null +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp @@ -0,0 +1,196 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "behavior/ov_plugin/hetero_synthetic.hpp" + +#include + +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp" +#include "functional_test_utils/ov_plugin_cache.hpp" +#include "common_test_utils/file_utils.hpp" +#include "openvino/op/util/op_types.hpp" + +namespace ov { +namespace test { +namespace behavior { + +static std::vector()>> builders = { + [] {return ov::test::utils::make_split_multi_conv_concat();}, + [] {return ov::test::utils::make_nested_split_conv_concat();}, + [] {return ov::test::utils::make_cplit_conv_concat_nested_in_branch();}, + [] {return ov::test::utils::make_cplit_conv_concat_nested_in_branch_nested_out();}, + [] {return ov::test::utils::make_nested_branch_conv_concat();}, +}; + +std::string OVHeteroSyntheticTest::getTestCaseName(const ::testing::TestParamInfo& obj) { + std::vector pluginParameters; + FunctionParameter functionParamter; + std::tie(pluginParameters, functionParamter) = obj.param; + std::string name = "function=" + functionParamter._function->get_friendly_name(); + name += "_layers="; + std::size_t num = functionParamter._majorPluginNodeIds.size() - 1; + for 
(auto&& id : functionParamter._majorPluginNodeIds) { + name += id + ((num !=0) ? "," : ""); + num--; + } + name += "_targetDevice=HETERO:"; + num = pluginParameters.size() - 1; + for (auto&& pluginParameter : pluginParameters) { + name += pluginParameter._name + ((num !=0) ? "," : ""); + num--; + } + return name; +} + +void OVHeteroSyntheticTest::SetUp() { + auto& param = GetParam(); + targetDevice = "HETERO:"; + int num = std::get(param).size() - 1; + + auto core = ov::test::utils::PluginCache::get().core(); + for (auto&& pluginParameter : std::get(param)) { + bool registred = true; + try { + if (pluginParameter._location == "openvino_template_plugin") { + core->register_plugin(ov::util::make_plugin_library_name( + ov::test::utils::getExecutableDirectory(), pluginParameter._location + OV_BUILD_POSTFIX), pluginParameter._name); + } else { + core->register_plugin(pluginParameter._location + OV_BUILD_POSTFIX, pluginParameter._name); + } + } catch (ov::Exception& ex) { + if (std::string{ex.what()}.find("Device with \"" + pluginParameter._name + + "\" is already registered in the OpenVINO Runtime") + == std::string::npos) { + throw ex; + } else { + registred = false; + } + } + if (registred) { + _registredPlugins.push_back(pluginParameter._name); + } + targetDevice += pluginParameter._name; + targetDevice += ((num !=0) ? "," : ""); + --num; + } + function = std::get(param)._function; + if (std::get(param)._dynamic_batch) { + for (auto&& input : function->inputs()) { + auto shape = input.get_partial_shape(); + shape[0] = ov::Dimension(1, 16); + } + } +} + +void OVHeteroSyntheticTest::TearDown() { + auto core = ov::test::utils::PluginCache::get().core(); + for (auto&& pluginName : _registredPlugins) { + core->unload_plugin(pluginName); + } +} + +std::string OVHeteroSyntheticTest::SetUpAffinity() { + auto& param = GetParam(); + std::string affinities; + auto& pluginParameters = std::get(param); + affinities += "\n{\n"; + for (auto&& node : std::get(param)._function->get_ordered_ops()) { + std::string affinity; + auto get_affinity = [&](const std::string& name) { + if (std::get(param)._majorPluginNodeIds.end() != + std::get(param)._majorPluginNodeIds.find(name)) { + return pluginParameters.at(0)._name; + } else { + return pluginParameters.at(1)._name; + } + }; + if (ov::op::util::is_constant(node) || ov::op::util::is_output(node) || ov::op::util::is_parameter(node)) { + auto& node_with_affinity_name = + ov::op::util::is_output(node) + ? 
node->input_value(0).get_node()->get_friendly_name() + : node->output(0).get_target_inputs().begin()->get_node()->get_friendly_name(); + affinity = get_affinity(node_with_affinity_name); + } else { + affinity = get_affinity(node->get_friendly_name()); + } + node->get_rt_info()["affinity"] = affinity; + affinities += "\t{\"" + node->get_friendly_name() + "\",\t\t\"" + affinity + "\"}\n"; + } + affinities += "}"; + affinities += "\nseed = " + std::to_string(std::get(param)._seed); + return affinities; +} + +TEST_P(OVHeteroSyntheticTest, someLayersToMajorPluginOthersToFallback) { + auto affinities = SetUpAffinity(); + SCOPED_TRACE(affinities); + run(); +} + +std::vector OVHeteroSyntheticTest::singleMajorNodeFunctions( + const std::vector()>>& builders, + bool dynamic_batch) { + std::vector result; + for (auto&& builder : builders) { + auto function = builder(); + for (auto&& node : function->get_ordered_ops()) { + if (!ov::op::util::is_constant(node) && + !(ov::op::util::is_parameter(node)) && + !(ov::op::util::is_output(node))) { + result.push_back(FunctionParameter{{node->get_friendly_name()}, function, dynamic_batch, 0}); + } + } + } + return result; +} + + +std::vector OVHeteroSyntheticTest::randomMajorNodeFunctions( + const std::vector()>>& builders, + bool dynamic_batch, + uint32_t seed) { + std::vector results; + for (auto p = 0.2; p < 1.; p+=0.2) { + while (seed == 0) { + seed = std::random_device {}(); + } + std::mt19937 e{seed}; + std::bernoulli_distribution d{p}; + for (auto&& builder : builders) { + auto function = builder(); + auto ordered_ops = function->get_ordered_ops(); + for (std::size_t i = 0; i < ordered_ops.size(); ++i) { + std::unordered_set majorPluginNodeIds; + for (auto&& node : ordered_ops) { + if (!(ov::op::util::is_constant(node)) && + !(ov::op::util::is_parameter(node)) && + !(ov::op::util::is_output(node)) && d(e)) { + majorPluginNodeIds.emplace(node->get_friendly_name()); + } + } + if (std::any_of(std::begin(results), std::end(results), [&] (const FunctionParameter& param) { + return majorPluginNodeIds == param._majorPluginNodeIds; + })) { + continue; + } + results.push_back(FunctionParameter{majorPluginNodeIds, function, dynamic_batch, seed}); + } + } + } + return results; +} + + +std::vector OVHeteroSyntheticTest::_singleMajorNodeFunctions + = OVHeteroSyntheticTest::singleMajorNodeFunctions(builders); + +std::vector OVHeteroSyntheticTest::_randomMajorNodeFunctions + = OVHeteroSyntheticTest::randomMajorNodeFunctions(builders); + +} // namespace behavior +} // namespace test +} // namespace ov From 314eb0c6fb81fd12606298ecefb0e0497fa60425 Mon Sep 17 00:00:00 2001 From: yanlan song Date: Fri, 12 Jan 2024 21:14:59 +0800 Subject: [PATCH 25/43] [GPU] fix null event when retrieving profiling info from unfused subgraph (#22087) --------- Signed-off-by: fishbell --- .../include/intel_gpu/graph/network.hpp | 2 +- .../intel_gpu/src/graph/primitive_inst.cpp | 3 +- .../passes/prepare_primitive_fusing_test.cpp | 45 +++++++++++++++++++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp index 02f57ac21a76ad..3080853d437b3d 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp @@ -123,7 +123,7 @@ struct network { network_output get_output(const primitive_id& output_id) { event::ptr evt; - if (get_stream().get_queue_type() == QueueTypes::out_of_order) + if 
(get_stream().get_queue_type() == QueueTypes::out_of_order || _enable_profiling)
             evt = get_primitive_event(output_id);
         return network_output(evt, get_output_memory(output_id), get_stream_ptr(), get_output_layout(output_id));
     }
diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 2abae896317657..937179e14b03f2 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -1854,7 +1854,8 @@ cldnn::network::ptr primitive_inst::get_unfused_subgraph() {
         }
         ExecutionConfig subgraph_config{
             ov::intel_gpu::allow_static_input_reorder(true),
-            ov::intel_gpu::allow_new_shape_infer(true)
+            ov::intel_gpu::allow_new_shape_infer(true),
+            ov::enable_profiling(get_network().get_config().get_property(ov::enable_profiling))
         };
         auto prog = program::build_program(get_network().get_engine(),
                                            t,
diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp
index a5f1d9e4706b36..f1b34831510925 100644
--- a/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_primitive_fusing_test.cpp
@@ -604,3 +604,48 @@ TEST(prepare_primitive_fusing, fuse_constant_transposes_accuracy_test) {
         ASSERT_EQ(output_ptr[i], output_ptr_ref[i]);
     }
 }
+
+TEST(prepare_primitive_fusing, can_profiling_data_when_fuse_illegal) {
+    auto& engine = get_test_engine();
+    auto weights = engine.allocate_memory({ov::PartialShape{2, 10}, data_types::u8, format::bfyx});
+    auto in_layout = layout{ov::PartialShape::dynamic(2), data_types::u8, format::bfyx};
+    auto in_eltw_layout = layout{ov::PartialShape::dynamic(2), data_types::f32, format::bfyx};
+
+    set_values<uint8_t>(weights, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+
+    topology topology;
+    topology.add(data("weights", weights));
+    topology.add(input_layout("input", in_layout));
+    topology.add(input_layout("extra_input", in_eltw_layout));
+    topology.add(fully_connected("fc", input_info("input"), {"weights"}, "", data_types::f32));
+    topology.add(eltwise("eltw", {input_info("fc"), input_info("extra_input")}, eltwise_mode::sum));
+    topology.add(reorder("reorder", input_info("eltw"), format::bfyx, data_types::f32));
+
+    ExecutionConfig config = get_test_default_config(engine);
+    config.set_property(ov::intel_gpu::queue_type(ov::intel_gpu::QueueTypes::in_order));
+    config.set_property(ov::intel_gpu::optimize_data(true));
+    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+    config.set_property(ov::enable_profiling(true));
+    auto prog = program::build_program(engine, topology, config, false, true);
+
+    layout_optimizer lo(true);
+
+    program_wrapper::apply_opt_pass<prepare_primitive_fusing>(*prog, lo);
+
+    ASSERT_NE(prog, nullptr);
+    ASSERT_FALSE(has_node_with_type<eltwise>(*prog));
+
+    cldnn::network net(prog, 0);
+
+    auto input_memory = engine.allocate_memory(layout{ov::PartialShape{1, 10}, data_types::u8, format::bfyx});
+    auto extra_input_memory = engine.allocate_memory(layout{ov::PartialShape{2, 2}, data_types::f32, format::bfyx});
+    set_values<uint8_t>(input_memory, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
+    set_values<float>(extra_input_memory, {10, 20, 30, 40});
+
+    net.set_input_data("input", input_memory);
+    net.set_input_data("extra_input", extra_input_memory);
+
+    auto output = net.execute();
+    for (auto& iter : output)
+        ASSERT_NE(iter.second.get_event(), nullptr);
+}
\ No newline at end of file
From 
9ebacbb379e6c022cf9f76add4aa4ef405edaa58 Mon Sep 17 00:00:00 2001 From: Sebastian Golebiewski Date: Fri, 12 Jan 2024 14:24:15 +0100 Subject: [PATCH 26/43] [DOCS] Merge Samples Articles Language Versions (#21661) * Merge samples * Update docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst Co-authored-by: Maciej Smyk * Update docs/articles_en/learn_openvino/openvino_samples.rst Co-authored-by: Maciej Smyk * Update docs/articles_en/learn_openvino/openvino_samples.rst Co-authored-by: Maciej Smyk * additional resources * Update docs/articles_en/learn_openvino/openvino_samples/model_creation.rst Co-authored-by: Maciej Smyk * Update docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst Co-authored-by: Maciej Smyk * reorganize contents of requirements * remove api reference * add links to READMEs on repo * remove speech recognition sample * removal of deprecation notice * update conversion steps * Revert remove speech recognition sample * remove trailing spaces * fix links * Removed unwanted changes from submodules * applying suggestions * update api reference * apply suggestions --------- Co-authored-by: Maciej Smyk --- .../Getting_performance_numbers.rst | 8 +- .../performance_benchmarks_faq.rst | 6 +- .../documentation/openvino_extensibility.rst | 2 +- .../low_precision_transformations.rst | 2 +- .../--installing-model-dev-tools.rst | 6 +- .../graph_construction.rst | 4 +- .../preprocessing.rst | 2 +- .../mxnet_caffe_kaldi/aspire_tdnn_model.rst | 4 +- .../pot_api_examples/pot_example_speech.rst | 2 +- .../pot_examples/pot_cli_example.rst | 4 +- docs/articles_en/get_started.rst | 2 +- .../configurations-for-intel-gna.rst | 6 +- .../installing-openvino-apt.rst | 4 +- ...installing-openvino-from-archive-linux.rst | 4 +- .../installing-openvino-yum.rst | 4 +- ...installing-openvino-from-archive-macos.rst | 4 +- .../installing-openvino-conda.rst | 4 +- ...stalling-openvino-from-archive-windows.rst | 4 +- .../learn_openvino/openvino_samples.rst | 128 ++- .../automatic_speech_recognition.rst | 596 +++++++++++ .../openvino_samples/benchmark_tool.rst | 931 ++++++++++++++++++ .../openvino_samples/bert_benchmark.rst | 69 ++ .../c_sample_hello_classification.rst | 157 --- ...sample_hello_nv12_input_classification.rst | 147 --- .../openvino_samples/cpp_benchmark_tool.rst | 419 -------- ...pp_sample_automatic_speech_recognition.rst | 308 ------ .../cpp_sample_hello_classification.rst | 174 ---- ...sample_hello_nv12_input_classification.rst | 171 ---- .../cpp_sample_hello_query_device.rst | 126 --- .../cpp_sample_hello_reshape_ssd.rst | 162 --- .../cpp_sample_image_classification_async.rst | 222 ----- .../cpp_sample_model_creation.rst | 238 ----- .../cpp_sample_sync_benchmark.rst | 145 --- .../cpp_sample_throughput_benchmark.rst | 150 --- .../openvino_samples/get_started_demos.rst | 247 ++--- .../openvino_samples/hello_classification.rst | 267 +++++ .../hello_nv12_input_classification.rst | 218 ++++ .../openvino_samples/hello_query_device.rst | 191 ++++ .../openvino_samples/hello_reshape_ssd.rst | 213 ++++ .../image_classification_async.rst | 334 +++++++ .../openvino_samples/model_creation.rst | 299 ++++++ .../python_benchmark_tool.rst | 501 ---------- ...on_sample_automatic_speech_recognition.rst | 401 -------- .../python_sample_bert_benchmark.rst | 83 -- .../python_sample_hello_classification.rst | 151 --- .../python_sample_hello_query_device.rst | 121 --- .../python_sample_hello_reshape_ssd.rst | 134 --- ...thon_sample_image_classification_async.rst 
| 188 ---- .../python_sample_model_creation.rst | 177 ---- .../python_sample_sync_benchmark.rst | 142 --- .../python_sample_throughput_benchmark.rst | 148 --- .../openvino_samples/sync_benchmark.rst | 174 ++++ .../openvino_samples/throughput_benchmark.rst | 179 ++++ .../Device_Plugins.rst | 2 +- .../Device_Plugins/CPU.rst | 2 +- .../Device_Plugins/GNA.rst | 6 +- .../Device_Plugins/GPU.rst | 4 +- .../Device_Plugins/config_properties.rst | 2 +- .../ShapeInference.rst | 2 +- .../dldt_deployment_optimization_common.rst | 2 +- .../performance_hints.rst | 2 +- .../auto_device_selection.rst | 4 +- .../automatic_batching.rst | 2 +- .../inference_modes_overview/multi_device.rst | 2 +- .../ov_infer_request.rst | 2 +- docs/dev/pypi_publish/pypi-openvino-rt.md | 2 +- ...classification-to-openvino-with-output.rst | 2 +- ...2-pytorch-onnx-to-openvino-with-output.rst | 2 +- ...to-openvino-classification-with-output.rst | 2 +- ...105-language-quantize-bert-with-output.rst | 2 +- ...tion-quantization-data2vec-with-output.rst | 2 +- docs/notebooks/108-gpu-device-with-output.rst | 244 +++-- .../109-latency-tricks-with-output.rst | 2 +- .../109-throughput-tricks-with-output.rst | 2 +- ...110-ct-scan-live-inference-with-output.rst | 2 +- ...segmentation-quantize-nncf-with-output.rst | 2 +- ...training-quantization-nncf-with-output.rst | 2 +- ...lassification-quantization-with-output.rst | 2 +- .../119-tflite-to-openvino-with-output.rst | 2 +- ...nter-semantic-segmentation-with-output.rst | 2 +- ...219-knowledge-graphs-conve-with-output.rst | 2 +- ...ss-lingual-books-alignment-with-output.rst | 4 +- .../226-yolov7-optimization-with-output.rst | 2 +- ...lov8-instance-segmentation-with-output.rst | 9 +- ...-yolov8-keypoint-detection-with-output.rst | 9 +- ...30-yolov8-object-detection-with-output.rst | 2 +- .../237-segment-anything-with-output.rst | 6 +- ...238-deep-floyd-if-optimize-with-output.rst | 2 +- ...low-training-openvino-nncf-with-output.rst | 4 +- ...uantization-aware-training-with-output.rst | 2 +- ...uantization-aware-training-with-output.rst | 2 +- samples/c/hello_classification/README.md | 6 +- .../hello_nv12_input_classification/README.md | 6 +- .../cpp/benchmark/sync_benchmark/README.md | 4 +- .../benchmark/throughput_benchmark/README.md | 6 +- samples/cpp/benchmark_app/README.md | 6 +- .../cpp/classification_sample_async/README.md | 4 +- samples/cpp/hello_classification/README.md | 5 +- .../hello_nv12_input_classification/README.md | 6 +- samples/cpp/hello_query_device/README.md | 6 +- samples/cpp/hello_reshape_ssd/README.md | 6 +- samples/cpp/model_creation_sample/README.md | 10 +- .../python/benchmark/bert_benchmark/README.md | 2 +- .../python/benchmark/sync_benchmark/README.md | 4 +- .../benchmark/throughput_benchmark/README.md | 6 +- .../classification_sample_async/README.md | 8 +- samples/python/hello_classification/README.md | 5 +- samples/python/hello_query_device/README.md | 4 +- samples/python/hello_reshape_ssd/README.md | 4 +- .../python/model_creation_sample/README.md | 10 +- .../docs/gpu_plugin_driver_troubleshooting.md | 2 +- tools/benchmark_tool/README.md | 4 +- 112 files changed, 3918 insertions(+), 4927 deletions(-) create mode 100644 docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/bert_benchmark.rst delete mode 100644 
docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/hello_nv12_input_classification.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/hello_reshape_ssd.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/image_classification_async.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/model_creation.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.rst delete mode 100644 docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/sync_benchmark.rst create mode 100644 docs/articles_en/learn_openvino/openvino_samples/throughput_benchmark.rst diff --git a/docs/articles_en/about_openvino/performance_benchmarks/Getting_performance_numbers.rst b/docs/articles_en/about_openvino/performance_benchmarks/Getting_performance_numbers.rst index c683119ba28fd3..a10546e3e0a235 100644 --- a/docs/articles_en/about_openvino/performance_benchmarks/Getting_performance_numbers.rst +++ b/docs/articles_en/about_openvino/performance_benchmarks/Getting_performance_numbers.rst @@ -16,9 +16,7 @@ Test performance with the benchmark_app You can run OpenVINO benchmarks in both C++ and Python 
APIs, yet the experience differs in each case. The Python one is part of OpenVINO Runtime installation, while C++ is available as a code sample. -For a detailed description, see: -* :doc:`benchmark_app for C++ ` -* :doc:`benchmark_app for Python `. +For a detailed description, see: :doc:`benchmark_app `. Make sure to install the latest release package with support for frameworks of the models you want to test. For the most reliable performance benchmarks, :doc:`prepare the model for use with OpenVINO `. @@ -87,7 +85,7 @@ slower than the subsequent ones, an aggregated value can be used for the executi When comparing the OpenVINO Runtime performance with the framework or another reference code, make sure that both versions are as similar as possible: -- Wrap the exact inference execution (for examples, see :doc:`Benchmark app `). +- Wrap the exact inference execution (for examples, see :doc:`Benchmark app `). - Do not include model loading time. - Ensure that the inputs are identical for OpenVINO Runtime and the framework. For example, watch out for random values that can be used to populate the inputs. - In situations when any user-side pre-processing should be tracked separately, consider :doc:`image pre-processing and conversion `. @@ -98,7 +96,7 @@ Internal Inference Performance Counters and Execution Graphs +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ More detailed insights into inference performance breakdown can be achieved with device-specific performance counters and/or execution graphs. -Both :doc:`C++ ` and :doc:`Python ` +Both :doc:`C++ and Python ` versions of the *benchmark_app* support a ``-pc`` command-line parameter that outputs internal execution breakdown. For example, the table shown below is part of performance counters for quantized diff --git a/docs/articles_en/about_openvino/performance_benchmarks/performance_benchmarks_faq.rst b/docs/articles_en/about_openvino/performance_benchmarks/performance_benchmarks_faq.rst index dceaeeac43cb49..0fdc49525dc9f0 100644 --- a/docs/articles_en/about_openvino/performance_benchmarks/performance_benchmarks_faq.rst +++ b/docs/articles_en/about_openvino/performance_benchmarks/performance_benchmarks_faq.rst @@ -31,10 +31,8 @@ Performance Information F.A.Q. All of the performance benchmarks are generated using the open-source tool within the Intel® Distribution of OpenVINO™ toolkit - called ``benchmark_app``. This tool is available - :doc:`for C++ apps `. - as well as - :doc:`for Python apps `. + called :doc:`benchmark_app `. + This tool is available for Python and C++ apps. For a simple instruction on testing performance, see the :doc:`Getting Performance Numbers Guide `. 
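To make the benchmarking advice above concrete, the following minimal C++ sketch
wraps only the inference execution, leaves model loading out of the timed section,
and reads the per-layer counters that ``benchmark_app -pc`` reports. It is an
illustration only, not part of this patch; the model path, device name, and
iteration count are hypothetical, and only public OpenVINO Runtime 2.0 API calls
(``ov::Core``, ``compile_model``, ``infer``, ``get_profiling_info``) are used:

.. code-block:: cpp

   #include <chrono>
   #include <iostream>
   #include <openvino/openvino.hpp>

   int main() {
       ov::Core core;
       // Model reading and compilation are deliberately excluded from the timed section.
       auto model = core.read_model("model.xml");  // hypothetical path
       auto compiled = core.compile_model(model, "CPU", ov::enable_profiling(true));
       auto request = compiled.create_infer_request();

       // The first inference may carry one-time initialization costs: warm up once.
       // A real comparison must also populate the input tensors with the same data
       // that is fed to the reference framework.
       request.infer();

       const int iterations = 100;  // hypothetical count
       auto start = std::chrono::steady_clock::now();
       for (int i = 0; i < iterations; ++i)
           request.infer();
       auto ms = std::chrono::duration<double, std::milli>(
                     std::chrono::steady_clock::now() - start).count();
       std::cout << "Average latency: " << ms / iterations << " ms\n";

       // Per-layer breakdown, similar to the -pc output of benchmark_app.
       for (const auto& info : request.get_profiling_info())
           std::cout << info.node_name << ": " << info.real_time.count() << " us\n";
   }

``std::chrono::steady_clock`` is used instead of ``system_clock`` so that the
measurement is monotonic and unaffected by wall-clock adjustments.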
diff --git a/docs/articles_en/documentation/openvino_extensibility.rst b/docs/articles_en/documentation/openvino_extensibility.rst index eaed1bab06d0a5..783c94b6f220cd 100644 --- a/docs/articles_en/documentation/openvino_extensibility.rst +++ b/docs/articles_en/documentation/openvino_extensibility.rst @@ -194,5 +194,5 @@ See Also * :doc:`OpenVINO Transformations ` * :doc:`Using OpenVINO Runtime Samples ` -* :doc:`Hello Shape Infer SSD sample ` +* :doc:`Hello Shape Infer SSD sample ` diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.rst b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.rst index c64610f8a09f04..a541ba8d2a7b7c 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.rst +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/low_precision_transformations.rst @@ -329,7 +329,7 @@ After that you should quantize model by the :doc:`Model Quantizer `. +The simplest way to infer the model and collect performance counters is :doc:`Benchmark Application `. .. code-block:: sh diff --git a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst index 66db6e8d245851..55d3494ae789be 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/--installing-model-dev-tools.rst @@ -196,8 +196,8 @@ Try the :doc:`C++ Quick Start Example ` page for other C++ example applications to get you started with OpenVINO, such as: -* :doc:`Basic object detection with the Hello Reshape SSD C++ sample ` -* :doc:`Automatic speech recognition C++ sample ` +* :doc:`Basic object detection with the Hello Reshape SSD C++ sample ` +* :doc:`Automatic speech recognition C++ sample ` Learn OpenVINO Development Tools ++++++++++++++++++++++++++++++++ @@ -205,7 +205,7 @@ Learn OpenVINO Development Tools * Explore a variety of pre-trained deep learning models in the :doc:`Open Model Zoo ` and deploy them in demo applications to see how they work. * Want to import a model from another framework and optimize its performance with OpenVINO? Visit the :doc:`Convert a Model ` page. * Accelerate your model's speed even further with quantization and other compression techniques using :doc:`Neural Network Compression Framework (NNCF) `. -* Benchmark your model's inference speed with one simple command using the :doc:`Benchmark Tool `. +* Benchmark your model's inference speed with one simple command using the :doc:`Benchmark Tool `. 
Additional Resources #################### diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst index 8a73685e3eaa26..e25951582bc976 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/graph_construction.rst @@ -37,6 +37,4 @@ API 2.0 Additional Resources #################### -* :doc:`Hello Model Creation C++ Sample ` -* :doc:`Hello Model Creation Python Sample ` - +* :doc:`Hello Model Creation Sample ` diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst index ad363a47fff38a..263e6bc32b171b 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/preprocessing.rst @@ -199,5 +199,5 @@ Additional Resources #################### - :doc:`Preprocessing details ` -- :doc:`NV12 classification sample ` +- :doc:`NV12 classification sample ` diff --git a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst index 4cf7e581a85307..0b55973e6a2b57 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mxnet_caffe_kaldi/aspire_tdnn_model.rst @@ -34,7 +34,7 @@ Example: Running ASpIRE Chain TDNN Model with the Speech Recognition Sample .. note:: Before you continue with this part of the article, get familiar with the - :doc:`Speech Recognition sample `. + :doc:`Speech Recognition sample `. In this example, the input data contains one utterance from one speaker. @@ -151,5 +151,5 @@ Run the Speech Recognition sample with the created ivector ``.ark`` file: Results can be decoded as described in "Use of Sample in Kaldi Speech Recognition Pipeline" -in the :doc:`Speech Recognition Sample description ` article. +in the :doc:`Speech Recognition Sample description ` article. diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst index 56f153b46652c2..9a1ddc1dee9ade 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst @@ -45,5 +45,5 @@ How to Run the Example - ``-s``, ``--subset_size`` option. Defines subset size for calibration; - ``-o``, ``--output`` option. Defines output folder for the quantized model. -3. Validate your INT8 model using ``./speech_example`` from the Inference Engine examples. Follow the :doc:`speech example description link ` for details. +3. Validate your INT8 model using ``./speech_example`` from the Inference Engine examples. 
Follow the :doc:`speech example description link ` for details. diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst index c5fd319d988ef2..0fc5f881f7cafe 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst @@ -52,7 +52,7 @@ For more information about Model Conversion API, refer to its :doc:`documentatio Performance Benchmarking of Full-Precision Models ################################################# -Check the performance of the full-precision model in the IR format using :doc:`Deep Learning Benchmark ` tool: +Check the performance of the full-precision model in the IR format using :doc:`Deep Learning Benchmark ` tool: .. code-block:: sh @@ -206,7 +206,7 @@ Model Quantization Performance Benchmarking of Quantized Model ########################################### -Check the performance of the quantized model using :doc:`Deep Learning Benchmark ` tool: +Check the performance of the quantized model using :doc:`Deep Learning Benchmark ` tool: .. code-block:: sh diff --git a/docs/articles_en/get_started.rst b/docs/articles_en/get_started.rst index d985335cd3a574..1ffb9b283143b2 100644 --- a/docs/articles_en/get_started.rst +++ b/docs/articles_en/get_started.rst @@ -123,7 +123,7 @@ Pipeline and model configuration features in OpenVINO Runtime allow you to easil * :doc:`Automatic Batching ` performs on-the-fly grouping of inference requests to maximize utilization of the target hardware’s memory and processing cores. * :doc:`Performance Hints ` automatically adjust runtime parameters to prioritize for low latency or high throughput * :doc:`Dynamic Shapes ` reshapes models to accept arbitrarily-sized inputs, increasing flexibility for applications that encounter different data shapes -* :doc:`Benchmark Tool ` characterizes model performance in various hardware and pipeline configurations +* :doc:`Benchmark Tool ` characterizes model performance in various hardware and pipeline configurations .. _additional-resources: diff --git a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst b/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst index 5d29334f2427e4..606572e97284af 100644 --- a/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst +++ b/docs/articles_en/get_started/configurations-header/configurations-for-intel-gna.rst @@ -84,8 +84,8 @@ Now you are ready to try out OpenVINO™. 
You can use the following tutorials to * Developing in C/C++: - * :doc:`Image Classification Async C++ Sample ` - * :doc:`Hello Classification C++ Sample ` - * :doc:`Hello Reshape SSD C++ Sample ` + * :doc:`Image Classification Async C++ Sample ` + * :doc:`Hello Classification C++ Sample ` + * :doc:`Hello Reshape SSD C++ Sample ` diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst index f6b89342525229..d5360a92bb7813 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-apt.rst @@ -238,8 +238,8 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ + * `Basic object detection with the Hello Reshape SSD C++ sample `_ + * `Automatic speech recognition C++ sample `_ You can also try the following: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst index 9be37b34b50c32..6d304da1350a26 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-from-archive-linux.rst @@ -299,8 +299,8 @@ Learn more about how to integrate a model in OpenVINO applications by trying out Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - * `Basic object detection with the Hello Reshape SSD C++ sample `__ - * `Automatic speech recognition C++ sample `__ + * `Basic object detection with the Hello Reshape SSD C++ sample `__ + * `Automatic speech recognition C++ sample `__ diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst index 53df2d1fccfff1..0fb0ebf3551b50 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-linux-header/installing-openvino-yum.rst @@ -216,8 +216,8 @@ Learn more about how to integrate a model in OpenVINO applications by trying out * Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ + * `Basic object detection with the Hello Reshape SSD C++ sample `_ + * `Automatic speech recognition C++ sample `_ You can also try the following things: diff --git 
a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst index 05350146dc3301..4f198c3c30f3d4 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-macos-header/installing-openvino-from-archive-macos.rst @@ -173,8 +173,8 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ + * `Basic object detection with the Hello Reshape SSD C++ sample `_ + * `Automatic speech recognition C++ sample `_ Uninstalling Intel® Distribution of OpenVINO™ Toolkit ##################################################### diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst index 2eef31241a2b44..cb3045da5aaa45 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-conda.rst @@ -119,8 +119,8 @@ on building and running a basic image classification C++ application. Visit the :doc:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: -* `Basic object detection with the Hello Reshape SSD C++ sample `__ -* `Automatic speech recognition C++ sample `__ +* `Basic object detection with the Hello Reshape SSD C++ sample `__ +* `Automatic speech recognition C++ sample `__ diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst index 1a587d23ec829e..2d164bb09cb56b 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst @@ -199,8 +199,8 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine Visit the :ref:`Samples ` page for other C++ example applications to get you started with OpenVINO, such as: - * `Basic object detection with the Hello Reshape SSD C++ sample `_ - * `Automatic speech recognition C++ sample `_ + * `Basic object detection with the Hello Reshape SSD C++ sample `_ + * `Automatic speech recognition C++ sample `_ .. _uninstall-from-windows: diff --git a/docs/articles_en/learn_openvino/openvino_samples.rst b/docs/articles_en/learn_openvino/openvino_samples.rst index 3cff56377bd7fe..a12f1048e4dac6 100644 --- a/docs/articles_en/learn_openvino/openvino_samples.rst +++ b/docs/articles_en/learn_openvino/openvino_samples.rst @@ -7,106 +7,90 @@ OpenVINO™ Samples .. _code samples: .. 
meta:: - :description: OpenVINO™ samples include a collection of simple console applications - that explain how to implement the capabilities and features of + :description: OpenVINO™ samples include a collection of simple console applications + that explain how to implement the capabilities and features of OpenVINO API into an application. .. toctree:: :maxdepth: 1 :hidden: - + Get Started with C++ Samples - openvino_inference_engine_samples_classification_sample_async_README - openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README - openvino_inference_engine_samples_hello_classification_README - openvino_inference_engine_ie_bridges_c_samples_hello_classification_README - openvino_inference_engine_ie_bridges_python_sample_hello_classification_README - openvino_inference_engine_samples_hello_reshape_ssd_README - openvino_inference_engine_ie_bridges_python_sample_hello_reshape_ssd_README - openvino_inference_engine_samples_hello_nv12_input_classification_README - openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README - openvino_inference_engine_samples_hello_query_device_README - openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README - openvino_inference_engine_samples_model_creation_sample_README - openvino_inference_engine_ie_bridges_python_sample_model_creation_sample_README - openvino_inference_engine_samples_speech_sample_README - openvino_inference_engine_ie_bridges_python_sample_speech_sample_README - openvino_inference_engine_samples_sync_benchmark_README - openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README - openvino_inference_engine_samples_throughput_benchmark_README - openvino_inference_engine_ie_bridges_python_sample_throughput_benchmark_README - openvino_inference_engine_ie_bridges_python_sample_bert_benchmark_README - openvino_inference_engine_samples_benchmark_app_README - openvino_inference_engine_tools_benchmark_tool_README - - -The OpenVINO™ samples are simple console applications that show how to utilize specific OpenVINO API capabilities within an application. They can assist you in executing specific tasks such as loading a model, running inference, querying specific device capabilities, etc. + openvino_sample_hello_classification + openvino_sample_hello_nv12_input_classification + openvino_sample_hello_query_device + openvino_sample_hello_reshape_ssd + openvino_sample_image_classification_async + openvino_sample_model_creation + openvino_sample_sync_benchmark + openvino_sample_throughput_benchmark + openvino_sample_bert_benchmark + openvino_sample_benchmark_tool + openvino_sample_automatic_speech_recognition + + +The OpenVINO™ samples are simple console applications that show how to utilize +specific OpenVINO API capabilities within an application. They can assist you in +executing specific tasks such as loading a model, running inference, querying +specific device capabilities, etc. The applications include: .. important:: - - All C++ samples support input paths containing only ASCII characters, except for the Hello Classification Sample, which supports Unicode. - -- **Hello Classification Sample** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API. Input of any size and layout can be set to an infer request which will be pre-processed automatically during inference. The sample supports only images as input and supports input paths containing only Unicode characters. 
- - - :doc:`Python Sample ` - - :doc:`C++ Sample ` - - :doc:`C Sample ` - -- **Hello NV12 Input Classification Sample** – Input of any size and layout can be provided to an infer request. The sample transforms the input to the NV12 color format and pre-process it automatically during inference. The sample supports only images as input. - - - :doc:`C++ Sample ` - - :doc:`C Sample ` -- **Hello Query Device Sample** – Query of available OpenVINO devices and their metrics, configuration values. + All C++ samples support input paths containing only ASCII characters, except + for the Hello Classification Sample, which supports Unicode. - - :doc:`Python* Sample ` - - :doc:`C++ Sample ` +- :doc:`Hello Classification Sample ` - + Inference of image classification networks like AlexNet and GoogLeNet using + Synchronous Inference Request API. Input of any size and layout can be set to + an infer request which will be pre-processed automatically during inference. + The sample supports only images as input and supports input paths containing + only Unicode characters. -- **Hello Reshape SSD Sample** – Inference of SSD networks resized by ShapeInfer API according to an input size. +- :doc:`Hello NV12 Input Classification Sample ` - + Input of any size and layout can be provided to an infer request. The sample + transforms the input to the NV12 color format and pre-process it automatically + during inference. The sample supports only images as input. - - :doc:`Python Sample** ` - - :doc:`C++ Sample** ` +- :doc:`Hello Query Device Sample ` - + Query of available OpenVINO devices and their metrics, configuration values. -- **Image Classification Async Sample** – Inference of image classification networks like AlexNet and GoogLeNet using Asynchronous Inference Request API. The sample supports only images as inputs. +- :doc:`Hello Reshape SSD Sample ` - + Inference of SSD networks resized by ShapeInfer API according to an input size. - - :doc:`Python* Sample ` - - :doc:`C++ Sample ` +- :doc:`Image Classification Async Sample ` - + Inference of image classification networks like AlexNet and GoogLeNet using + Asynchronous Inference Request API. The sample supports only images as inputs. -- **OpenVINO Model Creation Sample** – Construction of the LeNet model using the OpenVINO model creation sample. - - - :doc:`Python Sample ` - - :doc:`C++ Sample ` +- :doc:`OpenVINO Model Creation Sample ` - + Construction of the LeNet model using the OpenVINO model creation sample. - **Benchmark Samples** - Simple estimation of a model inference performance - - :doc:`Sync Python* Sample ` - - :doc:`Sync C++ Sample ` - - :doc:`Throughput Python* Sample ` - - :doc:`Throughput C++ Sample ` - - :doc:`Bert Python* Sample ` - -- **Benchmark Application** – Estimates deep learning inference performance on supported devices for synchronous and asynchronous modes. + - :doc:`Sync Samples ` + - :doc:`Throughput Samples ` + - :doc:`Bert Python Sample ` - - :doc:`Benchmark Python Tool ` +- :doc:`Benchmark Application ` - Estimates deep + learning inference performance on supported devices for synchronous and + asynchronous modes. - - Python version of the benchmark tool is a core component of the OpenVINO installation package and - may be executed with the following command: ``benchmark_app -m -i -d ``. - - :doc:`Benchmark C++ Tool ` + Python version of the benchmark tool is a core component of the OpenVINO + installation package and may be executed with the following command: + .. 
code-block:: console

+     benchmark_app -m <model> -i <input> -d <device>

-- **Automatic Speech Recognition Sample** - ``[DEPRECATED]`` Acoustic model inference based on Kaldi neural networks and speech feature vectors.
+- ``[DEPRECATED]`` :doc:`Automatic Speech Recognition Sample <openvino_sample_automatic_speech_recognition>` -
+  Acoustic model inference based on Kaldi neural networks and
+  speech feature vectors.

-   - :doc:`Python Sample `
-   - :doc:`C++ Sample `

-See Also
-########
+Additional Resources
+####################

 * :doc:`Get Started with Samples `
 * :doc:`OpenVINO Runtime User Guide `
-
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst b/docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst
new file mode 100644
index 00000000000000..3f25d42635741c
--- /dev/null
+++ b/docs/articles_en/learn_openvino/openvino_samples/automatic_speech_recognition.rst
@@ -0,0 +1,596 @@
+.. {#openvino_sample_automatic_speech_recognition}
+
+[DEPRECATED] Automatic Speech Recognition Sample
+====================================================
+
+
+
+.. meta::
+   :description: Learn how to infer an acoustic model based on Kaldi
+                 neural networks and speech feature vectors using Synchronous
+                 Inference Request API.
+
+
+.. note::
+
+   This sample is now deprecated and will be removed with OpenVINO 2024.0.
+   The sample was mainly designed to demonstrate the features of the GNA plugin
+   and the use of models produced by the Kaldi framework. OpenVINO support for
+   these components is now deprecated and will be discontinued, making the sample
+   redundant.
+
+
+This sample demonstrates how to do synchronous inference of an acoustic model based
+on Kaldi neural models and speech feature vectors.
+
+The sample works with Kaldi ARK or Numpy uncompressed NPZ files, so it does not
+cover an end-to-end speech recognition scenario (speech to text). It requires additional
+preprocessing (feature extraction) to get a feature vector from a speech signal,
+as well as postprocessing (decoding) to produce text from scores. Before using the
+sample, refer to the following requirements:
+
+- The sample accepts any file format supported by ``core.read_model``.
+- The sample has been validated with an acoustic model based on Kaldi neural models
+  (see the :ref:`Model Preparation <model-preparation-speech>` section).
+- To build the sample, use the instructions available in the :ref:`Build the Sample Applications `
+  section of the "Get Started with Samples" guide.
+
+
+How It Works
+####################
+
+At startup, the sample application reads command-line parameters, loads a specified
+model and input data to the OpenVINO™ Runtime plugin, and performs synchronous inference
+on all speech utterances stored in the input file, logging each step in a standard output stream.
+
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/python/speech_sample/speech_sample.py
+            :language: python
+
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/cpp/speech_sample/main.cpp
+            :language: cpp
+
+
+You can see an explicit description of each sample step in the
+:doc:`Integration Steps `
+section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+
+GNA-specific details
+####################
+
+Quantization
+++++++++++++++++++++
+
+If the GNA device is selected (for example, using the ``-d GNA`` flag), the GNA
+OpenVINO™ Runtime plugin quantizes the model and input feature vector sequence
+to integer representation before performing inference.
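+
+As a rough illustration of the *static* mode described below, the scale factor
+can be viewed as the floating-point multiplier that maps the largest absolute
+value in the first utterance to 16384 (15 bits). The following minimal C++
+sketch is not part of the sample; the feature buffer is hypothetical:
+
+.. code-block:: cpp
+
+   #include <algorithm>
+   #include <cmath>
+   #include <vector>
+
+   // Sketch: derive a static quantization scale factor from the first utterance,
+   // mapping its maximum absolute feature value to 16384 (15 bits).
+   float static_scale_factor(const std::vector<float>& first_utterance) {
+       float max_abs = 0.0f;
+       for (float v : first_utterance)
+           max_abs = std::max(max_abs, std::abs(v));
+       // Fall back to 1.0 for an all-zero utterance to avoid division by zero.
+       return max_abs > 0.0f ? 16384.0f / max_abs : 1.0f;
+   }
+
+A user-defined value passed via the ``-sf`` flag simply takes the place of this
+computed factor.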
+ +Several neural model quantization modes: + +- *static* - The first utterance in the input file is scanned for dynamic range. + The scale factor (floating point scalar multiplier) required to scale the maximum + input value of the first utterance to 16384 (15 bits) is used for all subsequent + inputs. The model is quantized to accommodate the scaled input dynamic range. +- *user-defined* - The user may specify a scale factor via the ``-sf`` flag that + will be used for static quantization. + +The ``-qb`` flag provides a hint to the GNA plugin regarding the preferred target weight resolution for all layers. +For example, when ``-qb 8`` is specified, the plugin will use 8-bit weights wherever possible in the +model. + +.. note:: + + It is not always possible to use 8-bit weights due to GNA hardware limitations. + For example, convolutional layers always use 16-bit weights (GNA hardware version + 1 and 2). This limitation will be removed in GNA hardware version 3 and higher. + +.. _execution-modes: + +Execution Modes +++++++++++++++++++++ + +Several execution modes are supported via the ``-d`` flag: + +- ``CPU`` - All calculations are performed on CPU device using CPU Plugin. +- ``GPU`` - All calculations are performed on GPU device using GPU Plugin. +- ``NPU`` - All calculations are performed on NPU device using NPU Plugin. +- ``GNA_AUTO`` - GNA hardware is used if available and the driver is installed. Otherwise, the GNA device is emulated in fast-but-not-bit-exact mode. +- ``GNA_HW`` - GNA hardware is used if available and the driver is installed. Otherwise, an error will occur. +- ``GNA_SW`` - Deprecated. The GNA device is emulated in fast-but-not-bit-exact mode. +- ``GNA_SW_FP32`` - Substitutes parameters and calculations from low precision to floating point (FP32). +- ``GNA_SW_EXACT`` - GNA device is emulated in bit-exact mode. + +Loading and Saving Models ++++++++++++++++++++++++++ + +The GNA plugin supports loading and saving of the GNA-optimized model (non-IR) via the ``-rg`` and ``-wg`` flags. +Thereby, it is possible to avoid the cost of full model quantization at run time. +The GNA plugin also supports export of firmware-compatible embedded model images +for the Intel® Speech Enabling Developer Kit and Amazon Alexa Premium Far-Field +Voice Development Kit via the ``-we`` flag (save only). + +In addition to performing inference directly from a GNA model file, these options make it possible to: + +- Convert from IR format to GNA format model file (``-m``, ``-wg``) +- Convert from IR format to embedded format model file (``-m``, ``-we``) +- Convert from GNA format to embedded format model file (``-rg``, ``-we``) + +Running +#################### + +Run the application with the ``-h`` option to see the usage message: + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python speech_sample.py -h + + Usage message: + + .. code-block:: console + + usage: speech_sample.py [-h] (-m MODEL | -rg IMPORT_GNA_MODEL) -i INPUT [-o OUTPUT] [-r REFERENCE] [-d DEVICE] [-bs [1-8]] + [-layout LAYOUT] [-qb [8, 16]] [-sf SCALE_FACTOR] [-wg EXPORT_GNA_MODEL] + [-we EXPORT_EMBEDDED_GNA_MODEL] [-we_gen [GNA1, GNA3]] + [--exec_target [GNA_TARGET_2_0, GNA_TARGET_3_0]] [-pc] [-a [CORE, ATOM]] [-iname INPUT_LAYERS] + [-oname OUTPUT_LAYERS] [-cw_l CONTEXT_WINDOW_LEFT] [-cw_r CONTEXT_WINDOW_RIGHT] [-pwl_me PWL_ME] + + optional arguments: + -m MODEL, --model MODEL + Path to an .xml file with a trained model (required if -rg is missing). 
+ -rg IMPORT_GNA_MODEL, --import_gna_model IMPORT_GNA_MODEL + Read GNA model from file using path/filename provided (required if -m is missing). + + Options: + -h, --help Show this help message and exit. + -i INPUT, --input INPUT + Required. Path(s) to input file(s). + Usage for a single file/layer: or . + Example of usage for several files/layers: :=,:=. + -o OUTPUT, --output OUTPUT + Optional. Output file name(s) to save scores (inference results). + Usage for a single file/layer: or . + Example of usage for several files/layers: :=,:=. + -r REFERENCE, --reference REFERENCE + Read reference score file(s) and compare inference results with reference scores. + Usage for a single file/layer: or . + Example of usage for several files/layers: :=,:=. + -d DEVICE, --device DEVICE + Optional. Specify a target device to infer on. CPU, GPU, NPU, GNA_AUTO, GNA_HW, GNA_SW_FP32, + GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. + HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified. + Default value is CPU. + -bs [1-8], --batch_size [1-8] + Optional. Batch size 1-8. + -layout LAYOUT Optional. Custom layout in format: "input0[value0],input1[value1]" or "[value]" (applied to all + inputs) + -qb [8, 16], --quantization_bits [8, 16] + Optional. Weight resolution in bits for GNA quantization: 8 or 16 (default 16). + -sf SCALE_FACTOR, --scale_factor SCALE_FACTOR + Optional. User-specified input scale factor for GNA quantization. + If the model contains multiple inputs, provide scale factors by separating them with commas. + For example: :,: or just to be applied to all inputs. + -wg EXPORT_GNA_MODEL, --export_gna_model EXPORT_GNA_MODEL + Optional. Write GNA model to file using path/filename provided. + -we EXPORT_EMBEDDED_GNA_MODEL, --export_embedded_gna_model EXPORT_EMBEDDED_GNA_MODEL + Optional. Write GNA embedded model to file using path/filename provided. + -we_gen [GNA1, GNA3], --embedded_gna_configuration [GNA1, GNA3] + Optional. GNA generation configuration string for embedded export. Can be GNA1 (default) or GNA3. + --exec_target [GNA_TARGET_2_0, GNA_TARGET_3_0] + Optional. Specify GNA execution target generation. By default, generation corresponds to the GNA HW + available in the system or the latest fully supported generation by the software. See the GNA + Plugin's GNA_EXEC_TARGET config option description. + -pc, --performance_counter + Optional. Enables performance report (specify -a to ensure arch accurate results). + -a [CORE, ATOM], --arch [CORE, ATOM] + Optional. Specify architecture. CORE, ATOM with the combination of -pc. + -cw_l CONTEXT_WINDOW_LEFT, --context_window_left CONTEXT_WINDOW_LEFT + Optional. Number of frames for left context windows (default is 0). Works only with context window + models. If you use the cw_l or cw_r flag, then batch size argument is ignored. + -cw_r CONTEXT_WINDOW_RIGHT, --context_window_right CONTEXT_WINDOW_RIGHT + Optional. Number of frames for right context windows (default is 0). Works only with context window + models. If you use the cw_l or cw_r flag, then batch size argument is ignored. + -pwl_me PWL_ME Optional. The maximum percent of error for PWL function. The value must be in <0, 100> range. The + default value is 1.0. + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + speech_sample -h + + Usage message: + + .. code-block:: console + + [ INFO ] OpenVINO Runtime version ......... + [ INFO ] Build ........... 
+ [ INFO ] + [ INFO ] Parsing input parameters + + speech_sample [OPTION] + Options: + + -h Print a usage message. + -i "" Required. Path(s) to input file(s). Usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. + -m "" Required. Path to an .xml file with a trained model (required if -rg is missing). + -o "" Optional. Output file name(s) to save scores (inference results). Example of usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. + -d "" Optional. Specify a target device to infer on. CPU, GPU, NPU, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified. + -pc Optional. Enables per-layer performance report. + -q "" Optional. Input quantization mode for GNA: static (default) or user defined (use with -sf). + -qb "" Optional. Weight resolution in bits for GNA quantization: 8 or 16 (default) + -sf "" Optional. User-specified input scale factor for GNA quantization (use with -q user). If the model contains multiple inputs, provide scale factors by separating them with commas. For example: :,: or just to be applied to all inputs. + -bs "" Optional. Batch size 1-8 (default 1) + -r "" Optional. Read reference score file(s) and compare inference results with reference scores. Usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. + -rg "" Read GNA model from file using path/filename provided (required if -m is missing). + -wg "" Optional. Write GNA model to file using path/filename provided. + -we "" Optional. Write GNA embedded model to file using path/filename provided. + -cw_l "" Optional. Number of frames for left context windows (default is 0). Works only with context window networks. If you use the cw_l or cw_r flag, then batch size argument is ignored. + -cw_r "" Optional. Number of frames for right context windows (default is 0). Works only with context window networks. If you use the cw_r or cw_l flag, then batch size argument is ignored. + -layout "" Optional. Prompts how network layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size. + -pwl_me "" Optional. The maximum percent of error for PWL function.The value must be in <0, 100> range. The default value is 1.0. + -exec_target "" Optional. Specify GNA execution target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_EXEC_TARGET config option description. + -compile_target "" Optional. Specify GNA compile target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_COMPILE_TARGET config option description. + -memory_reuse_off Optional. Disables memory optimizations for compiled model. + + Available target devices: CPU GNA GPU NPU + + + +.. _model-preparation-speech: + +Model Preparation +#################### + +You can use the following model conversion command to convert a Kaldi nnet1 or nnet2 model to OpenVINO Intermediate Representation (IR) format: + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. 
code-block:: console + + mo --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir + + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + mo --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir + + +The following pre-trained models are available: + +- ``rm_cnn4a_smbr`` +- ``rm_lstm4f`` +- ``wsj_dnn5b_smbr`` + +All of them can be downloaded from `the storage `__ . + +Speech Inference +#################### + +Once the IR has been created, you can do inference on Intel® Processors with the GNA co-processor (or emulation library): + + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python speech_sample.py -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_scores_10.ark -d GNA_AUTO -o result.npz + + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + speech_sample -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_scores_10.ark -d GNA_AUTO -o result.ark + + Here, the floating point Kaldi-generated reference neural network scores (``dev93_scores_10.ark``) corresponding to the input feature file (``dev93_10.ark``) are assumed to be available for comparison. + +.. note:: + + - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `. + - The sample supports input and output in numpy file format (.npz) + - When you specify single options multiple times, only the last value will be used. For example, the ``-m`` flag: + + + .. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python classification_sample_async.py -m model.xml -m model2.xml + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + ./speech_sample -m model.xml -m model2.xml + + +Sample Output +#################### + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + The sample application logs each step in a standard output stream. + + .. 
code-block:: console + + [ INFO ] Creating OpenVINO Runtime Core + [ INFO ] Reading the model: /models/wsj_dnn5b_smbr_fp32.xml + [ INFO ] Using scale factor(s) calculated from first utterance + [ INFO ] For input 0 using scale factor of 2175.4322418 + [ INFO ] Loading the model to the plugin + [ INFO ] Starting inference in synchronous mode + [ INFO ] + [ INFO ] Utterance 0: + [ INFO ] Total time in Infer (HW and SW): 6326.06ms + [ INFO ] Frames in utterance: 1294 + [ INFO ] Average Infer time per frame: 4.89ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7051840 + [ INFO ] avg error: 0.0448388 + [ INFO ] avg rms error: 0.0582387 + [ INFO ] stdev error: 0.0371650 + [ INFO ] + [ INFO ] Utterance 1: + [ INFO ] Total time in Infer (HW and SW): 4526.57ms + [ INFO ] Frames in utterance: 1005 + [ INFO ] Average Infer time per frame: 4.50ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7575974 + [ INFO ] avg error: 0.0452166 + [ INFO ] avg rms error: 0.0586013 + [ INFO ] stdev error: 0.0372769 + [ INFO ] + [ INFO ] Utterance 2: + [ INFO ] Total time in Infer (HW and SW): 6636.56ms + [ INFO ] Frames in utterance: 1471 + [ INFO ] Average Infer time per frame: 4.51ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7191710 + [ INFO ] avg error: 0.0472226 + [ INFO ] avg rms error: 0.0612991 + [ INFO ] stdev error: 0.0390846 + [ INFO ] + [ INFO ] Utterance 3: + [ INFO ] Total time in Infer (HW and SW): 3927.01ms + [ INFO ] Frames in utterance: 845 + [ INFO ] Average Infer time per frame: 4.65ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7436461 + [ INFO ] avg error: 0.0477581 + [ INFO ] avg rms error: 0.0621334 + [ INFO ] stdev error: 0.0397457 + [ INFO ] + [ INFO ] Utterance 4: + [ INFO ] Total time in Infer (HW and SW): 3891.49ms + [ INFO ] Frames in utterance: 855 + [ INFO ] Average Infer time per frame: 4.55ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7071600 + [ INFO ] avg error: 0.0449147 + [ INFO ] avg rms error: 0.0585048 + [ INFO ] stdev error: 0.0374897 + [ INFO ] + [ INFO ] Utterance 5: + [ INFO ] Total time in Infer (HW and SW): 3378.61ms + [ INFO ] Frames in utterance: 699 + [ INFO ] Average Infer time per frame: 4.83ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.8870468 + [ INFO ] avg error: 0.0479243 + [ INFO ] avg rms error: 0.0625490 + [ INFO ] stdev error: 0.0401951 + [ INFO ] + [ INFO ] Utterance 6: + [ INFO ] Total time in Infer (HW and SW): 4034.31ms + [ INFO ] Frames in utterance: 790 + [ INFO ] Average Infer time per frame: 5.11ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.7648273 + [ INFO ] avg error: 0.0482702 + [ INFO ] avg rms error: 0.0629734 + [ INFO ] stdev error: 0.0404429 + [ INFO ] + [ INFO ] Utterance 7: + [ INFO ] Total time in Infer (HW and SW): 2854.04ms + [ INFO ] Frames in utterance: 622 + [ INFO ] Average Infer time per frame: 4.59ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ 
INFO ] max error: 0.7389560 + [ INFO ] avg error: 0.0465543 + [ INFO ] avg rms error: 0.0604941 + [ INFO ] stdev error: 0.0386294 + [ INFO ] + [ INFO ] Utterance 8: + [ INFO ] Total time in Infer (HW and SW): 2493.28ms + [ INFO ] Frames in utterance: 548 + [ INFO ] Average Infer time per frame: 4.55ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.6680136 + [ INFO ] avg error: 0.0439341 + [ INFO ] avg rms error: 0.0574614 + [ INFO ] stdev error: 0.0370353 + [ INFO ] + [ INFO ] Utterance 9: + [ INFO ] Total time in Infer (HW and SW): 1654.67ms + [ INFO ] Frames in utterance: 368 + [ INFO ] Average Infer time per frame: 4.50ms + [ INFO ] + [ INFO ] Output blob name: affinetransform14 + [ INFO ] Number scores per frame: 3425 + [ INFO ] + [ INFO ] max error: 0.6550579 + [ INFO ] avg error: 0.0467643 + [ INFO ] avg rms error: 0.0605045 + [ INFO ] stdev error: 0.0383914 + [ INFO ] + [ INFO ] Total sample time: 39722.60ms + [ INFO ] File result.npz was created! + [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool + + + .. tab-item:: C++ + :sync: cpp + + The sample application logs each step in a standard output stream. + + .. code-block:: console + + [ INFO ] OpenVINO runtime: OpenVINO Runtime version ......... 2022.1.0 + [ INFO ] Build ........... 2022.1.0-6311-a90bb1ff017 + [ INFO ] + [ INFO ] Parsing input parameters + [ INFO ] Loading model files: + [ INFO ] \test_data\models\wsj_dnn5b_smbr_fp32\wsj_dnn5b_smbr_fp32.xml + [ INFO ] Using scale factor of 2175.43 calculated from first utterance. + [ INFO ] Model loading time 0.0034 ms + [ INFO ] Loading model to the device GNA_AUTO + [ INFO ] Loading model to the device + [ INFO ] Number scores per frame : 3425 + Utterance 0: + Total time in Infer (HW and SW): 5687.53 ms + Frames in utterance: 1294 frames + Average Infer time per frame: 4.39531 ms + max error: 0.705184 + avg error: 0.0448388 + avg rms error: 0.0574098 + stdev error: 0.0371649 + + + End of Utterance 0 + + [ INFO ] Number scores per frame : 3425 + Utterance 1: + Total time in Infer (HW and SW): 4341.34 ms + Frames in utterance: 1005 frames + Average Infer time per frame: 4.31974 ms + max error: 0.757597 + avg error: 0.0452166 + avg rms error: 0.0578436 + stdev error: 0.0372769 + + + End of Utterance 1 + + ... + End of Utterance X + + [ INFO ] Execution successful + + +Use of C++ Sample in Kaldi Speech Recognition Pipeline +###################################################### + +The Wall Street Journal DNN model used in this example was prepared using the +Kaldi s5 recipe and the Kaldi Nnet (nnet1) framework. It is possible to recognize +speech by substituting the ``speech_sample`` for Kaldi's nnet-forward command. +Since the ``speech_sample`` does not yet use pipes, it is necessary to use temporary +files for speaker-transformed feature vectors and scores when running the Kaldi +speech recognition pipeline. The following operations assume that feature extraction +was already performed according to the ``s5`` recipe and that the working directory +within the Kaldi source tree is ``egs/wsj/s5``. + +1. Prepare a speaker-transformed feature set, given that the feature transform + is specified in ``final.feature_transform`` and the feature files are specified in ``feats.scp``: + + .. code-block:: console + + nnet-forward --use-gpu=no final.feature_transform "ark,s,cs:copy-feats scp:feats.scp ark:- |" ark:feat.ark + +2. 
Score the feature set using the ``speech_sample``:
+
+   .. code-block:: console
+
+      ./speech_sample -d GNA_AUTO -bs 8 -i feat.ark -m wsj_dnn5b.xml -o scores.ark
+
+   The OpenVINO™ toolkit Intermediate Representation file ``wsj_dnn5b.xml`` was
+   generated in the previous :ref:`Model Preparation ` section.
+
+3. Run the Kaldi decoder to produce n-best text hypotheses and select the most likely
+   text, given that the WFST (``HCLG.fst``), vocabulary (``words.txt``), and
+   TID/PID mapping (``final.mdl``) are specified:
+
+   .. code-block:: console
+
+      latgen-faster-mapped --max-active=7000 --max-mem=50000000 --beam=13.0 --lattice-beam=6.0 --acoustic-scale=0.0833 --allow-partial=true --word-symbol-table=words.txt final.mdl HCLG.fst ark:scores.ark ark:-| lattice-scale --inv-acoustic-scale=13 ark:- ark:- | lattice-best-path --word-symbol-table=words.txt ark:- ark,t:- > out.txt &
+
+4. Run the word error rate tool to check accuracy, given that the vocabulary
+   (``words.txt``) and reference transcript (``test_filt.txt``) are specified:
+
+   .. code-block:: console
+
+      cat out.txt | utils/int2sym.pl -f 2- words.txt | sed s:\::g | compute-wer --text --mode=present ark:test_filt.txt ark,p:-
+
+   All of the files can be downloaded from `the storage `__.
+
+
+Additional Resources
+####################
+
+- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
+- :doc:`Get Started with Samples `
+- :doc:`Using OpenVINO™ Toolkit Samples `
+- :doc:`Convert a Model `
diff --git a/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst b/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst
new file mode 100644
index 00000000000000..aaf029273f75fb
--- /dev/null
+++ b/docs/articles_en/learn_openvino/openvino_samples/benchmark_tool.rst
@@ -0,0 +1,931 @@
+.. {#openvino_sample_benchmark_tool}
+
+Benchmark Tool
+====================
+
+
+.. meta::
+   :description: Learn how to use the Benchmark Tool (Python, C++) to
+                 estimate deep learning inference performance on supported
+                 devices.
+
+
+This page demonstrates how to use the Benchmark Tool to estimate deep learning inference performance on supported devices.
+
+.. note::
+
+   The Python version is recommended for benchmarking models that will be used
+   in Python applications, and the C++ version is recommended for benchmarking
+   models that will be used in C++ applications. Both tools have a similar
+   command interface and backend.
+
+
+Basic Usage
+####################
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      The Python ``benchmark_app`` is automatically installed when you install OpenVINO
+      using :doc:`PyPI `.
+      Before running ``benchmark_app``, make sure the ``openvino_env`` virtual
+      environment is activated, and navigate to the directory where your model is located.
+
+      The benchmarking application works with models in the OpenVINO IR
+      (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats.
+      Make sure to :doc:`convert your models `
+      if necessary.
+
+      To run benchmarking with default options on a model, use the following command:
+
+      .. code-block:: sh
+
+         benchmark_app -m model.xml
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      To use the C++ ``benchmark_app``, you must first build it following the
+      :ref:`Build the Sample Applications ` instructions and
+      then set up paths and environment variables by following the
+      :doc:`Get Ready for Running the Sample Applications `
+      instructions. Navigate to the directory where the ``benchmark_app`` C++ sample binary was built.
+
+      .. 
note::
+
+         If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the
+         :doc:`Benchmark Python Tool ` is available,
+         and you should follow the usage instructions on that page instead.
+
+      The benchmarking application works with models in the OpenVINO IR, TensorFlow,
+      TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If necessary,
+      OpenVINO also allows you to :doc:`convert your models `.
+
+      To run benchmarking with default options on a model, use the following command:
+
+      .. code-block:: sh
+
+         ./benchmark_app -m model.xml
+
+
+By default, the application loads the specified model onto the CPU and performs
+inference on batches of randomly generated data inputs for 60 seconds. As the model
+loads, the application prints information about the benchmark parameters. When benchmarking is completed,
+it reports the minimum, average, and maximum inference latency and the average throughput.
+
+You can often improve benchmark results beyond the default configuration by
+configuring some of the execution parameters for your model. For example, you can
+use "throughput" or "latency" performance hints to optimize the runtime for higher
+FPS or reduced inference time. Read on to learn more about the configuration
+options available with ``benchmark_app``.
+
+
+
+
+
+Configuration Options
+#####################
+
+The benchmark app provides various options for configuring execution parameters.
+This section covers key configuration options for easily tuning benchmarking to
+achieve better performance on your device. A list of all configuration options
+is given in the :ref:`Advanced Usage ` section.
+
+Performance hints: latency and throughput
++++++++++++++++++++++++++++++++++++++++++
+
+The benchmark app allows users to provide high-level "performance hints" for
+setting latency-focused or throughput-focused inference modes. This hint causes
+the runtime to automatically adjust runtime parameters, such as the number of
+processing streams and inference batch size, to prioritize reduced latency
+or high throughput.
+
+The performance hints do not require any device-specific settings, and they are
+completely portable between devices. Parameters are automatically configured
+based on whichever device is being used. This allows users to easily port
+applications between hardware targets without having to re-determine the best
+runtime parameters for the new device.
+
+If not specified, throughput is used as the default. To set the hint explicitly,
+use ``-hint latency`` or ``-hint throughput`` when running ``benchmark_app``:
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         benchmark_app -m model.xml -hint latency
+         benchmark_app -m model.xml -hint throughput
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         ./benchmark_app -m model.xml -hint latency
+         ./benchmark_app -m model.xml -hint throughput
+
+.. note::
+
+   It is up to the user to ensure the environment in which the benchmark is running is optimized for maximum performance. Otherwise, different results may occur when using the application in different environment settings (such as power optimization settings, processor overclocking, thermal throttling).
+   When you specify a single option multiple times, only the last value is used. For example, the ``-m`` flag:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: console
+
+            benchmark_app -m model.xml -m model2.xml
+
+      .. tab-item:: C++
+         :sync: cpp
+
+         .. code-block:: console
+
+            ./benchmark_app -m model.xml -m model2.xml
+
+
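+The hints map onto OpenVINO runtime properties, so once you have found a suitable
+mode with ``benchmark_app``, you can apply the same hint in your own application.
+The following is a minimal, illustrative sketch using the OpenVINO Python API;
+the ``model.xml`` path is a placeholder:
+
+.. code-block:: python
+
+   import openvino as ov
+
+   core = ov.Core()
+   # Equivalent of running benchmark_app with "-hint throughput";
+   # pass "LATENCY" instead for the latency-focused mode.
+   compiled_model = core.compile_model(
+       "model.xml", "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"}
+   )
+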
+Latency
+--------------------
+
+Latency is the amount of time it takes to process a single inference request.
+In applications where data needs to be processed and acted on as quickly as
+possible (such as autonomous driving), low latency is desirable. For conventional
+devices, lower latency is achieved by reducing the number of parallel processing
+streams so the system can utilize as many resources as possible to quickly calculate
+each inference request. However, advanced devices like multi-socket CPUs and modern
+GPUs are capable of running multiple inference requests while delivering the same latency.
+
+When ``benchmark_app`` is run with ``-hint latency``, it determines the optimal number
+of parallel inference requests for minimizing latency while still maximizing the
+parallelization capabilities of the hardware. It automatically sets the number of
+processing streams and inference batch size to achieve the best latency.
+
+Throughput
+--------------------
+
+Throughput is the amount of data an inference pipeline can process at once, and
+it is usually measured in frames per second (FPS) or inferences per second. In
+applications where large amounts of data need to be processed simultaneously
+(such as multi-camera video streams), high throughput is needed. To achieve high
+throughput, the runtime focuses on fully saturating the device with enough data
+to process. It utilizes as much memory and as many parallel streams as possible
+to maximize the amount of data that can be processed simultaneously.
+
+When ``benchmark_app`` is run with ``-hint throughput``, it maximizes the number of
+parallel inference requests to utilize all the threads available on the device.
+On GPU, it automatically sets the inference batch size to fill up the available GPU memory.
+
+For more information on performance hints, see the
+:doc:`High-level Performance Hints ` page.
+For more details on optimal runtime configurations and how they are automatically
+determined using performance hints, see
+:doc:`Runtime Inference Optimizations `.
+
+
+Device
+++++++++++++++++++++
+
+To set which device benchmarking runs on, use the ``-d `` argument. This
+will tell ``benchmark_app`` to run benchmarking on that specific device. The benchmark
+app supports CPU, GPU, and GNA devices. In order to use GPU, the system
+must have the appropriate drivers installed. If no device is specified, ``benchmark_app``
+will default to using ``CPU``.
+
+For example, to run benchmarking on GPU, use:
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         benchmark_app -m model.xml -d GPU
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         ./benchmark_app -m model.xml -d GPU
+
+
+You may also specify ``AUTO`` as the device, in which case the ``benchmark_app`` will
+automatically select the best device for benchmarking and support it with the
+CPU at the model loading stage. This may result in increased performance and
+should therefore be used purposefully. For more information, see the
+:doc:`Automatic device selection ` page.
+
+.. note::
+
+   If the latency or throughput hint is set, ``benchmark_app`` will automatically
+   configure streams and batch sizes for optimal performance based on the specified device.
+
+Number of iterations
+++++++++++++++++++++
+
+By default, the benchmarking app will run for a predefined duration, repeatedly
+performing inference with the model and measuring the resulting inference speed.
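+As an illustration of what this default loop measures, the sketch below approximates
+it with the OpenVINO Python API: synchronous inference over random data for a fixed
+time budget. It assumes a static-shape, single-input ``f32`` model at the placeholder
+path ``model.xml``; for real measurements, use ``benchmark_app`` itself. The options
+for controlling the number of iterations follow below.
+
+.. code-block:: python
+
+   import time
+
+   import numpy as np
+   import openvino as ov
+
+   core = ov.Core()
+   compiled_model = core.compile_model("model.xml", "CPU")
+   request = compiled_model.create_infer_request()
+
+   # Fill the input with random data, as benchmark_app does by default
+   # (a single f32 input with a static shape is assumed here).
+   inp = compiled_model.input(0)
+   data = np.random.rand(*list(inp.shape)).astype(np.float32)
+
+   duration = 15.0  # seconds; benchmark_app defaults to 60
+   latencies = []
+   deadline = time.perf_counter() + duration
+   while time.perf_counter() < deadline:
+       start = time.perf_counter()
+       request.infer({inp.any_name: data})
+       latencies.append((time.perf_counter() - start) * 1000)
+
+   print(f"Count: {len(latencies)} iterations")
+   print(f"Median latency: {np.median(latencies):.2f} ms")
+   print(f"Throughput: {len(latencies) / duration:.2f} FPS")
+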
+There are several options for setting the number of inference iterations:
+
+* Explicitly specify the number of iterations the model runs, using the
+  ``-niter `` option.
+* Set how much time the app runs for, using the ``-t `` option.
+* Set both of them (execution will continue until both conditions are met).
+* If neither ``-niter`` nor ``-t`` is specified, the app will run for a
+  predefined duration that depends on the device.
+
+The more iterations a model runs, the better the statistics will be for determining
+average latency and throughput.
+
+Inputs
+++++++++++++++++++++
+
+The benchmark tool runs benchmarking on user-provided input images in
+``.jpg``, ``.bmp``, or ``.png`` format. Use ``-i `` to specify
+the path to an image or a folder of images. For example, to run benchmarking on
+an image named ``test1.jpg``, use:
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         benchmark_app -m model.xml -i test1.jpg
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: sh
+
+         ./benchmark_app -m model.xml -i test1.jpg
+
+
+The tool will repeatedly loop through the provided inputs and run inference on
+them for the specified amount of time or number of iterations. If the ``-i``
+flag is not used, the tool will automatically generate random data to fit the
+input shape of the model.
+
+Examples
+++++++++++++++++++++
+
+For more usage examples (and step-by-step instructions on how to set up a model for benchmarking),
+see the :ref:`Examples of Running the Tool ` section.
+
+.. _advanced-usage-benchmark:
+
+Advanced Usage
+####################
+
+.. note::
+
+   By default, OpenVINO samples, tools and demos expect input with BGR channel
+   order. If you trained your model to work with RGB order, you need to manually
+   rearrange the default channel order in the sample or demo application or reconvert
+   your model using the model conversion API with the ``reverse_input_channels`` argument
+   specified. For more information about the argument, refer to the When to Reverse
+   Input Channels section of Converting a Model to Intermediate Representation (IR).
+
+
+Per-layer performance and logging
++++++++++++++++++++++++++++++++++
+
+The application also collects per-layer Performance Measurement (PM) counters for
+each executed infer request if you enable statistics dumping by setting the
+``-report_type`` parameter to one of the possible values:
+
+* ``no_counters`` report includes the specified configuration options, resulting
+  FPS, and latency.
+* ``average_counters`` report extends the ``no_counters`` report and additionally
+  includes average PM counters values for each layer from the network.
+* ``detailed_counters`` report extends the ``average_counters`` report and
+  additionally includes per-layer PM counters and latency for each executed infer request.
+
+Depending on the type, the report is stored in the ``benchmark_no_counters_report.csv``,
+``benchmark_average_counters_report.csv``, or ``benchmark_detailed_counters_report.csv``
+file located at the path specified in ``-report_folder``. The application also
+saves serialized executable graph information to an XML file if you specify a
+path to it with the ``-exec_graph_path`` parameter.
+
+.. _all-configuration-options-python-benchmark:
+
+All configuration options
++++++++++++++++++++++++++
+
+Running the application with the ``-h`` or ``--help`` option yields the
+following usage message:
+
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. scrollbox::
+
+         .. 
code-block:: sh + + [Step 1/11] Parsing and validating input arguments + [ INFO ] Parsing input parameters + usage: benchmark_app.py [-h [HELP]] [-i PATHS_TO_INPUT [PATHS_TO_INPUT ...]] -m PATH_TO_MODEL [-d TARGET_DEVICE] + [-hint {throughput,cumulative_throughput,latency,none}] [-niter NUMBER_ITERATIONS] [-t TIME] [-b BATCH_SIZE] [-shape SHAPE] + [-data_shape DATA_SHAPE] [-layout LAYOUT] [-extensions EXTENSIONS] [-c PATH_TO_CLDNN_CONFIG] [-cdir CACHE_DIR] [-lfile [LOAD_FROM_FILE]] + [-api {sync,async}] [-nireq NUMBER_INFER_REQUESTS] [-nstreams NUMBER_STREAMS] [-inference_only [INFERENCE_ONLY]] + [-infer_precision INFER_PRECISION] [-ip {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}] + [-op {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}] [-iop INPUT_OUTPUT_PRECISION] [--mean_values [R,G,B]] [--scale_values [R,G,B]] + [-nthreads NUMBER_THREADS] [-pin {YES,NO,NUMA,HYBRID_AWARE}] [-latency_percentile LATENCY_PERCENTILE] + [-report_type {no_counters,average_counters,detailed_counters}] [-report_folder REPORT_FOLDER] [-pc [PERF_COUNTS]] + [-pcsort {no_sort,sort,simple_sort}] [-pcseq [PCSEQ]] [-exec_graph_path EXEC_GRAPH_PATH] [-dump_config DUMP_CONFIG] [-load_config LOAD_CONFIG] + + Options: + -h [HELP], --help [HELP] + Show this help message and exit. + + -i PATHS_TO_INPUT [PATHS_TO_INPUT ...], --paths_to_input PATHS_TO_INPUT [PATHS_TO_INPUT ...] + Optional. Path to a folder with images and/or binaries or to specific image or binary file.It is also allowed to map files to model inputs: + input_1:file_1/dir1,file_2/dir2,input_4:file_4/dir4 input_2:file_3/dir3 Currently supported data types: bin, npy. If OPENCV is enabled, this + functionalityis extended with the following data types: bmp, dib, jpeg, jpg, jpe, jp2, png, pbm, pgm, ppm, sr, ras, tiff, tif. + + -m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL + Required. Path to an .xml/.onnx file with a trained model or to a .blob file with a trained compiled model. + + -d TARGET_DEVICE, --target_device TARGET_DEVICE + Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU. Use '-d HETERO:' format to specify HETERO plugin. Use '-d MULTI:' format to specify MULTI plugin. The + application looks for a suitable plugin for the specified device. + + -hint {throughput,cumulative_throughput,latency,none}, --perf_hint {throughput,cumulative_throughput,latency,none} + Optional. Performance hint (latency or throughput or cumulative_throughput or none). Performance hint allows the OpenVINO device to select the + right model-specific settings. 'throughput': device performance mode will be set to THROUGHPUT. 'cumulative_throughput': device performance + mode will be set to CUMULATIVE_THROUGHPUT. 'latency': device performance mode will be set to LATENCY. 'none': no device performance mode will + be set. Using explicit 'nstreams' or other device-specific options, please set hint to 'none' + + -niter NUMBER_ITERATIONS, --number_iterations NUMBER_ITERATIONS + Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device. + + -t TIME, --time TIME Optional. Time in seconds to execute topology. + + -api {sync,async}, --api_type {sync,async} + Optional. Enable using sync/async API. Default value is async. + + + Input shapes: + -b BATCH_SIZE, --batch_size BATCH_SIZE + Optional. Batch size value. If not specified, the batch size value is determined from Intermediate Representation + + -shape SHAPE Optional. Set shape for input. 
For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size. This parameter + affect model Parameter shape, can be dynamic. For dynamic dimesions use symbol `?`, `-1` or range `low.. up`. + + -data_shape DATA_SHAPE + Optional. Optional if model shapes are all static (original ones or set by -shape).Required if at least one input shape is dynamic and input + images are not provided.Set shape for input tensors. For example, "input1[1,3,224,224][1,3,448,448],input2[1,4][1,8]" or + "[1,3,224,224][1,3,448,448] in case of one input size. + + -layout LAYOUT Optional. Prompts how model layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input + size. + + + Advanced options: + -extensions EXTENSIONS, --extensions EXTENSIONS + Optional. Path or a comma-separated list of paths to libraries (.so or .dll) with extensions. + + -c PATH_TO_CLDNN_CONFIG, --path_to_cldnn_config PATH_TO_CLDNN_CONFIG + Optional. Required for GPU custom kernels. Absolute path to an .xml file with the kernels description. + + -cdir CACHE_DIR, --cache_dir CACHE_DIR + Optional. Enable model caching to specified directory + + -lfile [LOAD_FROM_FILE], --load_from_file [LOAD_FROM_FILE] + Optional. Loads model from file directly without read_model. + + -nireq NUMBER_INFER_REQUESTS, --number_infer_requests NUMBER_INFER_REQUESTS + Optional. Number of infer requests. Default value is determined automatically for device. + + -nstreams NUMBER_STREAMS, --number_streams NUMBER_STREAMS + Optional. Number of streams to use for inference on the CPU/GPU (for HETERO and MULTI device cases use format + :,: or just ). Default value is determined automatically for a device. Please note that + although the automatic selection usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very + small models. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams + should be set to 1. See samples README for more details. + + -inference_only [INFERENCE_ONLY], --inference_only [INFERENCE_ONLY] + Optional. If true inputs filling only once before measurements (default for static models), else inputs filling is included into loop + measurement (default for dynamic models) + + -infer_precision INFER_PRECISION + Optional. Specifies the inference precision. Example #1: '-infer_precision bf16'. Example #2: '-infer_precision CPU:bf16,GPU:f32' + + -exec_graph_path EXEC_GRAPH_PATH, --exec_graph_path EXEC_GRAPH_PATH + Optional. Path to a file where to store executable graph information serialized. + + + Preprocessing options: + -ip {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}, --input_precision {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64} + Optional. Specifies precision for all input layers of the model. + + -op {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}, --output_precision {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64} + Optional. Specifies precision for all output layers of the model. + + -iop INPUT_OUTPUT_PRECISION, --input_output_precision INPUT_OUTPUT_PRECISION + Optional. Specifies precision for input and output layers by name. Example: -iop "input:f16, output:f16". Notice that quotes are required. + Overwrites precision from ip and op options for specified layers. + + --mean_values [R,G,B] + Optional. Mean values to be used for the input image per channel. Values to be provided in the [R,G,B] format. 
Can be defined for desired input + of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the + original model was trained. Applying the values affects performance and may cause type conversion + + --scale_values [R,G,B] + Optional. Scale values to be used for the input image per channel. Values are provided in the [R,G,B] format. Can be defined for desired input + of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the + original model was trained. If both --mean_values and --scale_values are specified, the mean is subtracted first and then scale is applied + regardless of the order of options in command line. Applying the values affects performance and may cause type conversion + + + Device-specific performance options: + -nthreads NUMBER_THREADS, --number_threads NUMBER_THREADS + Number of threads to use for inference on the CPU, GNA (including HETERO and MULTI cases). + + -pin {YES,NO,NUMA,HYBRID_AWARE}, --infer_threads_pinning {YES,NO,NUMA,HYBRID_AWARE} + Optional. Enable threads->cores ('YES' which is OpenVINO runtime's default for conventional CPUs), threads->(NUMA)nodes ('NUMA'), + threads->appropriate core types ('HYBRID_AWARE', which is OpenVINO runtime's default for Hybrid CPUs) or completely disable ('NO') CPU threads + pinning for CPU-involved inference. + + + Statistics dumping options: + -latency_percentile LATENCY_PERCENTILE, --latency_percentile LATENCY_PERCENTILE + Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median). + + -report_type {no_counters,average_counters,detailed_counters}, --report_type {no_counters,average_counters,detailed_counters} + Optional. Enable collecting statistics report. "no_counters" report contains configuration options specified, resulting FPS and latency. + "average_counters" report extends "no_counters" report and additionally includes average PM counters values for each layer from the model. + "detailed_counters" report extends "average_counters" report and additionally includes per-layer PM counters and latency for each executed + infer request. + + -report_folder REPORT_FOLDER, --report_folder REPORT_FOLDER + Optional. Path to a folder where statistics report is stored. + + -json_stats [JSON_STATS], --json_stats [JSON_STATS] + Optional. Enables JSON-based statistics output (by default reporting system will use CSV format). Should be used together with -report_folder option. + + -pc [PERF_COUNTS], --perf_counts [PERF_COUNTS] + Optional. Report performance counters. + + -pcsort {no_sort,sort,simple_sort}, --perf_counts_sort {no_sort,sort,simple_sort} + Optional. Report performance counters and analysis the sort hotpoint opts. sort: Analysis opts time cost, print by hotpoint order no_sort: + Analysis opts time cost, print by normal order simple_sort: Analysis opts time cost, only print EXECUTED opts by normal order + + -pcseq [PCSEQ], --pcseq [PCSEQ] + Optional. Report latencies for each shape in -data_shape sequence. + + -dump_config DUMP_CONFIG + Optional. Path to JSON file to dump OpenVINO parameters, which were set by application. + + -load_config LOAD_CONFIG + Optional. Path to JSON file to load custom OpenVINO parameters. + Please note, command line parameters have higher priority then parameters from configuration file. + Example 1: a simple JSON file for HW device with primary properties. 
+ { + "CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"} + } + Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties. + { + "AUTO": { + "PERFORMANCE_HINT": "THROUGHPUT", + "PERF_COUNT": "NO", + "DEVICE_PROPERTIES": "{CPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:3},GPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:5}}" + } + } + + + .. tab-item:: C++ + :sync: cpp + + .. scrollbox:: + + .. code-block:: sh + :force: + + [Step 1/11] Parsing and validating input arguments + [ INFO ] Parsing input parameters + usage: benchmark_app [OPTION] + + Options: + -h, --help Print the usage message + -m Required. Path to an .xml/.onnx file with a trained model or to a .blob files with a trained compiled model. + -i Optional. Path to a folder with images and/or binaries or to specific image or binary file. + In case of dynamic shapes models with several inputs provide the same number of files for each input (except cases with single file for any input) :"input1:1.jpg input2:1.bin", "input1:1.bin,2.bin input2:3.bin input3:4.bin,5.bin ". Also you can pass specific keys for inputs: "random" - for fillling input with random data, "image_info" - for filling input with image size. + You should specify either one files set to be used for all inputs (without providing input names) or separate files sets for every input of model (providing inputs names). + Currently supported data types: bmp, bin, npy. + If OPENCV is enabled, this functionality is extended with the following data types: + dib, jpeg, jpg, jpe, jp2, png, pbm, pgm, ppm, sr, ras, tiff, tif. + -d Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:" format to specify HETERO plugin. Use "-d MULTI:" format to specify MULTI plugin. The application looks for a suitable plugin for the specified device. + -hint (latency or throughput or cumulative_throughput or none) Optional. Performance hint allows the OpenVINO device to select the right model-specific settings. + 'throughput' or 'tput': device performance mode will be set to THROUGHPUT. + 'cumulative_throughput' or 'ctput': device performance mode will be set to CUMULATIVE_THROUGHPUT. + 'latency': device performance mode will be set to LATENCY. + 'none': no device performance mode will be set. + Using explicit 'nstreams' or other device-specific options, please set hint to 'none' + -niter Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device. + -t Optional. Time in seconds to execute topology. + + Input shapes + -b Optional. Batch size value. If not specified, the batch size value is determined from Intermediate Representation. + -shape Optional. Set shape for model input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size. This parameter affect model input shape and can be dynamic. For dynamic dimensions use symbol `?` or '-1'. Ex. [?,3,?,?]. For bounded dimensions specify range 'min..max'. Ex. [1..10,3,?,?]. + -data_shape Required for models with dynamic shapes. Set shape for input blobs. In case of one input size: "[1,3,224,224]" or "input1[1,3,224,224],input2[1,4] ". In case of several input sizes provide the same number for each input (except cases with single shape for any input): "[1,3,128,128][3,3,128,128][1,3,320,320]", "input1[1,1, 128,128][1,1,256,256],input2[80,1]" or "input1[1,192][1,384],input2[1,192][1,384],input3[1,192][1,384],input4[1,192][1,384]". 
If model shapes are all static specifying the option will cause an exception. + -layout Optional. Prompts how model layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size. + + Advanced options + -extensions Required for custom layers (extensions). Absolute path to a shared library with the kernels implementations. + -c Required for GPU custom kernels. Absolute path to an .xml file with the kernels description. + -cache_dir Optional. Enables caching of loaded models to specified directory. List of devices which support caching is shown at the end of this message. + -load_from_file Optional. Loads model from file directly without read_model. All CNNNetwork options (like re-shape) will be ignored + -api Optional. Enable Sync/Async API. Default value is "async". + -nireq Optional. Number of infer requests. Default value is determined automatically for device. + -nstreams Optional. Number of streams to use for inference on the CPU or GPU devices (for HETERO and MULTI device cases use format :, : or just ). Default value is determined automatically for a device.Please note that although the automatic selection usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small models. See sample's README for more details. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams should be set to 1. + -inference_only Optional. Measure only inference stage. Default option for static models. Dynamic models are measured in full mode which includes inputs setup stage, inference only mode available for them with single input data shape only. To enable full mode for static models pass "false" value to this argument: ex. "-inference_only=false". + -infer_precision Optional. Specifies the inference precision. Example #1: '-infer_precision bf16'. Example #2: '-infer_precision CPU:bf16,GPU:f32' + + Preprocessing options: + -ip Optional. Specifies precision for all input layers of the model. + -op Optional. Specifies precision for all output layers of the model. + -iop Optional. Specifies precision for input and output layers by name. + Example: -iop "input:f16, output:f16". + Notice that quotes are required. + Overwrites precision from ip and op options for specified layers. + -mean_values [R,G,B] Optional. Mean values to be used for the input image per channel. Values to be provided in the [R,G,B] format. Can be defined for desired input of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the original model was trained. Applying the values affects performance and may cause type conversion + -scale_values [R,G,B] Optional. Scale values to be used for the input image per channel. Values are provided in the [R,G,B] format. Can be defined for desired input of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the original model was trained. If both --mean_values and --scale_values are specified, the mean is subtracted first and then scale is applied regardless of the order of options in command line. Applying the values affects performance and may cause type conversion + + Device-specific performance options: + -nthreads Optional. Number of threads to use for inference on the CPU (including HETERO and MULTI cases). 
+            -pin ("YES"|"CORE") / "HYBRID_AWARE" / ("NO"|"NONE") / "NUMA"   Optional. Explicit inference threads binding options (leave empty to let the OpenVINO make a choice):
+                        enabling threads->cores pinning("YES", which is already default for any conventional CPU),
+                        letting the runtime to decide on the threads->different core types("HYBRID_AWARE", which is default on the hybrid CPUs)
+                        threads->(NUMA)nodes("NUMA") or
+                        completely disable("NO") CPU inference threads pinning
+
+            Statistics dumping options:
+            -latency_percentile   Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).
+            -report_type   Optional. Enable collecting statistics report. "no_counters" report contains configuration options specified, resulting FPS and latency. "average_counters" report extends "no_counters" report and additionally includes average PM counters values for each layer from the model. "detailed_counters" report extends "average_counters" report and additionally includes per-layer PM counters and latency for each executed infer request.
+            -report_folder   Optional. Path to a folder where statistics report is stored.
+            -json_stats   Optional. Enables JSON-based statistics output (by default reporting system will use CSV format). Should be used together with -report_folder option.
+            -pc   Optional. Report performance counters.
+            -pcsort   Optional. Report performance counters and analysis the sort hotpoint opts. "sort" Analysis opts time cost, print by hotpoint order "no_sort" Analysis opts time cost, print by normal order "simple_sort" Analysis opts time cost, only print EXECUTED opts by normal order
+            -pcseq   Optional. Report latencies for each shape in -data_shape sequence.
+            -exec_graph_path   Optional. Path to a file where to store executable graph information serialized.
+            -dump_config   Optional. Path to JSON file to dump IE parameters, which were set by application.
+            -load_config   Optional. Path to JSON file to load custom IE parameters. Please note, command line parameters have higher priority then parameters from configuration file.
+               Example 1: a simple JSON file for HW device with primary properties.
+                        {
+                             "CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"}
+                        }
+               Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties.
+                        {
+                                "AUTO": {
+                                     "PERFORMANCE_HINT": "THROUGHPUT",
+                                     "PERF_COUNT": "NO",
+                                     "DEVICE_PROPERTIES": "{CPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:3},GPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:5}}"
+                                }
+                        }
+
+
+
+Running the application with the empty list of options yields the usage message given above and an error message.
+
+More information on inputs
+++++++++++++++++++++++++++
+
+The benchmark tool supports topologies with one or more inputs. If a topology is
+not data-sensitive, you can skip the input parameter, and the inputs will be filled
+with random values. If a model has only image input(s), provide a folder with images
+or a path to an image as input. If a model has some specific input(s) (besides images),
+prepare binary files or NumPy arrays filled with data of the appropriate
+precision and provide paths to them as input. If a model has mixed input types, the
+input folder should contain all required files. Image inputs are filled with image
+files one by one. Binary inputs are filled with binary files one by one.
+
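+For a model input that expects binary data, such a file can be prepared with NumPy.
+The snippet below is a small, illustrative sketch; the input name ``input1`` and
+the ``[1,128]`` f32 shape are assumptions, not taken from a particular model:
+
+.. code-block:: python
+
+   import numpy as np
+
+   # benchmark_app accepts .npy files for non-image inputs; the shape and
+   # dtype must match the corresponding model input (assumed here).
+   np.save("input1.npy", np.random.rand(1, 128).astype(np.float32))
+
+The file can then be mapped to the input on the command line, for example:
+``benchmark_app -m model.xml -i input1:input1.npy``.
+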
+.. _examples-of-running-the-tool-python:
+
+Examples of Running the Tool
+############################
+
+This section provides step-by-step instructions on how to run the Benchmark Tool
+with the ``asl-recognition`` Intel model on CPU or GPU devices. It uses random data as the input.
+
+.. note::
+
+   Internet access is required to execute the following steps successfully. If you
+   have access to the Internet through a proxy server only, please make sure that
+   it is configured in your OS environment.
+
+Run the tool, specifying the location of the OpenVINO Intermediate Representation
+(IR) model ``.xml`` file, the device to perform inference on, and a performance hint.
+The following commands demonstrate examples of how to run the Benchmark Tool
+in latency mode on CPU and throughput mode on GPU devices:
+
+* On CPU (latency mode):
+
+  .. tab-set::
+
+     .. tab-item:: Python
+        :sync: python
+
+        .. code-block:: sh
+
+           benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency
+
+     .. tab-item:: C++
+        :sync: cpp
+
+        .. code-block:: sh
+
+           ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency
+
+
+* On GPU (throughput mode):
+
+  .. tab-set::
+
+     .. tab-item:: Python
+        :sync: python
+
+        .. code-block:: sh
+
+           benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d GPU -hint throughput
+
+     .. tab-item:: C++
+        :sync: cpp
+
+        .. code-block:: sh
+
+           ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d GPU -hint throughput
+
+
+The application outputs the number of executed iterations, total duration of execution,
+latency, and throughput. Additionally, if you set the ``-report_type`` parameter,
+the application outputs a statistics report. If you set the ``-pc`` parameter,
+the application outputs performance counters. If you set ``-exec_graph_path``,
+the application stores serialized executable graph information to the specified file. All measurements
+including per-layer PM counters are reported in milliseconds.
+
+An example of the information output when running ``benchmark_app`` on CPU in
+latency mode is shown below:
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: sh
+
+         benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency
+
+
+      .. code-block:: sh
+
+         [Step 1/11] Parsing and validating input arguments
+         [ INFO ] Parsing input parameters
+         [ INFO ] Input command: /home/openvino/tools/benchmark_tool/benchmark_app.py -m omz_models/intel/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency
+         [Step 2/11] Loading OpenVINO Runtime
+         [ INFO ] OpenVINO:
+         [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align
+         [ INFO ]
+         [ INFO ] Device info:
+         [ INFO ] CPU
+         [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align
+         [ INFO ]
+         [ INFO ]
+         [Step 3/11] Setting device configuration
+         [Step 4/11] Reading model files
+         [ INFO ] Loading model files
+         [ INFO ] Read model took 147.82 ms
+         [ INFO ] Original model I/O parameters:
+         [ INFO ] Model inputs:
+         [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224}
+         [ INFO ] Model outputs:
+         [ INFO ] output (node: output) : f32 / [...]
/ {1,100} + [Step 5/11] Resizing model to match image sizes and given batch + [ INFO ] Model batch size: 1 + [Step 6/11] Configuring input of the model + [ INFO ] Model inputs: + [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} + [ INFO ] Model outputs: + [ INFO ] output (node: output) : f32 / [...] / {1,100} + [Step 7/11] Loading the model to the device + [ INFO ] Compile model took 974.64 ms + [Step 8/11] Querying optimal runtime parameters + [ INFO ] Model: + [ INFO ] NETWORK_NAME: torch-jit-export + [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 2 + [ INFO ] NUM_STREAMS: 2 + [ INFO ] AFFINITY: Affinity.CORE + [ INFO ] INFERENCE_NUM_THREADS: 0 + [ INFO ] PERF_COUNT: False + [ INFO ] INFERENCE_PRECISION_HINT: + [ INFO ] PERFORMANCE_HINT: PerformanceMode.LATENCY + [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 + [Step 9/11] Creating infer requests and preparing input tensors + [ WARNING ] No input files were given for input 'input'!. This input will be filled with random values! + [ INFO ] Fill input 'input' with random values + [Step 10/11] Measuring performance (Start inference asynchronously, 2 inference requests, limits: 60000 ms duration) + [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). + [ INFO ] First inference took 38.41 ms + [Step 11/11] Dumping statistics report + [ INFO ] Count: 5380 iterations + [ INFO ] Duration: 60036.78 ms + [ INFO ] Latency: + [ INFO ] Median: 22.04 ms + [ INFO ] Average: 22.09 ms + [ INFO ] Min: 20.78 ms + [ INFO ] Max: 33.51 ms + [ INFO ] Throughput: 89.61 FPS + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: sh + + ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency + + + .. code-block:: sh + + [Step 1/11] Parsing and validating input arguments + [ INFO ] Parsing input parameters + [ INFO ] Input command: /home/openvino/bin/intel64/DEBUG/benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency + [Step 2/11] Loading OpenVINO Runtime + [ INFO ] OpenVINO: + [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align + [ INFO ] + [ INFO ] Device info: + [ INFO ] CPU + [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align + [ INFO ] + [ INFO ] + [Step 3/11] Setting device configuration + [ WARNING ] Device(CPU) performance hint is set to LATENCY + [Step 4/11] Reading model files + [ INFO ] Loading model files + [ INFO ] Read model took 141.11 ms + [ INFO ] Original model I/O parameters: + [ INFO ] Network inputs: + [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} + [ INFO ] Network outputs: + [ INFO ] output (node: output) : f32 / [...] / {1,100} + [Step 5/11] Resizing model to match image sizes and given batch + [ INFO ] Model batch size: 0 + [Step 6/11] Configuring input of the model + [ INFO ] Model batch size: 1 + [ INFO ] Network inputs: + [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} + [ INFO ] Network outputs: + [ INFO ] output (node: output) : f32 / [...] 
/ {1,100} + [Step 7/11] Loading the model to the device + [ INFO ] Compile model took 989.62 ms + [Step 8/11] Querying optimal runtime parameters + [ INFO ] Model: + [ INFO ] NETWORK_NAME: torch-jit-export + [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 2 + [ INFO ] NUM_STREAMS: 2 + [ INFO ] AFFINITY: CORE + [ INFO ] INFERENCE_NUM_THREADS: 0 + [ INFO ] PERF_COUNT: NO + [ INFO ] INFERENCE_PRECISION_HINT: f32 + [ INFO ] PERFORMANCE_HINT: LATENCY + [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 + [Step 9/11] Creating infer requests and preparing input tensors + [ WARNING ] No input files were given: all inputs will be filled with random values! + [ INFO ] Test Config 0 + [ INFO ] input ([N,C,D,H,W], f32, {1, 3, 16, 224, 224}, static): random (binary data is expected) + [Step 10/11] Measuring performance (Start inference asynchronously, 2 inference requests, limits: 60000 ms duration) + [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). + [ INFO ] First inference took 37.27 ms + [Step 11/11] Dumping statistics report + [ INFO ] Count: 5470 iterations + [ INFO ] Duration: 60028.56 ms + [ INFO ] Latency: + [ INFO ] Median: 21.79 ms + [ INFO ] Average: 21.92 ms + [ INFO ] Min: 20.60 ms + [ INFO ] Max: 37.19 ms + [ INFO ] Throughput: 91.12 FPS + + +The Benchmark Tool can also be used with dynamically shaped networks to measure +expected inference time for various input data shapes. See the ``-shape`` and +``-data_shape`` argument descriptions in the :ref:`All configuration options ` +section to learn more about using dynamic shapes. Here is a command example for +using ``benchmark_app`` with dynamic networks and a portion of the resulting output: + + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: sh + + benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -shape [-1,3,16,224,224] -data_shape [1,3,16,224,224][2,3,16,224,224][4,3,16,224,224] -pcseq + + + .. code-block:: sh + + [Step 9/11] Creating infer requests and preparing input tensors + [ WARNING ] No input files were given for input 'input'!. This input will be filled with random values! + [ INFO ] Fill input 'input' with random values + [ INFO ] Defined 3 tensor groups: + [ INFO ] input: {1, 3, 16, 224, 224} + [ INFO ] input: {2, 3, 16, 224, 224} + [ INFO ] input: {4, 3, 16, 224, 224} + [Step 10/11] Measuring performance (Start inference asynchronously, 11 inference requests, limits: 60000 ms duration) + [ INFO ] Benchmarking in full mode (inputs filling are included in measurement loop). + [ INFO ] First inference took 201.15 ms + [Step 11/11] Dumping statistics report + [ INFO ] Count: 2811 iterations + [ INFO ] Duration: 60271.71 ms + [ INFO ] Latency: + [ INFO ] Median: 207.70 ms + [ INFO ] Average: 234.56 ms + [ INFO ] Min: 85.73 ms + [ INFO ] Max: 773.55 ms + [ INFO ] Latency for each data shape group: + [ INFO ] 1. input: {1, 3, 16, 224, 224} + [ INFO ] Median: 118.08 ms + [ INFO ] Average: 115.05 ms + [ INFO ] Min: 85.73 ms + [ INFO ] Max: 339.25 ms + [ INFO ] 2. input: {2, 3, 16, 224, 224} + [ INFO ] Median: 207.25 ms + [ INFO ] Average: 205.16 ms + [ INFO ] Min: 166.98 ms + [ INFO ] Max: 545.55 ms + [ INFO ] 3. input: {4, 3, 16, 224, 224} + [ INFO ] Median: 384.16 ms + [ INFO ] Average: 383.48 ms + [ INFO ] Min: 305.51 ms + [ INFO ] Max: 773.55 ms + [ INFO ] Throughput: 108.82 FPS + + .. tab-item:: C++ + :sync: cpp + + .. 
code-block:: sh
+
+         ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -shape [-1,3,16,224,224] -data_shape [1,3,16,224,224][2,3,16,224,224][4,3,16,224,224] -pcseq
+
+
+      .. code-block:: sh
+
+         [Step 9/11] Creating infer requests and preparing input tensors
+         [ INFO ] Test Config 0
+         [ INFO ] input ([N,C,D,H,W], f32, {1, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected)
+         [ INFO ] Test Config 1
+         [ INFO ] input ([N,C,D,H,W], f32, {2, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected)
+         [ INFO ] Test Config 2
+         [ INFO ] input ([N,C,D,H,W], f32, {4, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected)
+         [Step 10/11] Measuring performance (Start inference asynchronously, 11 inference requests, limits: 60000 ms duration)
+         [ INFO ] Benchmarking in full mode (inputs filling are included in measurement loop).
+         [ INFO ] First inference took 204.40 ms
+         [Step 11/11] Dumping statistics report
+         [ INFO ] Count: 2783 iterations
+         [ INFO ] Duration: 60326.29 ms
+         [ INFO ] Latency:
+         [ INFO ] Median: 208.20 ms
+         [ INFO ] Average: 237.47 ms
+         [ INFO ] Min: 85.06 ms
+         [ INFO ] Max: 743.46 ms
+         [ INFO ] Latency for each data shape group:
+         [ INFO ] 1. input: {1, 3, 16, 224, 224}
+         [ INFO ] Median: 120.36 ms
+         [ INFO ] Average: 117.19 ms
+         [ INFO ] Min: 85.06 ms
+         [ INFO ] Max: 348.66 ms
+         [ INFO ] 2. input: {2, 3, 16, 224, 224}
+         [ INFO ] Median: 207.81 ms
+         [ INFO ] Average: 206.39 ms
+         [ INFO ] Min: 167.19 ms
+         [ INFO ] Max: 578.33 ms
+         [ INFO ] 3. input: {4, 3, 16, 224, 224}
+         [ INFO ] Median: 387.40 ms
+         [ INFO ] Average: 388.99 ms
+         [ INFO ] Min: 327.50 ms
+         [ INFO ] Max: 743.46 ms
+         [ INFO ] Throughput: 107.61 FPS
+
+
+Additional Resources
+####################
+
+- :doc:`Get Started with Samples `
+- :doc:`Using OpenVINO Samples `
+- :doc:`Convert a Model `
diff --git a/docs/articles_en/learn_openvino/openvino_samples/bert_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/bert_benchmark.rst
new file mode 100644
index 00000000000000..ce82c582e97f5b
--- /dev/null
+++ b/docs/articles_en/learn_openvino/openvino_samples/bert_benchmark.rst
@@ -0,0 +1,69 @@
+.. {#openvino_sample_bert_benchmark}
+
+Bert Benchmark Python Sample
+============================
+
+
+.. meta::
+   :description: Learn how to estimate performance of a Bert model using Asynchronous Inference Request (Python) API.
+
+
+This sample demonstrates how to estimate performance of a Bert model using the Asynchronous
+Inference Request API. Unlike :doc:`demos `, this sample does not have
+configurable command-line arguments. Feel free to modify the sample's source code to
+try out different options.
+
+
+How It Works
+####################
+
+The sample downloads a model and a tokenizer, exports the model to ONNX format, reads the
+exported model and reshapes it to enforce dynamic input shapes. Then, it compiles the
+resulting model, downloads a dataset and runs a benchmark on the dataset.
+
+.. scrollbox::
+
+   .. doxygensnippet:: samples/python/benchmark/bert_benchmark/bert_benchmark.py
+      :language: python
+
+
+You can see the explicit description of each sample step in the
+:doc:`Integration Steps `
+section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+Running
+####################
+
+1. Install the ``openvino`` Python package:
+
+   .. code-block:: console
+
+      python -m pip install openvino
+
+
+2. Install packages from ``requirements.txt``:
+
+   .. 
code-block:: console + + python -m pip install -r requirements.txt + +3. Run the sample + + .. code-block:: console + + python bert_benchmark.py + + +Sample Output +#################### + +The sample outputs how long it takes to process a dataset. + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO Samples ` +- :doc:`Convert a Model ` +- `Bert Benchmark Python Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.rst deleted file mode 100644 index 2da7350e8d57ab..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_classification.rst +++ /dev/null @@ -1,157 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_c_samples_hello_classification_README} - -Hello Classification C Sample -============================= - - -.. meta:: - :description: Learn how to do inference of image - classification models, such as alexnet and googlenet-v1, using - Synchronous Inference Request (C) API. - - -This sample demonstrates how to execute an inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API and input auto-resize feature. - -.. tab-set:: - - .. tab-item:: Requirements - - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Options | Values | - +============================+============================================================================================================================================================================+ - | Validated Models | :doc:`alexnet `, :doc:`googlenet-v1 ` | - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Validated images | The sample uses OpenCV\* to `read input image `__ (\*.bmp, \*.png) | - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ `, :doc:`Python ` | - +----------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - .. tab-item:: C API - - Hello Classification C sample application demonstrates how to use the C API from OpenVINO in applications. 
- - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Feature | API | Description | - +=====================================+=============================================================+=========================================================================================================================================================================================+ - | OpenVINO Runtime Version | ``ov_get_openvino_version`` | Get Openvino API version | - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Basic Infer Flow | ``ov_core_create``, | Common API to do inference: read and compile a model, create an infer request, configure input and output tensors | - | | ``ov_core_read_model``, | | - | | ``ov_core_compile_model``, | | - | | ``ov_compiled_model_create_infer_request``, | | - | | ``ov_infer_request_set_input_tensor_by_index``, | | - | | ``ov_infer_request_get_output_tensor_by_index`` | | - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Synchronous Infer | ``ov_infer_request_infer`` | Do synchronous inference | - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Operations | ``ov_model_const_input``, | Get inputs and outputs of a model | - | | ``ov_model_const_output`` | + - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Tensor Operations | ``ov_tensor_create_from_host_ptr`` | Create a tensor shape | - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Preprocessing | ``ov_preprocess_prepostprocessor_create``, | Set image of the original size as input for a model with other input size. Resize and layout conversions are performed automatically by the corresponding plugin just before inference. 
| - | ``ov_preprocess_prepostprocessor_get_input_info_by_index``, | | - | | ``ov_preprocess_input_info_get_tensor_info``, | | - | | ``ov_preprocess_input_tensor_info_set_from``, | | - | | ``ov_preprocess_input_tensor_info_set_layout``, | | - | | ``ov_preprocess_input_info_get_preprocess_steps``, | | - | | ``ov_preprocess_preprocess_steps_resize``, | | - | | ``ov_preprocess_input_model_info_set_layout``, | | - | | ``ov_preprocess_output_set_element_type``, | | - | | ``ov_preprocess_prepostprocessor_build`` | | - +-------------------------------------+-------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/c/hello_classification/main.c - :language: c - -How It Works -############ - -Upon start-up, the sample application reads command-line parameters and loads the specified network and an image to the Inference Engine plugin. -Then, the sample creates a synchronous inference request object. When inference is done, the application outputs data to the standard output stream. - -You can see the explicit description of -each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Building -######## - -To build the sample, please use instructions available at :doc:`Build the Sample Applications ` section in Inference Engine Samples guide. - -Running -####### - -To run the sample, you need to specify a model and an image: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. -- You can use images from the media files collection available at `the storage `__. - -.. note:: - - - By default, OpenVINO™ Toolkit Samples and Demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using ``mo`` with ``reverse_input_channels`` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `. - - Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (\*.onnx) that do not require preprocessing. - -Example -+++++++ - -1. Download a pre-trained model using :doc:`Model Downloader `: - - .. code-block:: console - - python /downloader.py --name alexnet - -2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script: - - .. code-block:: console - - python /converter.py --name alexnet - -3. Perform inference of ``car.bmp`` using the ``alexnet`` model on a ``GPU``, for example: - - .. code-block:: console - - /hello_classification_c /alexnet.xml /car.bmp GPU - -Sample Output -############# - -The application outputs top-10 inference results. - -..
code-block:: console - - Top 10 results: - - Image /opt/intel/openvino/samples/scripts/car.png - - classid probability - ------- ----------- - 656 0.666479 - 654 0.112940 - 581 0.068487 - 874 0.033385 - 436 0.026132 - 817 0.016731 - 675 0.010980 - 511 0.010592 - 569 0.008178 - 717 0.006336 - - This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - -See Also -######## - -- :doc:`Integrate OpenVINO™ into Your Application ` -- :doc:`Using OpenVINO™ Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` -- :doc:`C API Reference ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst deleted file mode 100644 index 36010fcb4fdf7c..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst +++ /dev/null @@ -1,147 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README} - -Hello NV12 Input Classification C Sample -======================================== - - -.. meta:: - :description: Learn how to do inference of an image - classification model with images in NV12 color format using - Synchronous Inference Request (C) API. - - -This sample demonstrates how to execute an inference of image classification networks, such as AlexNet, with images in the NV12 color format, using the Synchronous Inference Request API. - -The Hello NV12 Input Classification C Sample demonstrates how to use the NV12 automatic input pre-processing API in your applications. - -.. tab-set:: - - .. tab-item:: Requirements - - +-----------------------------------------+---------------------------------------------------------------------------------------+ - | Options | Values | - +=========================================+=======================================================================================+ - | Validated Models | :doc:`alexnet ` | - +-----------------------------------------+---------------------------------------------------------------------------------------+ - | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | - +-----------------------------------------+---------------------------------------------------------------------------------------+ - | Validated images | An uncompressed image in the NV12 color format - \*.yuv | - +-----------------------------------------+---------------------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +-----------------------------------------+---------------------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ ` | - +-----------------------------------------+---------------------------------------------------------------------------------------+ - - ..
tab-item:: C API - - +-----------------------------------------+-----------------------------------------------------------+--------------------------------------------------------+ - | Feature | API | Description | - +=========================================+===========================================================+========================================================+ - | Node Operations | ``ov_port_get_any_name`` | Get a layer name | - +-----------------------------------------+-----------------------------------------------------------+--------------------------------------------------------+ - | Infer Request Operations | ``ov_infer_request_set_tensor``, | Operate with tensors | - | | ``ov_infer_request_get_output_tensor_by_index`` | | - +-----------------------------------------+-----------------------------------------------------------+--------------------------------------------------------+ - | Preprocessing | ``ov_preprocess_input_tensor_info_set_color_format``, | Change the color format of the input data | - | | ``ov_preprocess_preprocess_steps_convert_element_type``, | | - | | ``ov_preprocess_preprocess_steps_convert_color`` | | - +-----------------------------------------+-----------------------------------------------------------+--------------------------------------------------------+ - - - Basic Inference Engine API is covered by :doc:`Hello Classification C sample `. - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/c/hello_nv12_input_classification/main.c - :language: c - -How It Works -############ - -Upon start-up, the sample application reads command-line parameters and loads the specified network and an image in the NV12 color format to an Inference Engine plugin. Then, the sample creates a synchronous inference request object. When inference is done, the application outputs data to the standard output stream. - -You can see the explicit description of each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Building -######## - -To build the sample, please use instructions available at :doc:`Build the Sample Applications ` section in Inference Engine Samples guide. - -Running -####### - -To run the sample, you need to specify a model and an image: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. -- You can use images from the media files collection available at `the storage `__. - -The sample accepts an uncompressed image in the NV12 color format. To run the sample, you need to convert your BGR/RGB image to NV12. To do this, you can use one of the widely available tools such as FFmpeg\* or GStreamer\*. The following command shows how to convert an ordinary image into an uncompressed NV12 image using FFmpeg: - -.. code-block:: sh - - ffmpeg -i cat.jpg -pix_fmt nv12 cat.yuv - -.. note:: - - - Because the sample reads raw image files, you should provide a correct image size along with the image path. The sample expects the logical size of the image, not the buffer size. For example, for a 640x480 BGR/RGB image the corresponding NV12 logical image size is also 640x480, whereas the buffer size is 640x720. - - By default, this sample expects that network input has BGR channels order. If you trained your model to work with RGB order, you need to reconvert your model using ``mo`` with ``reverse_input_channels`` argument specified.
For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `. - - Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - -Example -+++++++ - -1. Download a pre-trained model using :doc:`Model Downloader `: - - .. code-block:: console - - python /downloader.py --name alexnet - -2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script: - - .. code-block:: console - - python /converter.py --name alexnet - -3. Perform inference of an NV12 image using the ``alexnet`` model on a ``CPU``, for example: - - .. code-block:: console - - /hello_nv12_input_classification_c /alexnet.xml /cat.yuv 300x300 CPU - -Sample Output -############# - -The application outputs top-10 inference results. - -.. code-block:: console - - Top 10 results: - - Image ./cat.yuv - - classid probability - ------- ----------- - 435 0.091733 - 876 0.081725 - 999 0.069305 - 587 0.043726 - 666 0.038957 - 419 0.032892 - 285 0.030309 - 700 0.029941 - 696 0.021628 - 855 0.020339 - - This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - -See Also -######## - -- :doc:`Integrate OpenVINO™ into Your Application ` -- :doc:`Using OpenVINO™ Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` -- `C API Reference `__ - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.rst deleted file mode 100644 index a73b249a565332..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_benchmark_tool.rst +++ /dev/null @@ -1,419 +0,0 @@ -.. {#openvino_inference_engine_samples_benchmark_app_README} - -Benchmark C++ Tool -================== - - -.. meta:: - :description: Learn how to use the Benchmark C++ Tool to - estimate deep learning inference performance on supported - devices. - - -This page demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. - -.. note:: - - This page describes usage of the C++ implementation of the Benchmark Tool. For the Python implementation, refer to the :doc:`Benchmark Python Tool ` page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. - - -Basic Usage -#################### - -To use the C++ benchmark_app, you must first build it following the :doc:`Build the Sample Applications ` instructions and then set up paths and environment variables by following the :doc:`Get Ready for Running the Sample Applications ` instructions. Navigate to the directory where the benchmark_app C++ sample binary was built. - -.. note:: - - If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the :doc:`Benchmark Python Tool ` is available, and you should follow the usage instructions on that page instead. - -The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to :doc:`convert your models `.
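-
-Internally, the tool uses the OpenVINO Runtime API to read and compile models, so any format accepted by ``ov::Core::read_model`` can be benchmarked directly. The following minimal C++ sketch illustrates that equivalent read-and-compile flow only; it is not part of benchmark_app, and the file name is a placeholder:
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-
-   int main() {
-       ov::Core core;
-       // Reads OpenVINO IR, ONNX, PaddlePaddle, TensorFlow, or TensorFlow Lite models directly.
-       auto model = core.read_model("model.xml");
-       // Compiles the model for CPU, as benchmark_app does when -d is omitted.
-       ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
-       return 0;
-   }
-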
- -To run benchmarking with default options on a model, use the following command: - -.. code-block:: sh - - ./benchmark_app -m model.xml - - -By default, the application will load the specified model onto the CPU and perform inferencing on batches of randomly-generated data inputs for 60 seconds. As it loads, it prints information about the benchmark parameters. When benchmarking is completed, it reports the minimum, average, and maximum inferencing latency and the average throughput. - -You may be able to improve benchmark results beyond the default configuration by configuring some of the execution parameters for your model. For example, you can use "throughput" or "latency" performance hints to optimize the runtime for higher FPS or reduced inferencing time. Read on to learn more about the configuration options available with benchmark_app. - -Configuration Options -##################### - -The benchmark app provides various options for configuring execution parameters. This section covers key configuration options for easily tuning benchmarking to achieve better performance on your device. A list of all configuration options is given in the :ref:`Advanced Usage ` section. - -Performance hints: latency and throughput -+++++++++++++++++++++++++++++++++++++++++ - -The benchmark app allows users to provide high-level "performance hints" for setting latency-focused or throughput-focused inference modes. This hint causes the runtime to automatically adjust runtime parameters, such as the number of processing streams and inference batch size, to prioritize reduced latency or high throughput. - -The performance hints do not require any device-specific settings and they are completely portable between devices. Parameters are automatically configured based on whichever device is being used. This allows users to easily port applications between hardware targets without having to re-determine the best runtime parameters for the new device. - -If not specified, throughput is used as the default. To set the hint explicitly, use ``-hint latency`` or ``-hint throughput`` when running benchmark_app: - -.. code-block:: sh - - ./benchmark_app -m model.xml -hint latency - ./benchmark_app -m model.xml -hint throughput - - -.. note:: - - It is up to the user to ensure the environment on which the benchmark is running is optimized for maximum performance. Otherwise, different results may occur when using the application in different environment settings (such as power optimization settings, processor overclocking, thermal throttling). - Specifying a flag that takes only a single value, such as ``-m``, multiple times (for example, ``./benchmark_app -m model.xml -m model2.xml``) results in only the first value being used. - -Latency --------------------- - -Latency is the amount of time it takes to process a single inference request. In applications where data needs to be inferenced and acted on as quickly as possible (such as autonomous driving), low latency is desirable. For conventional devices, lower latency is achieved by reducing the amount of parallel processing streams so the system can utilize as many resources as possible to quickly calculate each inference request. However, advanced devices like multi-socket CPUs and modern GPUs are capable of running multiple inference requests while delivering the same latency.
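-
-The same high-level hint is available to applications through the OpenVINO Runtime API. A minimal, hypothetical C++ sketch of compiling a model with the latency hint (the model path is a placeholder; benchmark_app performs an equivalent configuration internally):
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-
-   int main() {
-       ov::Core core;
-       // Equivalent of `./benchmark_app -m model.xml -hint latency`: the runtime
-       // selects the number of streams and the batch size that minimize latency.
-       ov::CompiledModel compiled_model = core.compile_model(
-           "model.xml", "CPU",
-           ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
-       return 0;
-   }
-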
- -When benchmark_app is run with ``-hint latency``, it determines the optimal number of parallel inference requests for minimizing latency while still maximizing the parallelization capabilities of the hardware. It automatically sets the number of processing streams and inference batch size to achieve the best latency. - -Throughput --------------------- - -Throughput is the amount of data an inferencing pipeline can process at once, and it is usually measured in frames per second (FPS) or inferences per second. In applications where large amounts of data need to be inferenced simultaneously (such as multi-camera video streams), high throughput is needed. To achieve high throughput, the runtime focuses on fully saturating the device with enough data to process. It utilizes as much memory and as many parallel streams as possible to maximize the amount of data that can be processed simultaneously. - -When benchmark_app is run with ``-hint throughput``, it maximizes the number of parallel inference requests to utilize all the threads available on the device. On GPU, it automatically sets the inference batch size to fill up the GPU memory available. - -For more information on performance hints, see the :doc:`High-level Performance Hints ` page. For more details on optimal runtime configurations and how they are automatically determined using performance hints, see :doc:`Runtime Inference Optimizations `. - - -Device -++++++++++++++++++++ - -To set which device benchmarking runs on, use the ``-d `` argument. This will tell benchmark_app to run benchmarking on that specific device. The benchmark app supports "CPU", "GPU", and "GNA" devices. In order to use the GPU or GNA, the system must have the appropriate drivers installed. If no device is specified, benchmark_app will default to using CPU. - -For example, to run benchmarking on GPU, use: - -.. code-block:: sh - - ./benchmark_app -m model.xml -d GPU - - -You may also specify "AUTO" as the device, in which case the benchmark_app will automatically select the best device for benchmarking and support it with the CPU at the model loading stage. This may result in increased performance and, thus, should be used purposefully. For more information, see the :doc:`Automatic device selection ` page. - -.. note:: - - If the latency or throughput hint is set, it will automatically configure streams and batch sizes for optimal performance based on the specified device. - -Number of iterations -++++++++++++++++++++ - -By default, the benchmarking app will run for a predefined duration, repeatedly performing inferencing with the model and measuring the resulting inference speed. There are several options for setting the number of inference iterations: - -* Explicitly specify the number of iterations the model runs using the ``-niter `` option. -* Set how much time the app runs for using the ``-t `` option. -* Set both of them (execution will continue until both conditions are met). -* If neither ``-niter`` nor ``-t`` is specified, the app will run for a predefined duration that depends on the device. - -The more iterations a model runs, the better the statistics will be for determining average latency and throughput. - -Inputs -++++++++++++++++++++ - -The benchmark tool runs benchmarking on user-provided input images in ``.jpg``, ``.bmp``, or ``.png`` format. Use ``-i `` to specify the path to an image, or a folder of images. For example, to run benchmarking on an image named ``test1.jpg``, use: - -..
code-block:: sh - - ./benchmark_app -m model.xml -i test1.jpg - - -The tool will repeatedly loop through the provided inputs and run inferencing on them for the specified amount of time or number of iterations. If the ``-i`` flag is not used, the tool will automatically generate random data to fit the input shape of the model. - -Examples -++++++++++++++++++++ - -For more usage examples (and step-by-step instructions on how to set up a model for benchmarking), see the :ref:`Examples of Running the Tool ` section. - -.. _advanced-usage-cpp-benchmark: - -Advanced Usage -#################### - -.. note:: - - By default, OpenVINO samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channel order in the sample or demo application or reconvert your model using ``mo`` with ``reverse_input_channels`` argument specified. For more information about the argument, refer to When to Reverse Input Channels section of Converting a Model to Intermediate Representation (IR). - -Per-layer performance and logging -+++++++++++++++++++++++++++++++++ - -The application also collects per-layer Performance Measurement (PM) counters for each executed infer request if you enable statistics dumping by setting the ``-report_type`` parameter to one of the possible values: - -* ``no_counters`` report includes configuration options specified, resulting FPS and latency. -* ``average_counters`` report extends the ``no_counters`` report and additionally includes average PM counters values for each layer from the network. -* ``detailed_counters`` report extends the ``average_counters`` report and additionally includes per-layer PM counters and latency for each executed infer request. - -Depending on the type, the report is stored to benchmark_no_counters_report.csv, benchmark_average_counters_report.csv, or benchmark_detailed_counters_report.csv file located in the path specified in -report_folder. The application also saves executable graph information serialized to an XML file if you specify a path to it with the -exec_graph_path parameter. - -.. _all-configuration-options-cpp-benchmark: - -All configuration options -+++++++++++++++++++++++++ - -Running the application with the ``-h`` or ``--help`` option yields the following usage message: - -.. scrollbox:: - - .. code-block:: sh - :force: - - [Step 1/11] Parsing and validating input arguments - [ INFO ] Parsing input parameters - usage: benchmark_app [OPTION] - - Options: - -h, --help Print the usage message - -m Required. Path to an .xml/.onnx file with a trained model or to a .blob files with a trained compiled model. - -i Optional. Path to a folder with images and/or binaries or to specific image or binary file. - In case of dynamic shapes models with several inputs provide the same number of files for each input (except cases with single file for any input) :"input1:1.jpg input2:1.bin", "input1:1.bin,2.bin input2:3.bin input3:4.bin,5.bin ". Also you can pass specific keys for inputs: "random" - for fillling input with random data, "image_info" - for filling input with image size. - You should specify either one files set to be used for all inputs (without providing input names) or separate files sets for every input of model (providing inputs names). - Currently supported data types: bmp, bin, npy. - If OPENCV is enabled, this functionality is extended with the following data types: - dib, jpeg, jpg, jpe, jp2, png, pbm, pgm, ppm, sr, ras, tiff, tif. - -d Optional. 
Specify a target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:" format to specify HETERO plugin. Use "-d MULTI:" format to specify MULTI plugin. The application looks for a suitable plugin for the specified device. - -hint (latency or throughput or cumulative_throughput or none) Optional. Performance hint allows the OpenVINO device to select the right model-specific settings. - 'throughput' or 'tput': device performance mode will be set to THROUGHPUT. - 'cumulative_throughput' or 'ctput': device performance mode will be set to CUMULATIVE_THROUGHPUT. - 'latency': device performance mode will be set to LATENCY. - 'none': no device performance mode will be set. - Using explicit 'nstreams' or other device-specific options, please set hint to 'none' - -niter Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device. - -t Optional. Time in seconds to execute topology. - - Input shapes - -b Optional. Batch size value. If not specified, the batch size value is determined from Intermediate Representation. - -shape Optional. Set shape for model input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size. This parameter affect model input shape and can be dynamic. For dynamic dimensions use symbol `?` or '-1'. Ex. [?,3,?,?]. For bounded dimensions specify range 'min..max'. Ex. [1..10,3,?,?]. - -data_shape Required for models with dynamic shapes. Set shape for input blobs. In case of one input size: "[1,3,224,224]" or "input1[1,3,224,224],input2[1,4] ". In case of several input sizes provide the same number for each input (except cases with single shape for any input): "[1,3,128,128][3,3,128,128][1,3,320,320]", "input1[1,1, 128,128][1,1,256,256],input2[80,1]" or "input1[1,192][1,384],input2[1,192][1,384],input3[1,192][1,384],input4[1,192][1,384]". If model shapes are all static specifying the option will cause an exception. - -layout Optional. Prompts how model layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size. - - Advanced options - -extensions Required for custom layers (extensions). Absolute path to a shared library with the kernels implementations. - -c Required for GPU custom kernels. Absolute path to an .xml file with the kernels description. - -cache_dir Optional. Enables caching of loaded models to specified directory. List of devices which support caching is shown at the end of this message. - -load_from_file Optional. Loads model from file directly without read_model. All CNNNetwork options (like re-shape) will be ignored - -api Optional. Enable Sync/Async API. Default value is "async". - -nireq Optional. Number of infer requests. Default value is determined automatically for device. - -nstreams Optional. Number of streams to use for inference on the CPU or GPU devices (for HETERO and MULTI device cases use format :, : or just ). Default value is determined automatically for a device.Please note that although the automatic selection usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small models. See sample's README for more details. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams should be set to 1. - -inference_only Optional. Measure only inference stage. Default option for static models. 
Dynamic models are measured in full mode which includes inputs setup stage, inference only mode available for them with single input data shape only. To enable full mode for static models pass "false" value to this argument: ex. "-inference_only=false". - -infer_precision Optional. Specifies the inference precision. Example #1: '-infer_precision bf16'. Example #2: '-infer_precision CPU:bf16,GPU:f32' - - Preprocessing options: - -ip Optional. Specifies precision for all input layers of the model. - -op Optional. Specifies precision for all output layers of the model. - -iop Optional. Specifies precision for input and output layers by name. - Example: -iop "input:f16, output:f16". - Notice that quotes are required. - Overwrites precision from ip and op options for specified layers. - -mean_values [R,G,B] Optional. Mean values to be used for the input image per channel. Values to be provided in the [R,G,B] format. Can be defined for desired input of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the original model was trained. Applying the values affects performance and may cause type conversion - -scale_values [R,G,B] Optional. Scale values to be used for the input image per channel. Values are provided in the [R,G,B] format. Can be defined for desired input of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the original model was trained. If both --mean_values and --scale_values are specified, the mean is subtracted first and then scale is applied regardless of the order of options in command line. Applying the values affects performance and may cause type conversion - - Device-specific performance options: - -nthreads Optional. Number of threads to use for inference on the CPU (including HETERO and MULTI cases). - -pin ("YES"|"CORE") / "HYBRID_AWARE" / ("NO"|"NONE") / "NUMA" Optional. Explicit inference threads binding options (leave empty to let the OpenVINO make a choice): - enabling threads->cores pinning("YES", which is already default for any conventional CPU), - letting the runtime to decide on the threads->different core types("HYBRID_AWARE", which is default on the hybrid CPUs) - threads->(NUMA)nodes("NUMA") or - completely disable("NO") CPU inference threads pinning - - Statistics dumping options: - -latency_percentile Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median). - -report_type Optional. Enable collecting statistics report. "no_counters" report contains configuration options specified, resulting FPS and latency. "average_counters" report extends "no_counters" report and additionally includes average PM counters values for each layer from the model. "detailed_counters" report extends "average_counters" report and additionally includes per-layer PM counters and latency for each executed infer request. - -report_folder Optional. Path to a folder where statistics report is stored. - -json_stats Optional. Enables JSON-based statistics output (by default reporting system will use CSV format). Should be used together with -report_folder option. - -pc Optional. Report performance counters. - -pcsort Optional. Report performance counters and analysis the sort hotpoint opts. "sort" Analysis opts time cost, print by hotpoint order "no_sort" Analysis opts time cost, print by normal order "simple_sort" Analysis opts time cost, only print EXECUTED opts by normal order - -pcseq Optional. Report latencies for each shape in -data_shape sequence. - -exec_graph_path Optional. Path to a file where to store executable graph information serialized. - -dump_config Optional. Path to JSON file to dump IE parameters, which were set by application. - -load_config Optional. Path to JSON file to load custom IE parameters. Please note, command line parameters have higher priority then parameters from configuration file. - Example 1: a simple JSON file for HW device with primary properties. - { - "CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"} - } - Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties. - { - "AUTO": { - "PERFORMANCE_HINT": "THROUGHPUT", - "PERF_COUNT": "NO", - "DEVICE_PROPERTIES": "{CPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:3},GPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:5}}" - } - } - - -Running the application with an empty list of options yields the usage message given above and an error message. - -More information on inputs -++++++++++++++++++++++++++ - -The benchmark tool supports topologies with one or more inputs. If a topology is not data-sensitive, you can skip the input parameter, and the inputs will be filled with random values. If a model has only image input(s), provide a folder with images or a path to an image as input. If a model has some specific input(s) (besides images), please prepare binary file(s) or NumPy array(s) filled with data of the appropriate precision and provide a path to it as input. If a model has mixed input types, the input folder should contain all required files. Image inputs are filled with image files one by one. Binary inputs are filled with binary files one by one. - -.. _examples-of-running-the-tool-cpp: - -Examples of Running the Tool -############################ - -This section provides step-by-step instructions on how to run the Benchmark Tool with the ``asl-recognition`` model from the :doc:`Open Model Zoo ` on CPU or GPU devices. It uses random data as the input. - -.. note:: - - Internet access is required to execute the following steps successfully. If you have access to the Internet through a proxy server only, please make sure that it is configured in your OS environment. - - -1. Install OpenVINO Development Tools (if it hasn't been installed already): - - .. code-block:: sh - - pip install openvino-dev - - -2. Download the model using ``omz_downloader``, specifying the model name and directory to download the model to: - - .. code-block:: sh - - omz_downloader --name asl-recognition-0004 --precisions FP16 --output_dir omz_models - - -3. Run the tool, specifying the location of the model .xml file, the device to perform inference on, and with a performance hint. The following commands demonstrate examples of how to run the Benchmark Tool in latency mode on CPU and throughput mode on GPU devices: - - * On CPU (latency mode): - - .. code-block:: sh - - ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency - - - * On GPU (throughput mode): - - .. code-block:: sh - - ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d GPU -hint throughput - - -The application outputs the number of executed iterations, total duration of execution, latency, and throughput.
-Additionally, if you set the ``-report_type`` parameter, the application outputs a statistics report. If you set the ``-pc`` parameter, the application outputs performance counters. If you set ``-exec_graph_path``, the application reports executable graph information serialized. All measurements including per-layer PM counters are reported in milliseconds. - -An example of the information output when running benchmark_app on CPU in latency mode is shown below: - -.. code-block:: sh - - ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency - - -.. code-block:: sh - - [Step 1/11] Parsing and validating input arguments - [ INFO ] Parsing input parameters - [ INFO ] Input command: /home/openvino/bin/intel64/DEBUG/benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency - [Step 2/11] Loading OpenVINO Runtime - [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align - [ INFO ] - [ INFO ] Device info: - [ INFO ] CPU - [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align - [ INFO ] - [ INFO ] - [Step 3/11] Setting device configuration - [ WARNING ] Device(CPU) performance hint is set to LATENCY - [Step 4/11] Reading model files - [ INFO ] Loading model files - [ INFO ] Read model took 141.11 ms - [ INFO ] Original model I/O parameters: - [ INFO ] Network inputs: - [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} - [ INFO ] Network outputs: - [ INFO ] output (node: output) : f32 / [...] / {1,100} - [Step 5/11] Resizing model to match image sizes and given batch - [ INFO ] Model batch size: 0 - [Step 6/11] Configuring input of the model - [ INFO ] Model batch size: 1 - [ INFO ] Network inputs: - [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} - [ INFO ] Network outputs: - [ INFO ] output (node: output) : f32 / [...] / {1,100} - [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 989.62 ms - [Step 8/11] Querying optimal runtime parameters - [ INFO ] Model: - [ INFO ] NETWORK_NAME: torch-jit-export - [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 2 - [ INFO ] NUM_STREAMS: 2 - [ INFO ] AFFINITY: CORE - [ INFO ] INFERENCE_NUM_THREADS: 0 - [ INFO ] PERF_COUNT: NO - [ INFO ] INFERENCE_PRECISION_HINT: f32 - [ INFO ] PERFORMANCE_HINT: LATENCY - [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 - [Step 9/11] Creating infer requests and preparing input tensors - [ WARNING ] No input files were given: all inputs will be filled with random values! - [ INFO ] Test Config 0 - [ INFO ] input ([N,C,D,H,W], f32, {1, 3, 16, 224, 224}, static): random (binary data is expected) - [Step 10/11] Measuring performance (Start inference asynchronously, 2 inference requests, limits: 60000 ms duration) - [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 37.27 ms - [Step 11/11] Dumping statistics report - [ INFO ] Count: 5470 iterations - [ INFO ] Duration: 60028.56 ms - [ INFO ] Latency: - [ INFO ] Median: 21.79 ms - [ INFO ] Average: 21.92 ms - [ INFO ] Min: 20.60 ms - [ INFO ] Max: 37.19 ms - [ INFO ] Throughput: 91.12 FPS - - - -The Benchmark Tool can also be used with dynamically shaped networks to measure expected inference time for various input data shapes. 
See the ``-shape`` and ``-data_shape`` argument descriptions in the :ref:`All configuration options ` section to learn more about using dynamic shapes. Here is a command example for using benchmark_app with dynamic networks and a portion of the resulting output: - -.. code-block:: sh - - ./benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -shape [-1,3,16,224,224] -data_shape [1,3,16,224,224][2,3,16,224,224][4,3,16,224,224] -pcseq - - -.. code-block:: sh - - [Step 9/11] Creating infer requests and preparing input tensors - [ INFO ] Test Config 0 - [ INFO ] input ([N,C,D,H,W], f32, {1, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected) - [ INFO ] Test Config 1 - [ INFO ] input ([N,C,D,H,W], f32, {2, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected) - [ INFO ] Test Config 2 - [ INFO ] input ([N,C,D,H,W], f32, {4, 3, 16, 224, 224}, dyn:{?,3,16,224,224}): random (binary data is expected) - [Step 10/11] Measuring performance (Start inference asynchronously, 11 inference requests, limits: 60000 ms duration) - [ INFO ] Benchmarking in full mode (inputs filling are included in measurement loop). - [ INFO ] First inference took 204.40 ms - [Step 11/11] Dumping statistics report - [ INFO ] Count: 2783 iterations - [ INFO ] Duration: 60326.29 ms - [ INFO ] Latency: - [ INFO ] Median: 208.20 ms - [ INFO ] Average: 237.47 ms - [ INFO ] Min: 85.06 ms - [ INFO ] Max: 743.46 ms - [ INFO ] Latency for each data shape group: - [ INFO ] 1. input: {1, 3, 16, 224, 224} - [ INFO ] Median: 120.36 ms - [ INFO ] Average: 117.19 ms - [ INFO ] Min: 85.06 ms - [ INFO ] Max: 348.66 ms - [ INFO ] 2. input: {2, 3, 16, 224, 224} - [ INFO ] Median: 207.81 ms - [ INFO ] Average: 206.39 ms - [ INFO ] Min: 167.19 ms - [ INFO ] Max: 578.33 ms - [ INFO ] 3. input: {4, 3, 16, 224, 224} - [ INFO ] Median: 387.40 ms - [ INFO ] Average: 388.99 ms - [ INFO ] Min: 327.50 ms - [ INFO ] Max: 743.46 ms - [ INFO ] Throughput: 107.61 FPS - - -See Also -#################### - -* :doc:`Using OpenVINO Samples ` -* :doc:`Convert a Model ` -* :doc:`Model Downloader ` - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.rst deleted file mode 100644 index 2d9d76ab028d0a..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_automatic_speech_recognition.rst +++ /dev/null @@ -1,308 +0,0 @@ -.. {#openvino_inference_engine_samples_speech_sample_README} - -Automatic Speech Recognition C++ Sample -======================================= - - - -.. meta:: - :description: Learn how to infer an acoustic model based on Kaldi - neural networks and speech feature vectors using Asynchronous - Inference Request (C++) API. - - -.. note:: - - This sample is now deprecated and will be removed with OpenVINO 2024.0. - The sample was mainly designed to demonstrate the features of the GNA plugin - and the use of models produced by the Kaldi framework. OpenVINO support for - these components is now deprecated and will be discontinued, making the sample - redundant. - - -This sample demonstrates how to execute an asynchronous inference of an acoustic model based on Kaldi\* neural networks and speech feature vectors.
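-
-At its core, the asynchronous flow demonstrated by the sample starts an inference request without blocking and then waits for the result. A minimal C++ sketch of that pattern (illustrative only; the model path and device are placeholders, and the sample itself adds tensor setup, state handling, and profiling):
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-
-   int main() {
-       ov::Core core;
-       ov::CompiledModel compiled_model = core.compile_model("wsj_dnn5b.xml", "CPU");
-       ov::InferRequest request = compiled_model.create_infer_request();
-       // Start inference without blocking the calling thread...
-       request.start_async();
-       // ...and wait until the inference result becomes available.
-       request.wait();
-       return 0;
-   }
-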
- -The sample works with Kaldi ARK or uncompressed NumPy\* NPZ files, so it does not cover an end-to-end speech recognition scenario (speech-to-text): additional preprocessing (feature extraction) is required to get a feature vector from a speech signal, and postprocessing (decoding) is required to produce text from scores. - -.. tab-set:: - - .. tab-item:: Requirements - - +-------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Options | Values | - +=============================================================+===============================================================================================================================================================+ - | Validated Models | Acoustic model based on Kaldi\* neural networks (see :ref:`Model Preparation ` section) | - +-------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin) | - +-------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Supported devices | See :ref:`Execution Modes ` section below and :doc:`List Supported Devices ` | - +-------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - ..
tab-item:: C++ API - - The following C++ API is used in the application: - - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Feature | API | Description | - +=============================================================+=============================================================================================================+==============================================================================+ - | Available Devices | ``ov::Core::get_available_devices``, ``ov::Core::get_property`` | Get information of the devices for inference | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Import/Export Model | ``ov::Core::import_model``, ``ov::CompiledModel::export_model`` | The GNA plugin supports loading and saving of the GNA-optimized model | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Model Operations | ``ov::set_batch``, ``ov::Model::add_output``, ``ov::CompiledModel::inputs``, ``ov::CompiledModel::outputs`` | Managing of model: configure batch_size, input and output tensors | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Node Operations | ``ov::OutputVector::size``, ``ov::Output::get_shape`` | Get node shape | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Asynchronous Infer | ``ov::InferRequest::start_async``, ``ov::InferRequest::wait`` | Do asynchronous inference and waits until inference result becomes available | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | InferRequest Operations | ``ov::InferRequest::query_state``, ``ov::VariableState::reset`` | Gets and resets CompiledModel state control | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Tensor Operations | ``ov::Tensor::get_size``, ``ov::Tensor::data``, ``ov::InferRequest::get_tensor`` | Get a tensor, its size and data | - +-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - | Profiling | ``ov::InferRequest::get_profiling_info`` | Get infer request profiling info | - 
+-------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------+ - - Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `. - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/cpp/speech_sample/main.cpp - :language: cpp - -How It Works -############ - -At startup, the sample application reads command-line parameters, loads a specified model and input data to the OpenVINO™ Runtime plugin, performs inference on all speech utterances stored in the input file(s), logging each step in a standard output stream. -If the ``-r`` option is given, error statistics are provided for each speech utterance, as shown in the Sample Output section below. - -You can see the explicit description of -each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -GNA-specific details -++++++++++++++++++++ - -Quantization ------------- - -If the GNA device is selected (for example, using the ``-d GNA_AUTO`` flag), the GNA OpenVINO™ Runtime plugin quantizes the model and input feature vector sequence to integer representation before performing inference. -Several parameters control neural network quantization. The ``-q`` flag determines the quantization mode. -Two modes are supported: - -- *static* - The first utterance in the input file is scanned for dynamic range. The scale factor (floating point scalar multiplier) required to scale the maximum input value of the first utterance to 16384 (15 bits) is used for all subsequent inputs. The neural network is quantized to accommodate the scaled input dynamic range. -- *user-defined* - The user may specify a scale factor via the ``-sf`` flag that will be used for static quantization. - -The ``-qb`` flag provides a hint to the GNA plugin regarding the preferred target weight resolution for all layers. For example, when ``-qb 8`` is specified, the plugin will use 8-bit weights wherever possible in the -network. - -.. note:: - - It is not always possible to use 8-bit weights due to GNA hardware limitations. For example, convolutional layers always use 16-bit weights (GNA hardware version 1 and 2). This limitation will be removed in GNA hardware version 3 and higher. - - -.. _execution-modes-speech: - -Execution Modes ---------------- - -Several execution modes are supported via the ``-d`` flag: - -- ``CPU`` - All calculations are performed on CPU device using CPU Plugin. -- ``GPU`` - All calculations are performed on GPU device using GPU Plugin. -- ``NPU`` - All calculations are performed on NPU device using NPU Plugin. -- ``GNA_AUTO`` - GNA hardware is used if available and the driver is installed. Otherwise, the GNA device is emulated in fast-but-not-bit-exact mode. -- ``GNA_HW`` - GNA hardware is used if available and the driver is installed. Otherwise, an error will occur. -- ``GNA_SW`` - Deprecated. The GNA device is emulated in fast-but-not-bit-exact mode. -- ``GNA_SW_FP32`` - Substitutes parameters and calculations from low precision to floating point (FP32). -- ``GNA_SW_EXACT`` - GNA device is emulated in bit-exact mode. - -Loading and Saving Models -------------------------- - -The GNA plugin supports loading and saving of the GNA-optimized model (non-IR) via the ``-rg`` and ``-wg`` flags. This makes it possible to avoid the cost of full model quantization at run time.
The GNA plugin also supports export of firmware-compatible embedded model images for the Intel® Speech Enabling Developer Kit and Amazon Alexa* Premium Far-Field Voice Development Kit via the ``-we`` flag (save only). - -In addition to performing inference directly from a GNA model file, these combinations of options make it possible to: - -- Convert from IR format to GNA format model file (``-m``, ``-wg``) -- Convert from IR format to embedded format model file (``-m``, ``-we``) -- Convert from GNA format to embedded format model file (``-rg``, ``-we``) - -Building -######## - -To build the sample, please use instructions available at :doc:`Build the Sample Applications ` section in OpenVINO™ Toolkit Samples guide. - -Running -####### - -Run the application with the -h option to see the usage message: - -.. code-block:: sh - - speech_sample -h - -Usage message: - -.. code-block:: sh - - [ INFO ] OpenVINO Runtime version ......... - [ INFO ] Build ........... - [ INFO ] - [ INFO ] Parsing input parameters - - speech_sample [OPTION] - Options: - - -h Print a usage message. - -i "" Required. Path(s) to input file(s). Usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. - -m "" Required. Path to an .xml file with a trained model (required if -rg is missing). - -o "" Optional. Output file name(s) to save scores (inference results). Example of usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. - -d "" Optional. Specify a target device to infer on. CPU, GPU, NPU, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified. - -pc Optional. Enables per-layer performance report. - -q "" Optional. Input quantization mode for GNA: static (default) or user defined (use with -sf). - -qb "" Optional. Weight resolution in bits for GNA quantization: 8 or 16 (default) - -sf "" Optional. User-specified input scale factor for GNA quantization (use with -q user). If the model contains multiple inputs, provide scale factors by separating them with commas. For example: :,: or just to be applied to all inputs. - -bs "" Optional. Batch size 1-8 (default 1) - -r "" Optional. Read reference score file(s) and compare inference results with reference scores. Usage for a single file/layer: or . Example of usage for several files/layers: :=,:=. - -rg "" Read GNA model from file using path/filename provided (required if -m is missing). - -wg "" Optional. Write GNA model to file using path/filename provided. - -we "" Optional. Write GNA embedded model to file using path/filename provided. - -cw_l "" Optional. Number of frames for left context windows (default is 0). Works only with context window networks. If you use the cw_l or cw_r flag, then batch size argument is ignored. - -cw_r "" Optional. Number of frames for right context windows (default is 0). Works only with context window networks. If you use the cw_r or cw_l flag, then batch size argument is ignored. - -layout "" Optional. Prompts how network layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size. - -pwl_me "" Optional. The maximum percent of error for PWL function.The value must be in <0, 100> range. The default value is 1.0. - -exec_target "" Optional. Specify GNA execution target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. 
By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_EXEC_TARGET config option description. - -compile_target "" Optional. Specify GNA compile target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_COMPILE_TARGET config option description. - -memory_reuse_off Optional. Disables memory optimizations for compiled model. - - Available target devices: CPU GNA GPU NPU - - -.. _model-preparation-speech: - -Model Preparation -+++++++++++++++++ - -You can use the following model conversion command to convert a Kaldi nnet1 or nnet2 neural network model to the OpenVINO™ toolkit Intermediate Representation format: - -.. code-block:: sh - - mo --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir - -The following pre-trained models are available: - -- rm_cnn4a_smbr -- rm_lstm4f -- wsj_dnn5b_smbr - -All of them can be downloaded from `the storage `__. - -Speech Inference -++++++++++++++++ - -Once the IR is created, you can do inference on Intel® processors with the GNA co-processor (or emulation library): - -.. code-block:: sh - - speech_sample -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_scores_10.ark -d GNA_AUTO -o result.ark - -Here, the floating point Kaldi-generated reference neural network scores (``dev93_scores_10.ark``) corresponding to the input feature file (``dev93_10.ark``) are assumed to be available for comparison. - -.. note:: - - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `. - - - The sample supports input and output in the NumPy file format (\*.npz). - - - Specifying a flag that takes only a single value, such as ``-m``, multiple times (for example, ``./speech_sample -m model.xml -m model2.xml``) results in only the first value being used. - -Sample Output -############# - -The sample application logs each step in a standard output stream. - -.. code-block:: sh - - [ INFO ] OpenVINO runtime: OpenVINO Runtime version ......... 2022.1.0 - [ INFO ] Build ........... 2022.1.0-6311-a90bb1ff017 - [ INFO ] - [ INFO ] Parsing input parameters - [ INFO ] Loading model files: - [ INFO ] \test_data\models\wsj_dnn5b_smbr_fp32\wsj_dnn5b_smbr_fp32.xml - [ INFO ] Using scale factor of 2175.43 calculated from first utterance. - [ INFO ] Model loading time 0.0034 ms - [ INFO ] Loading model to the device GNA_AUTO - [ INFO ] Loading model to the device - [ INFO ] Number scores per frame : 3425 - Utterance 0: - Total time in Infer (HW and SW): 5687.53 ms - Frames in utterance: 1294 frames - Average Infer time per frame: 4.39531 ms - max error: 0.705184 - avg error: 0.0448388 - avg rms error: 0.0574098 - stdev error: 0.0371649 - - - End of Utterance 0 - - [ INFO ] Number scores per frame : 3425 - Utterance 1: - Total time in Infer (HW and SW): 4341.34 ms - Frames in utterance: 1005 frames - Average Infer time per frame: 4.31974 ms - max error: 0.757597 - avg error: 0.0452166 - avg rms error: 0.0578436 - stdev error: 0.0372769 - - - End of Utterance 1 - - ...
- End of Utterance X - - [ INFO ] Execution successful - -Use of Sample in Kaldi* Speech Recognition Pipeline -################################################### - -The Wall Street Journal DNN model used in this example was prepared using the Kaldi s5 recipe and the Kaldi Nnet (nnet1) framework. It is possible to recognize speech by substituting the ``speech_sample`` for -Kaldi's nnet-forward command. Since the ``speech_sample`` does not yet use pipes, it is necessary to use temporary files for speaker-transformed feature vectors and scores when running the Kaldi speech recognition pipeline. The following operations assume that feature extraction was already performed according to the ``s5`` recipe and that the working directory within the Kaldi source tree is ``egs/wsj/s5``. - -1. Prepare a speaker-transformed feature set given the feature transform specified in ``final.feature_transform`` and the feature files specified in ``feats.scp``: - - .. code-block:: sh - - nnet-forward --use-gpu=no final.feature_transform "ark,s,cs:copy-feats scp:feats.scp ark:- |" ark:feat.ark - -2. Score the feature set using the ``speech_sample``: - - .. code-block:: sh - - ./speech_sample -d GNA_AUTO -bs 8 -i feat.ark -m wsj_dnn5b.xml -o scores.ark - - OpenVINO™ toolkit Intermediate Representation ``wsj_dnn5b.xml`` file was generated in the previous :ref:`Model Preparation ` section. - -3. Run the Kaldi decoder to produce n-best text hypotheses and select the most likely text given the WFST (``HCLG.fst``), vocabulary (``words.txt``), and TID/PID mapping (``final.mdl``): - - .. code-block:: sh - - latgen-faster-mapped --max-active=7000 --max-mem=50000000 --beam=13.0 --lattice-beam=6.0 --acoustic-scale=0.0833 --allow-partial=true --word-symbol-table=words.txt final.mdl HCLG.fst ark:scores.ark ark:-| lattice-scale --inv-acoustic-scale=13 ark:- ark:- | lattice-best-path --word-symbol-table=words.txt ark:- ark,t:- > out.txt & - -4. Run the word error rate tool to check accuracy given the vocabulary (``words.txt``) and reference transcript (``test_filt.txt``): - - .. code-block:: sh - - cat out.txt | utils/int2sym.pl -f 2- words.txt | sed s:\::g | compute-wer --text --mode=present ark:test_filt.txt ark,p:- - - All of the mentioned files can be downloaded from `the storage `__. - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.rst deleted file mode 100644 index 2dee0e4093c31d..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_classification.rst +++ /dev/null @@ -1,174 +0,0 @@ -.. {#openvino_inference_engine_samples_hello_classification_README} - -Hello Classification C++ Sample -=============================== - - -.. meta:: - :description: Learn how to do inference of image - classification models using Synchronous Inference Request - (C++) API. - - -This sample demonstrates how to do inference of image classification models using the Synchronous Inference Request API. - -Models with only one input and output are supported. - -.. tab-set:: - - ..
-
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Options                     | Values                                                                           |
-      +=============================+==================================================================================+
-      | Validated Models            | :doc:`alexnet `, :doc:`googlenet-v1 `                                            |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Model Format                | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx)  |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Supported devices           | :doc:`All `                                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Other language realization  | :doc:`C `, :doc:`Python `                                                        |
-      +-----------------------------+----------------------------------------------------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Feature                  | API                                                            | Description                                                                                                                                                  |
-      +==========================+================================================================+==============================================================================================================================================================+
-      | OpenVINO Runtime Version | ``ov::get_openvino_version``                                   | Get OpenVINO API version                                                                                                                                     |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Basic Infer Flow         | ``ov::Core::read_model``,                                      | Common API to do inference: read and compile a model, create an infer request, configure input and output tensors                                           |
-      |                          | ``ov::Core::compile_model``,                                   |                                                                                                                                                              |
-      |                          | ``ov::CompiledModel::create_infer_request``,                   |                                                                                                                                                              |
-      |                          | ``ov::InferRequest::set_input_tensor``,                        |                                                                                                                                                              |
-      |                          | ``ov::InferRequest::get_output_tensor``                        |                                                                                                                                                              |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Synchronous Infer        | ``ov::InferRequest::infer``                                    | Do synchronous inference                                                                                                                                     |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Model Operations         | ``ov::Model::inputs``,                                         | Get inputs and outputs of a model                                                                                                                            |
-      |                          | ``ov::Model::outputs``                                         |                                                                                                                                                              |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Tensor Operations        | ``ov::Tensor::get_shape``                                      | Get a tensor shape                                                                                                                                           |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-      | Preprocessing            | ``ov::preprocess::InputTensorInfo::set_element_type``,         | Set an image of the original size as input for a model with a different input size. Resize and layout conversions are performed automatically by the        |
-      |                          | ``ov::preprocess::InputTensorInfo::set_layout``,               | corresponding plugin just before inference.                                                                                                                  |
-      |                          | ``ov::preprocess::InputTensorInfo::set_spatial_static_shape``, |                                                                                                                                                              |
-      |                          | ``ov::preprocess::PreProcessSteps::resize``,                   |                                                                                                                                                              |
-      |                          | ``ov::preprocess::InputModelInfo::set_layout``,                |                                                                                                                                                              |
-      |                          | ``ov::preprocess::OutputTensorInfo::set_element_type``,        |                                                                                                                                                              |
-      |                          | ``ov::preprocess::PrePostProcessor::build``                    |                                                                                                                                                              |
-      +--------------------------+----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/hello_classification/main.cpp
-         :language: cpp
-
-
-How It Works
-############
-
-At startup, the sample application reads command line parameters, prepares input data, loads the specified model and image to the OpenVINO™ Runtime plugin, and performs synchronous inference. It then processes the output data and writes it to a standard output stream.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-#######
-
-.. code-block:: console
-
-   hello_classification
-
-To run the sample, you need to specify a model and image:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-- You can use images from the media files collection available at `the storage `__.
-
-.. note::
-
-   - By default, OpenVINO™ Toolkit Samples and Demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using ``mo`` with the ``reverse_input_channels`` argument specified.
For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `. - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - -Example -+++++++ - -1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools: - - .. code-block:: console - - python -m pip install openvino-dev[caffe] - -2. Download a pre-trained model using: - - .. code-block:: console - - omz_downloader --name googlenet-v1 - -3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter: - - .. code-block:: console - - omz_converter --name googlenet-v1 - -4. Perform inference of ``car.bmp`` using the ``googlenet-v1`` model on a ``GPU``, for example: - - .. code-block:: console - - hello_classification googlenet-v1.xml car.bmp GPU - -Sample Output -############# - -The application outputs top-10 inference results. - -.. code-block:: console - - [ INFO ] OpenVINO Runtime version ......... - [ INFO ] Build ........... - [ INFO ] - [ INFO ] Loading model files: /models/googlenet-v1.xml - [ INFO ] model name: GoogleNet - [ INFO ] inputs - [ INFO ] input name: data - [ INFO ] input type: f32 - [ INFO ] input shape: {1, 3, 224, 224} - [ INFO ] outputs - [ INFO ] output name: prob - [ INFO ] output type: f32 - [ INFO ] output shape: {1, 1000} - - Top 10 results: - - Image /images/car.bmp - - classid probability - ------- ----------- - 656 0.8139648 - 654 0.0550537 - 468 0.0178375 - 436 0.0165405 - 705 0.0111694 - 817 0.0105820 - 581 0.0086823 - 575 0.0077515 - 734 0.0064468 - 785 0.0043983 - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.rst deleted file mode 100644 index 1ba17319e14c30..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_nv12_input_classification.rst +++ /dev/null @@ -1,171 +0,0 @@ -.. {#openvino_inference_engine_samples_hello_nv12_input_classification_README} - -Hello NV12 Input Classification C++ Sample -========================================== - - -.. meta:: - :description: Learn how to do inference of image - classification models with images in NV12 color format using - Synchronous Inference Request (C++) API. - - -This sample demonstrates how to execute an inference of image classification models with images in NV12 color format using Synchronous Inference Request API. - -.. tab-set:: - - .. 
tab-item:: Requirements
-
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Options                     | Values                                                                           |
-      +=============================+==================================================================================+
-      | Validated Models            | :doc:`alexnet `                                                                  |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Model Format                | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx)  |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Validated images            | An uncompressed image in the NV12 color format - \*.yuv                          |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Supported devices           | :doc:`All `                                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Other language realization  | :doc:`C `                                                                        |
-      +-----------------------------+----------------------------------------------------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +---------------------------+-------------------------------------------------------------+--------------------------------------------+
-      | Feature                   | API                                                         | Description                                |
-      +===========================+=============================================================+============================================+
-      | Node Operations           | ``ov::Output::get_any_name``                                | Get a layer name                           |
-      +---------------------------+-------------------------------------------------------------+--------------------------------------------+
-      | Infer Request Operations  | ``ov::InferRequest::set_tensor``,                           | Operate with tensors                       |
-      |                           | ``ov::InferRequest::get_tensor``                            |                                            |
-      +---------------------------+-------------------------------------------------------------+--------------------------------------------+
-      | Preprocessing             | ``ov::preprocess::InputTensorInfo::set_color_format``,      | Change the color format of the input data |
-      |                           | ``ov::preprocess::PreProcessSteps::convert_element_type``,  |                                            |
-      |                           | ``ov::preprocess::PreProcessSteps::convert_color``          |                                            |
-      +---------------------------+-------------------------------------------------------------+--------------------------------------------+
-
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/hello_nv12_input_classification/main.cpp
-         :language: cpp
-
-How It Works
-############
-
-At startup, the sample application reads command line parameters and loads the specified model and an image in the NV12 color format to an OpenVINO™ Runtime plugin. Then, the sample creates a synchronous inference request object. When inference is done, the application outputs data to the standard output stream. You can place labels in a ``.labels`` file next to the model to get human-readable output.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
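-
-The preprocessing calls listed in the table above can be sketched as follows. This is a minimal illustration only, not part of the sample; the model path ``alexnet.xml`` is a placeholder assumption:
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-
-   int main() {
-       ov::Core core;
-       auto model = core.read_model("alexnet.xml");  // placeholder model path
-
-       ov::preprocess::PrePostProcessor ppp(model);
-       // Declare that input data arrives as two-plane NV12 (Y plane and interleaved UV plane).
-       ppp.input().tensor()
-           .set_element_type(ov::element::u8)
-           .set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES, {"y", "uv"});
-       // Convert to the element type and color format the model expects.
-       ppp.input().preprocess()
-           .convert_element_type(ov::element::f32)
-           .convert_color(ov::preprocess::ColorFormat::BGR);
-       model = ppp.build();
-       return 0;
-   }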
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-#######
-
-.. code-block:: console
-
-   hello_nv12_input_classification
-
-To run the sample, you need to specify a model and image:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-- You can use images from the media files collection available at `the storage `__.
-
-The sample accepts an uncompressed image in the NV12 color format. To run the sample, you need to convert your BGR/RGB image to NV12. To do this, you can use one of the widely available tools such as FFmpeg\* or GStreamer\*. The following command shows how to convert an ordinary image into an uncompressed NV12 image using FFmpeg:
-
-.. code-block:: sh
-
-   ffmpeg -i car.jpg -pix_fmt nv12 car.yuv
-
-
-.. note::
-
-   - Because the sample reads raw image files, you should provide the correct image size along with the image path. The sample expects the logical size of the image, not the buffer size. For example, for a 640x480 BGR/RGB image the corresponding NV12 logical image size is also 640x480, whereas the buffer size is 640x720.
-   - By default, this sample expects that the model input has BGR channels order. If you trained your model to work with RGB order, you need to reconvert your model using ``mo`` with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
-   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `.
-   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
-
-Example
-+++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: console
-
-      python -m pip install openvino-dev[caffe]
-
-2. Download a pre-trained model:
-
-   .. code-block:: console
-
-      omz_downloader --name alexnet
-
-3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter:
-
-   .. code-block:: console
-
-      omz_converter --name alexnet
-
-4. Perform inference of an NV12 image using the ``alexnet`` model on a ``CPU``, for example:
-
-   .. code-block:: console
-
-      hello_nv12_input_classification alexnet.xml car.yuv 300x300 CPU
-
-
-Sample Output
-#############
-
-The application outputs top-10 inference results.
-
-.. code-block:: console
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
- [ INFO ]
-   [ INFO ] Loading model files: \models\alexnet.xml
-   [ INFO ] model name: AlexNet
-   [ INFO ]     inputs
-   [ INFO ]         input name: data
-   [ INFO ]         input type: f32
-   [ INFO ]         input shape: {1, 3, 227, 227}
-   [ INFO ]     outputs
-   [ INFO ]         output name: prob
-   [ INFO ]         output type: f32
-   [ INFO ]         output shape: {1, 1000}
-
-   Top 10 results:
-
-   Image \images\car.yuv
-
-   classid probability
-   ------- -----------
-   656     0.6668988
-   654     0.1125269
-   581     0.0679280
-   874     0.0340229
-   436     0.0257744
-   817     0.0169367
-   675     0.0110199
-   511     0.0106134
-   569     0.0083373
-   717     0.0061734
-
-
-See Also
-########
-
-- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-- :doc:`Using OpenVINO™ Toolkit Samples `
-- :doc:`Model Downloader `
-- :doc:`Convert a Model `
-
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.rst
deleted file mode 100644
index b64c494cf202e9..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_query_device.rst
+++ /dev/null
@@ -1,126 +0,0 @@
-.. {#openvino_inference_engine_samples_hello_query_device_README}
-
-Hello Query Device C++ Sample
-=============================
-
-
-.. meta::
-   :description: Learn how to show metrics and default
-                 configuration values of inference devices using Query
-                 Device (C++) API feature.
-
-
-This sample demonstrates how to query OpenVINO™ Runtime devices and print their metrics and default configuration values, using the :doc:`Properties API `.
-
-.. tab-set::
-
-   .. tab-item:: Requirements
-
-      +-----------------------------+------------------------------------+
-      | Options                     | Values                             |
-      +=============================+====================================+
-      | Supported devices           | :doc:`All `                        |
-      +-----------------------------+------------------------------------+
-      | Other language realization  | :doc:`Python `                     |
-      +-----------------------------+------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +--------------------+---------------------------------------+---------------------------------------------------------------------+
-      | Feature            | API                                   | Description                                                         |
-      +====================+=======================================+=====================================================================+
-      | Available Devices  | ``ov::Core::get_available_devices``,  | Get available devices information and configuration for inference  |
-      |                    | ``ov::Core::get_property``            |                                                                     |
-      +--------------------+---------------------------------------+---------------------------------------------------------------------+
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/hello_query_device/main.cpp
-         :language: cpp
-
-How It Works
-############
-
-The sample queries all available OpenVINO™ Runtime devices and prints their supported metrics and plugin configuration parameters.
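-
-The device query calls from the table above can be sketched in a few lines. This is a minimal illustration, not part of the sample; it assumes only the standard ``ov::Core`` API:
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-   #include <iostream>
-
-   int main() {
-       ov::Core core;
-       // Enumerate all registered devices, for example CPU, GPU, GNA.
-       for (const std::string& device : core.get_available_devices()) {
-           std::cout << device << std::endl;
-           // FULL_DEVICE_NAME is a read-only property supported by all devices.
-           std::cout << "    FULL_DEVICE_NAME : "
-                     << core.get_property(device, ov::device::full_name) << std::endl;
-       }
-       return 0;
-   }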
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-#######
-
-To see the queried information, run the following:
-
-.. code-block:: console
-
-   hello_query_device
-
-Sample Output
-#############
-
-The application prints all available devices with their supported metrics and default values for configuration parameters:
-
-.. code-block:: console
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
-   [ INFO ]
-   [ INFO ] Available devices:
-   [ INFO ] CPU
-   [ INFO ]     SUPPORTED_METRICS:
-   [ INFO ]         AVAILABLE_DEVICES : [ ]
-   [ INFO ]         FULL_DEVICE_NAME : Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz
-   [ INFO ]         OPTIMIZATION_CAPABILITIES : [ FP32 FP16 INT8 BIN ]
-   [ INFO ]         RANGE_FOR_ASYNC_INFER_REQUESTS : { 1, 1, 1 }
-   [ INFO ]         RANGE_FOR_STREAMS : { 1, 8 }
-   [ INFO ]         IMPORT_EXPORT_SUPPORT : true
-   [ INFO ]     SUPPORTED_CONFIG_KEYS (default values):
-   [ INFO ]         CACHE_DIR : ""
-   [ INFO ]         CPU_BIND_THREAD : NO
-   [ INFO ]         CPU_THREADS_NUM : 0
-   [ INFO ]         CPU_THROUGHPUT_STREAMS : 1
-   [ INFO ]         DUMP_EXEC_GRAPH_AS_DOT : ""
-   [ INFO ]         ENFORCE_BF16 : NO
-   [ INFO ]         EXCLUSIVE_ASYNC_REQUESTS : NO
-   [ INFO ]         PERFORMANCE_HINT : ""
-   [ INFO ]         PERFORMANCE_HINT_NUM_REQUESTS : 0
-   [ INFO ]         PERF_COUNT : NO
-   [ INFO ]
-   [ INFO ] GNA
-   [ INFO ]     SUPPORTED_METRICS:
-   [ INFO ]         AVAILABLE_DEVICES : [ GNA_SW_EXACT ]
-   [ INFO ]         OPTIMAL_NUMBER_OF_INFER_REQUESTS : 1
-   [ INFO ]         FULL_DEVICE_NAME : GNA_SW_EXACT
-   [ INFO ]         GNA_LIBRARY_FULL_VERSION : 3.0.0.1455
-   [ INFO ]         IMPORT_EXPORT_SUPPORT : true
-   [ INFO ]     SUPPORTED_CONFIG_KEYS (default values):
-   [ INFO ]         EXCLUSIVE_ASYNC_REQUESTS : NO
-   [ INFO ]         GNA_COMPACT_MODE : YES
-   [ INFO ]         GNA_COMPILE_TARGET : ""
-   [ INFO ]         GNA_DEVICE_MODE : GNA_SW_EXACT
-   [ INFO ]         GNA_EXEC_TARGET : ""
-   [ INFO ]         GNA_FIRMWARE_MODEL_IMAGE : ""
-   [ INFO ]         GNA_FIRMWARE_MODEL_IMAGE_GENERATION : ""
-   [ INFO ]         GNA_LIB_N_THREADS : 1
-   [ INFO ]         GNA_PRECISION : I16
-   [ INFO ]         GNA_PWL_MAX_ERROR_PERCENT : 1.000000
-   [ INFO ]         GNA_PWL_UNIFORM_DESIGN : NO
-   [ INFO ]         GNA_SCALE_FACTOR : 1.000000
-   [ INFO ]         GNA_SCALE_FACTOR_0 : 1.000000
-   [ INFO ]         LOG_LEVEL : LOG_NONE
-   [ INFO ]         PERF_COUNT : NO
-   [ INFO ]         SINGLE_THREAD : YES
-
-See Also
-########
-
-- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-- :doc:`Using OpenVINO™ Toolkit Samples `
-
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.rst
deleted file mode 100644
index b5d759a7cf0220..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_hello_reshape_ssd.rst
+++ /dev/null
@@ -1,162 +0,0 @@
-.. {#openvino_inference_engine_samples_hello_reshape_ssd_README}
-
-Hello Reshape SSD C++ Sample
-============================
-
-
-.. meta::
-   :description: Learn how to do inference of object
-                 detection models using shape inference feature and Synchronous
-                 Inference Request (C++) API.
-
-
-This sample demonstrates how to do synchronous inference of object detection models using the :doc:`input reshape feature `.
-Models with only one input and output are supported.
-
-.. tab-set::
-
-   .. tab-item:: Requirements
-
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Options                     | Values                                                                           |
-      +=============================+==================================================================================+
-      | Validated Models            | :doc:`person-detection-retail-0013 `                                             |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Model Format                | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx)  |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Supported devices           | :doc:`All `                                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Other language realization  | :doc:`Python `                                                                   |
-      +-----------------------------+----------------------------------------------------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +--------------------+-------------------------------------------------------------+---------------------------------+
-      | Feature            | API                                                         | Description                     |
-      +====================+=============================================================+=================================+
-      | Node operations    | ``ov::Node::get_type_info``,                                | Get node info                   |
-      |                    | ``ngraph::op::DetectionOutput::get_type_info_static``,      |                                 |
-      |                    | ``ov::Output::get_any_name``,                               |                                 |
-      |                    | ``ov::Output::get_shape``                                   |                                 |
-      +--------------------+-------------------------------------------------------------+---------------------------------+
-      | Model Operations   | ``ov::Model::get_ops``,                                     | Get model nodes, reshape input  |
-      |                    | ``ov::Model::reshape``                                      |                                 |
-      +--------------------+-------------------------------------------------------------+---------------------------------+
-      | Tensor Operations  | ``ov::Tensor::data``                                        | Get tensor data                 |
-      +--------------------+-------------------------------------------------------------+---------------------------------+
-      | Preprocessing      | ``ov::preprocess::PreProcessSteps::convert_element_type``,  | Model input preprocessing       |
-      |                    | ``ov::preprocess::PreProcessSteps::convert_layout``         |                                 |
-      +--------------------+-------------------------------------------------------------+---------------------------------+
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/hello_reshape_ssd/main.cpp
-         :language: cpp
-
-
-How It Works
-############
-
-Upon start-up, the sample application reads command line parameters and loads the specified model and image to the OpenVINO™ Runtime plugin. Then, the sample creates a synchronous inference request object. When inference is done, the application creates an output image and writes output data to the standard output stream. A minimal sketch of the reshape flow is shown after the Building section below.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
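-
-As a rough illustration of the reshape flow described above, the following minimal sketch reshapes a single-input model to a new spatial size before compilation. It is not part of the sample; the model path and the 960x1699 target size are placeholder assumptions:
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-
-   int main() {
-       ov::Core core;
-       // Placeholder model path; any single-input detection model in IR format works here.
-       auto model = core.read_model("person-detection-retail-0013.xml");
-
-       // Reshape the model input to match the input image size (N, C, H, W).
-       model->reshape(ov::PartialShape{1, 3, 960, 1699});
-
-       // Compile the reshaped model for the target device.
-       ov::CompiledModel compiled = core.compile_model(model, "CPU");
-       return 0;
-   }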
-
-Running
-#######
-
-.. code-block:: console
-
-   hello_reshape_ssd
-
-To run the sample, you need to specify a model and image:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-- You can use images from the media files collection available at `the storage `__.
-
-.. note::
-
-   - By default, OpenVINO™ Toolkit Samples and Demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using ``mo`` with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
-   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `.
-   - The sample accepts models in ONNX format (\*.onnx) that do not require preprocessing.
-
-Example
-+++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: console
-
-      python -m pip install openvino-dev
-
-2. Download a pre-trained model using:
-
-   .. code-block:: console
-
-      omz_downloader --name person-detection-retail-0013
-
-3. ``person-detection-retail-0013`` does not need to be converted, because it is already in the necessary format, so you can skip this step. If you want to use another model that is not in the IR or ONNX format, you can convert it using the model converter:
-
-   .. code-block:: console
-
-      omz_converter --name
-
-4. Perform inference of ``person_detection.bmp`` using the ``person-detection-retail-0013`` model on a ``GPU``, for example:
-
-   .. code-block:: console
-
-      hello_reshape_ssd person-detection-retail-0013.xml person_detection.bmp GPU
-
-Sample Output
-#############
-
-The application renders an image with detected objects enclosed in rectangles. It outputs the list of classes of the detected objects along with the respective confidence values and the coordinates of the rectangles to the standard output stream.
-
-.. code-block:: console
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
- [ INFO ] - [ INFO ] Loading model files: \models\person-detection-retail-0013.xml - [ INFO ] model name: ResMobNet_v4 (LReLU) with single SSD head - [ INFO ] inputs - [ INFO ] input name: data - [ INFO ] input type: f32 - [ INFO ] input shape: {1, 3, 320, 544} - [ INFO ] outputs - [ INFO ] output name: detection_out - [ INFO ] output type: f32 - [ INFO ] output shape: {1, 1, 200, 7} - Reshape network to the image size = [960x1699] - [ INFO ] model name: ResMobNet_v4 (LReLU) with single SSD head - [ INFO ] inputs - [ INFO ] input name: data - [ INFO ] input type: f32 - [ INFO ] input shape: {1, 3, 960, 1699} - [ INFO ] outputs - [ INFO ] output name: detection_out - [ INFO ] output type: f32 - [ INFO ] output shape: {1, 1, 200, 7} - [0,1] element, prob = 0.716309, (852,187)-(983,520) - The resulting image was saved in the file: hello_reshape_ssd_output.bmp - - This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.rst deleted file mode 100644 index 46b7ad7cb8d88a..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_image_classification_async.rst +++ /dev/null @@ -1,222 +0,0 @@ -.. {#openvino_inference_engine_samples_classification_sample_async_README} - -Image Classification Async C++ Sample -===================================== - - -.. meta:: - :description: Learn how to do inference of image - classification models using Asynchronous Inference Request - (C++) API. - - -This sample demonstrates how to do inference of image classification models using Asynchronous Inference Request API. - -Models with only one input and output are supported. - -In addition to regular images, the sample also supports single-channel ``ubyte`` images as an input for LeNet model. - -.. tab-set:: - - .. tab-item:: Requirements - - +----------------------------+-------------------------------------------------------------------------------------------------------+ - | Options | Values | - +============================+=======================================================================================================+ - | Validated Models | :doc:`alexnet `, :doc:`googlenet-v1 ` | - +----------------------------+-------------------------------------------------------------------------------------------------------+ - | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | - +----------------------------+-------------------------------------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +----------------------------+-------------------------------------------------------------------------------------------------------+ - | Other language realization | :doc:`Python ` | - +----------------------------+-------------------------------------------------------------------------------------------------------+ - - .. 
tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +---------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
-      | Feature                   | API                                                                     | Description                                                                              |
-      +===========================+=========================================================================+==========================================================================================+
-      | Asynchronous Infer        | ``ov::InferRequest::start_async``, ``ov::InferRequest::set_callback``   | Do asynchronous inference with callback.                                                 |
-      +---------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
-      | Model Operations          | ``ov::Output::get_shape``, ``ov::set_batch``                            | Manage the model, operate with its batch size. Set batch size using input image count.   |
-      +---------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
-      | Infer Request Operations  | ``ov::InferRequest::get_input_tensor``                                  | Get an input tensor.                                                                     |
-      +---------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
-      | Tensor Operations         | ``ov::shape_size``, ``ov::Tensor::data``                                | Get a tensor shape size and its data.                                                    |
-      +---------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------+
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/classification_sample_async/main.cpp
-         :language: cpp
-
-How It Works
-############
-
-At startup, the sample application reads command line parameters and loads the specified model and input images (or a folder with images) to the OpenVINO™ Runtime plugin. The batch size of the model is set according to the number of read images. Batch mode is an attribute independent of asynchronous mode; asynchronous mode works efficiently with any batch size.
-
-Then, the sample creates an inference request object and assigns a completion callback for it. In scope of the completion callback handling, the inference request is executed again.
-
-After that, the application starts inference for the first infer request and waits for the 10th inference request execution to complete. Asynchronous mode might increase throughput.
-
-When inference is done, the application outputs data to the standard output stream. You can place labels in a ``.labels`` file next to the model to get human-readable output.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-#######
-
-Run the application with the ``-h`` option to see the usage instructions:
-
-.. code-block:: sh
-
-   classification_sample_async -h
-
-Usage instructions:
-
-.. code-block:: sh
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
-
-   classification_sample_async [OPTION]
-   Options:
-
-       -h Print usage instructions.
-       -m "" Required. Path to an .xml file with a trained model.
-       -i "" Required. Path to a folder with images or path to image files: a .ubyte file for LeNet and a .bmp file for other models.
-       -d "" Optional. Specify the target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:" format to specify the HETERO plugin. Sample will look for a suitable plugin for the device specified.
-
-   Available target devices:
-
-To run the sample, you need to specify a model and image:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-- You can use images from the media files collection available at `the storage `__.
-
-.. note::
-
-   - By default, OpenVINO™ Toolkit Samples and Demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using ``mo`` with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
-
-   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `.
-
-   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
-
-   - Flags that accept a single value, such as ``-m``, use only the first value when stated multiple times; for example, ``./classification_sample_async -m model.xml -m model2.xml`` uses only ``model.xml``.
-
-   - The sample supports the NCHW model layout only.
-
-Example
-+++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: sh
-
-      python -m pip install openvino-dev[caffe]
-
-
-2. Download a pre-trained model using:
-
-   .. code-block:: sh
-
-      omz_downloader --name googlenet-v1
-
-
-3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter:
-
-   .. code-block:: sh
-
-      omz_converter --name googlenet-v1
-
-4. Perform inference of ``dog.bmp`` using the ``googlenet-v1`` model on a ``GPU``, for example:
-
-   .. code-block:: sh
-
-      classification_sample_async -m googlenet-v1.xml -i dog.bmp -d GPU
-
-Sample Output
-#############
-
-.. code-block:: sh
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
- [ INFO ]
-   [ INFO ] Parsing input parameters
-   [ INFO ] Files were added: 1
-   [ INFO ] /images/dog.bmp
-   [ INFO ] Loading model files:
-   [ INFO ] /models/googlenet-v1.xml
-   [ INFO ] model name: GoogleNet
-   [ INFO ]     inputs
-   [ INFO ]         input name: data
-   [ INFO ]         input type: f32
-   [ INFO ]         input shape: {1, 3, 224, 224}
-   [ INFO ]     outputs
-   [ INFO ]         output name: prob
-   [ INFO ]         output type: f32
-   [ INFO ]         output shape: {1, 1000}
-   [ INFO ] Read input images
-   [ INFO ] Set batch size 1
-   [ INFO ] model name: GoogleNet
-   [ INFO ]     inputs
-   [ INFO ]         input name: data
-   [ INFO ]         input type: u8
-   [ INFO ]         input shape: {1, 224, 224, 3}
-   [ INFO ]     outputs
-   [ INFO ]         output name: prob
-   [ INFO ]         output type: f32
-   [ INFO ]         output shape: {1, 1000}
-   [ INFO ] Loading model to the device GPU
-   [ INFO ] Create infer request
-   [ INFO ] Start inference (asynchronous executions)
-   [ INFO ] Completed 1 async request execution
-   [ INFO ] Completed 2 async request execution
-   [ INFO ] Completed 3 async request execution
-   [ INFO ] Completed 4 async request execution
-   [ INFO ] Completed 5 async request execution
-   [ INFO ] Completed 6 async request execution
-   [ INFO ] Completed 7 async request execution
-   [ INFO ] Completed 8 async request execution
-   [ INFO ] Completed 9 async request execution
-   [ INFO ] Completed 10 async request execution
-   [ INFO ] Completed async requests execution
-
-   Top 10 results:
-
-   Image /images/dog.bmp
-
-   classid probability
-   ------- -----------
-   156     0.8935547
-   218     0.0608215
-   215     0.0217133
-   219     0.0105667
-   212     0.0018835
-   217     0.0018730
-   152     0.0018730
-   157     0.0015745
-   154     0.0012817
-   220     0.0010099
-
-See Also
-########
-
-- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-- :doc:`Using OpenVINO™ Toolkit Samples `
-- :doc:`Model Downloader `
-- :doc:`Convert a Model `
-
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.rst
deleted file mode 100644
index a53588da2904cd..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_model_creation.rst
+++ /dev/null
@@ -1,238 +0,0 @@
-.. {#openvino_inference_engine_samples_model_creation_sample_README}
-
-Model Creation C++ Sample
-=========================
-
-
-.. meta::
-   :description: Learn how to create a model on the fly with a
-                 provided weights file and infer it later using Synchronous
-                 Inference Request (C++) API.
-
-
-This sample demonstrates how to execute synchronous inference using a :doc:`model ` built on the fly, with weights from the LeNet classification model, which is known to work well on digit classification tasks.
-
-You do not need an XML file to create a model. The ``ov::Model`` API allows you to create a model on the fly from source code.
-
-.. tab-set::
-
-   .. tab-item:: Requirements
-
-      +-----------------------------+----------------------------------------+
-      | Options                     | Values                                 |
-      +=============================+========================================+
-      | Validated Models            | LeNet                                  |
-      +-----------------------------+----------------------------------------+
-      | Model Format                | model weights file (\*.bin)            |
-      +-----------------------------+----------------------------------------+
-      | Validated images            | single-channel ``MNIST ubyte`` images  |
-      +-----------------------------+----------------------------------------+
-      | Supported devices           | :doc:`All `                            |
-      +-----------------------------+----------------------------------------+
-      | Other language realization  | :doc:`Python `                         |
-      +-----------------------------+----------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Feature                   | API                                     | Description                         |
-      +===========================+=========================================+=====================================+
-      | OpenVINO Runtime Info     | ``ov::Core::get_versions``              | Get device plugins versions         |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Shape Operations          | ``ov::Output::get_shape``,              | Operate with shape                  |
-      |                           | ``ov::Shape::size``,                    |                                     |
-      |                           | ``ov::shape_size``                      |                                     |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Tensor Operations         | ``ov::Tensor::get_byte_size``,          | Get tensor byte size and its data   |
-      |                           | ``ov::Tensor::data``                    |                                     |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Model Operations          | ``ov::set_batch``                       | Operate with model batch size       |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Infer Request Operations  | ``ov::InferRequest::get_input_tensor``  | Get an input tensor                 |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-      | Model creation objects    | ``ov::opset8::Parameter``,              | Used to construct an OpenVINO model |
-      |                           | ``ov::Node::output``,                   |                                     |
-      |                           | ``ov::opset8::Constant``,               |                                     |
-      |                           | ``ov::opset8::Convolution``,            |                                     |
-      |                           | ``ov::opset8::Add``,                    |                                     |
-      |                           | ``ov::opset1::MaxPool``,                |                                     |
-      |                           | ``ov::opset8::Reshape``,                |                                     |
-      |                           | ``ov::opset8::MatMul``,                 |                                     |
-      |                           | ``ov::opset8::Relu``,                   |                                     |
-      |                           | ``ov::opset8::Softmax``,                |                                     |
-      |                           | ``ov::descriptor::Tensor::set_names``,  |                                     |
-      |                           | ``ov::opset8::Result``,                 |                                     |
-      |                           | ``ov::Model``,                          |                                     |
-      |                           | ``ov::ParameterVector::vector``         |                                     |
-      +---------------------------+-----------------------------------------+-------------------------------------+
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification C++ sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/model_creation_sample/main.cpp
-         :language: cpp
-
-How It Works
-############
-
-At startup, the sample application does the following:
-
-- Reads command line parameters
-- Builds a model from the passed weights file (see :doc:`Build a Model `)
-- Loads the model and input data to the OpenVINO™ Runtime plugin
-- Performs synchronous inference and processes output data, logging each step in a standard output stream
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Building
-########
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-#######
-
-.. code-block:: console
-
-   model_creation_sample
-
-.. note::
-
-   - You can use the LeNet model weights file in the sample folder: ``lenet.bin`` (FP32 weights).
-   - The ``lenet.bin`` FP32 weights file was generated by the :doc:`model conversion API ` from the public LeNet model with the ``input_shape [64,1,28,28]`` parameter specified.
-   - The original model is available in the `Caffe* repository `__ on GitHub\*.
-
-
-You can do inference of an image using a pre-trained model on a GPU using the following command:
-
-.. code-block:: console
-
-   model_creation_sample lenet.bin GPU
-
-Sample Output
-#############
-
-The sample application logs each step in a standard output stream and outputs top-10 inference results.
-
-.. code-block:: console
-
-   [ INFO ] OpenVINO Runtime version .........
-   [ INFO ] Build ...........
-   [ INFO ]
-   [ INFO ] Device info:
-   [ INFO ] GPU
-   [ INFO ] Intel GPU plugin version .........
-   [ INFO ] Build ...........
- [ INFO ] - [ INFO ] - [ INFO ] Create model from weights: lenet.bin - [ INFO ] model name: lenet - [ INFO ] inputs - [ INFO ] input name: NONE - [ INFO ] input type: f32 - [ INFO ] input shape: {64, 1, 28, 28} - [ INFO ] outputs - [ INFO ] output name: output_tensor - [ INFO ] output type: f32 - [ INFO ] output shape: {64, 10} - [ INFO ] Batch size is 10 - [ INFO ] model name: lenet - [ INFO ] inputs - [ INFO ] input name: NONE - [ INFO ] input type: u8 - [ INFO ] input shape: {10, 28, 28, 1} - [ INFO ] outputs - [ INFO ] output name: output_tensor - [ INFO ] output type: f32 - [ INFO ] output shape: {10, 10} - [ INFO ] Compiling a model for the GPU device - [ INFO ] Create infer request - [ INFO ] Combine images in batch and set to input tensor - [ INFO ] Start sync inference - [ INFO ] Processing output tensor - - Top 1 results: - - Image 0 - - classid probability label - ------- ----------- ----- - 0 1.0000000 0 - - Image 1 - - classid probability label - ------- ----------- ----- - 1 1.0000000 1 - - Image 2 - - classid probability label - ------- ----------- ----- - 2 1.0000000 2 - - Image 3 - - classid probability label - ------- ----------- ----- - 3 1.0000000 3 - - Image 4 - - classid probability label - ------- ----------- ----- - 4 1.0000000 4 - - Image 5 - - classid probability label - ------- ----------- ----- - 5 1.0000000 5 - - Image 6 - - classid probability label - ------- ----------- ----- - 6 1.0000000 6 - - Image 7 - - classid probability label - ------- ----------- ----- - 7 1.0000000 7 - - Image 8 - - classid probability label - ------- ----------- ----- - 8 1.0000000 8 - - Image 9 - - classid probability label - ------- ----------- ----- - 9 1.0000000 9 - - - -Deprecation Notice -################## - -+--------------------+------------------+ -| Deprecation Begins | June 1, 2020 | -+====================+==================+ -| Removal Date | December 1, 2020 | -+--------------------+------------------+ - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.rst deleted file mode 100644 index 76b1025a19292e..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. {#openvino_inference_engine_samples_sync_benchmark_README} - -Sync Benchmark C++ Sample -========================= - - -.. meta:: - :description: Learn how to estimate performance of a model using Synchronous Inference Request (C++) API. - -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike :doc:`demos ` this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. - - -.. tab-set:: - - .. 
tab-item:: Requirements
-
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Options                     | Values                                                                           |
-      +=============================+==================================================================================+
-      | Validated Models            | :doc:`alexnet `,                                                                 |
-      |                             | :doc:`googlenet-v1 `,                                                            |
-      |                             | :doc:`yolo-v3-tf `,                                                              |
-      |                             | :doc:`face-detection-0200 `                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Model Format                | OpenVINO™ toolkit Intermediate Representation                                    |
-      |                             | (\*.xml + \*.bin), ONNX (\*.onnx)                                                |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Supported devices           | :doc:`All `                                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Other language realization  | :doc:`Python `                                                                   |
-      +-----------------------------+----------------------------------------------------------------------------------+
-
-   .. tab-item:: C++ API
-
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Feature                   | API                                           | Description                                  |
-      +===========================+===============================================+==============================================+
-      | OpenVINO Runtime Version  | ``ov::get_openvino_version``                  | Get OpenVINO API version.                    |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Basic Infer Flow          | ``ov::Core``, ``ov::Core::compile_model``,    | Common API to do inference: compile a model, |
-      |                           | ``ov::CompiledModel::create_infer_request``,  | create an infer request,                     |
-      |                           | ``ov::InferRequest::get_tensor``              | configure input tensors.                     |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Synchronous Infer         | ``ov::InferRequest::infer``                   | Do synchronous inference.                    |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Model Operations          | ``ov::CompiledModel::inputs``                 | Get inputs of a model.                       |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Tensor Operations         | ``ov::Tensor::get_shape``,                    | Get a tensor shape and its data.             |
-      |                           | ``ov::Tensor::data``                          |                                              |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/benchmark/sync_benchmark/main.cpp
-         :language: cpp
-
-How It Works
-####################
-
-The sample compiles a model for a given device, randomly generates input data, and performs synchronous inference multiple times for a given number of seconds. It then processes and reports performance results.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Building
-####################
-
-To build the sample, please use the instructions available in the :doc:`Build the Sample Applications ` section of the OpenVINO™ Toolkit Samples guide.
-
-Running
-####################
-
-.. 
code-block:: sh - - sync_benchmark (default: CPU) - - -To run the sample, you need to specify a model: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. - -.. note:: - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - -Example -++++++++++++++++++++ - -1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools: - - .. code-block:: sh - - python -m pip install openvino-dev[caffe] - - -2. Download a pre-trained model using: - - .. code-block:: sh - - omz_downloader --name googlenet-v1 - - -3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter: - - .. code-block:: sh - - omz_converter --name googlenet-v1 - - -4. Perform benchmarking using the ``googlenet-v1`` model on a ``CPU``: - - .. code-block:: sh - - sync_benchmark googlenet-v1.xml - - -Sample Output -#################### - -The application outputs performance results. - -.. code-block:: sh - - [ INFO ] OpenVINO: - [ INFO ] Build ................................. - [ INFO ] Count: 992 iterations - [ INFO ] Duration: 15009.8 ms - [ INFO ] Latency: - [ INFO ] Median: 14.00 ms - [ INFO ] Average: 15.13 ms - [ INFO ] Min: 9.33 ms - [ INFO ] Max: 53.60 ms - [ INFO ] Throughput: 66.09 FPS - - -See Also -#################### - -* :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -* :doc:`Using OpenVINO Samples ` -* :doc:`Model Downloader ` -* :doc:`Convert a Model ` - diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.rst deleted file mode 100644 index 4f56f727310b43..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.rst +++ /dev/null @@ -1,150 +0,0 @@ -.. {#openvino_inference_engine_samples_throughput_benchmark_README} - -Throughput Benchmark C++ Sample -=============================== - - -.. meta:: - :description: Learn how to estimate performance of a model using Asynchronous Inference Request (C++) API in throughput mode. - - -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike :doc:`demos ` this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. - -The reported results may deviate from what :doc:`benchmark_app ` reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. - -.. tab-set:: - - .. 
tab-item:: Requirements
-
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Options                     | Values                                                                           |
-      +=============================+==================================================================================+
-      | Validated Models            | :doc:`alexnet `,                                                                 |
-      |                             | :doc:`googlenet-v1 `,                                                            |
-      |                             | :doc:`yolo-v3-tf `,                                                              |
-      |                             | :doc:`face-detection-0200 `                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Model Format                | OpenVINO™ toolkit Intermediate Representation                                    |
-      |                             | (\*.xml + \*.bin), ONNX (\*.onnx)                                                |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Supported devices           | :doc:`All `                                                                      |
-      +-----------------------------+----------------------------------------------------------------------------------+
-      | Other language realization  | :doc:`Python `                                                                   |
-      +-----------------------------+----------------------------------------------------------------------------------+
-
-   .. tab-item:: C++ API
-
-      The following C++ API is used in the application:
-
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Feature                   | API                                           | Description                                  |
-      +===========================+===============================================+==============================================+
-      | OpenVINO Runtime Version  | ``ov::get_openvino_version``                  | Get OpenVINO API version.                    |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Basic Infer Flow          | ``ov::Core``, ``ov::Core::compile_model``,    | Common API to do inference: compile a model, |
-      |                           | ``ov::CompiledModel::create_infer_request``,  | create an infer request,                     |
-      |                           | ``ov::InferRequest::get_tensor``              | configure input tensors.                     |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Asynchronous Infer        | ``ov::InferRequest::start_async``,            | Do asynchronous inference with callback.     |
-      |                           | ``ov::InferRequest::set_callback``            |                                              |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Model Operations          | ``ov::CompiledModel::inputs``                 | Get inputs of a model.                       |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-      | Tensor Operations         | ``ov::Tensor::get_shape``,                    | Get a tensor shape and its data.             |
-      |                           | ``ov::Tensor::data``                          |                                              |
-      +---------------------------+-----------------------------------------------+----------------------------------------------+
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/cpp/benchmark/throughput_benchmark/main.cpp
-         :language: cpp
-
-How It Works
-####################
-
-The sample compiles a model for a given device, randomly generates input data, and performs asynchronous inference multiple times for a given number of seconds. It then processes and reports performance results.
-
-You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.
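-
-The asynchronous callback pattern from the table above can be sketched as follows. This is a minimal illustration, not part of the sample; the model path ``googlenet-v1.xml`` is a placeholder assumption, and only one request is shown, whereas the real sample keeps several requests in flight:
-
-.. code-block:: cpp
-
-   #include <openvino/openvino.hpp>
-   #include <atomic>
-   #include <iostream>
-
-   int main() {
-       ov::Core core;
-       // Compile with a throughput-oriented performance hint.
-       ov::CompiledModel compiled = core.compile_model(
-           "googlenet-v1.xml", "CPU",
-           ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
-       ov::InferRequest request = compiled.create_infer_request();
-
-       std::atomic<int> completed{0};
-       // The callback runs when the asynchronous inference finishes.
-       request.set_callback([&completed](std::exception_ptr error) {
-           if (!error) ++completed;
-       });
-       request.start_async();
-       request.wait();  // block until the request (and its callback) is done
-       std::cout << "Completed " << completed << " inference(s)" << std::endl;
-       return 0;
-   }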
- -Building -#################### - -To build the sample, please use instructions available at :doc:`Build the Sample Applications ` section in OpenVINO™ Toolkit Samples guide. - -Running -#################### - -.. code-block:: sh - - throughput_benchmark (default: CPU) - - -To run the sample, you need to specify a model: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. - -.. note:: - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - -Example -++++++++++++++++++++ - -1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools: - - .. code-block:: sh - - python -m pip install openvino-dev[caffe] - - -2. Download a pre-trained model using: - - .. code-block:: sh - - omz_downloader --name googlenet-v1 - - -3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter: - - .. code-block:: sh - - omz_converter --name googlenet-v1 - - -4. Perform benchmarking using the ``googlenet-v1`` model on a ``CPU``: - - .. code-block:: sh - - throughput_benchmark googlenet-v1.xml - - -Sample Output -#################### - -The application outputs performance results. - -.. code-block:: sh - - [ INFO ] OpenVINO: - [ INFO ] Build ................................. - [ INFO ] Count: 1577 iterations - [ INFO ] Duration: 15024.2 ms - [ INFO ] Latency: - [ INFO ] Median: 38.02 ms - [ INFO ] Average: 38.08 ms - [ INFO ] Min: 25.23 ms - [ INFO ] Max: 49.16 ms - [ INFO ] Throughput: 104.96 FPS - - -See Also -#################### - -* :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -* :doc:`Using OpenVINO Samples ` -* :doc:`Model Downloader ` -* :doc:`Convert a Model ` - diff --git a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst index 50674bc6a14963..4d05da628df4fb 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst @@ -1,7 +1,7 @@ .. {#openvino_docs_get_started_get_started_demos} -Get Started with C++ Samples -============================ +Get Started with Samples +======================== .. meta:: @@ -9,7 +9,7 @@ Get Started with C++ Samples toolkit, and how to run inference, using provided code samples. -To use OpenVINO samples, install OpenVINO using one of the following distributions: +To use OpenVINO samples, install OpenVINO using one of the following distributions: * Archive files (recommended) - :doc:`Linux ` | :doc:`Windows ` | :doc:`macOS ` * :doc:`APT ` or :doc:`YUM ` for Linux @@ -32,7 +32,7 @@ Before you build samples, refer to the :doc:`system requirements `. 4. :ref:`Download media files used as input, if necessary `. -Once you perform all the steps, you can :ref:`run inference with the chosen sample application ` to see the results. +Once you perform all the steps, you can :ref:`run inference with the chosen sample application ` to see the results. .. _build-samples: @@ -43,7 +43,7 @@ Select a sample you want to use from the :doc:`OpenVINO Samples `__ to run properly. Make sure to install it for use with vision-oriented samples. 
+ Some samples may also require `OpenCV `__ to run properly. Make sure to install it for use with vision-oriented samples. Instructions below show how to build sample applications with CMake. If you are interested in building them from source, check the `build instructions on GitHub `__ . @@ -57,58 +57,63 @@ Instructions below show how to build sample applications with CMake. If you are .. tab-item:: Python :sync: python - - Python samples do not require building. You can run the code samples in your development environment. - + + Each Python sample directory contains the ``requirements.txt`` file, which you must install before running the sample: + + .. code-block:: sh + + cd /samples/python/ + python3 -m pip install -r ./requirements.txt + .. tab-item:: C and C++ :sync: cpp - + To build the C or C++ sample applications for Linux, go to the ``/samples/c`` or ``/samples/cpp`` directory, respectively, and run the ``build_samples.sh`` script: - + .. code-block:: sh - + build_samples.sh - + Once the build is completed, you can find sample binaries in the following folders: - + * C samples: ``~/openvino_c_samples_build//Release`` * C++ samples: ``~/openvino_cpp_samples_build//Release`` where the is the output of ``uname -m``, for example, ``intel64``, ``armhf``, or ``aarch64``. - + You can also build the sample applications manually: - + .. note:: - + If you have installed the product as a root user, switch to root mode before you continue: ``sudo -i`` . - + 1. Navigate to a directory that you have write access to and create a samples build directory. This example uses a directory named ``build``: - + .. code-block:: sh - + mkdir build - - .. note:: - + + .. note:: + If you ran the Image Classification verification script during the installation, the C++ samples build directory is created in your home directory: ``~/openvino_cpp_samples_build/`` - + 2. Go to the created directory: - + .. code-block:: sh - + cd build - + 3. Run CMake to generate the Make files for release configuration. For example, for C++ samples: - - .. code-block:: sh - - cmake -DCMAKE_BUILD_TYPE=Release /samples/cpp - - + + .. code-block:: sh + + cmake -DCMAKE_BUILD_TYPE=Release /samples/cpp + + 4. Run ``make`` to build the samples: - + .. code-block:: sh - + cmake --build . --parallel - + For the release configuration, the sample application binaries are in ``//Release/``; for the debug configuration — in ``//Debug/``. @@ -119,29 +124,34 @@ Instructions below show how to build sample applications with CMake. If you are .. tab-item:: Python :sync: python - - Python samples do not require building. You can run the code samples in your development environment. - + + Each Python sample directory contains the ``requirements.txt`` file, which you must install before running the sample: + + .. code-block:: sh + + cd \samples\python\ + python -m pip install -r requirements.txt + .. tab-item:: C and C++ :sync: c-cpp .. note:: - + If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14 or higher. - + To build the C or C++ sample applications on Windows, go to the ``\samples\c`` or ``\samples\cpp`` directory, respectively, and run the ``build_samples_msvc.bat`` batch file: - + .. 
code-block:: sh - + build_samples_msvc.bat - + By default, the script automatically detects the highest Microsoft Visual Studio version installed on the machine and uses it to create and build a solution for a sample code - + Once the build is completed, you can find sample binaries in the following folders: - + * C samples: ``C:\Users\\Documents\Intel\OpenVINO\openvino_c_samples_build\\Release`` * C++ samples: ``C:\Users\\Documents\Intel\OpenVINO\openvino_cpp_samples_build\\Release`` where the is the output of ``echo PROCESSOR_ARCHITECTURE%``, for example, ``intel64`` (AMD64), or ``arm64``. - + You can also build a generated solution manually. For example, if you want to build C++ sample binaries in Debug configuration, run the appropriate version of the Microsoft Visual Studio and open the generated solution file from the ``C:\Users\\Documents\Intel\OpenVINO\openvino_cpp_samples_build\Samples.sln`` directory. .. tab-item:: macOS @@ -151,69 +161,74 @@ Instructions below show how to build sample applications with CMake. If you are .. tab-item:: Python :sync: python - - Python samples do not require building. You can run the code samples in your development environment. - + + Each Python sample directory contains the ``requirements.txt`` file, which you must install before running the sample: + + .. code-block:: sh + + cd /samples/python/ + python3 -m pip install -r ./requirements.txt + .. tab-item:: C and C++ :sync: cpp - .. note:: - + .. note:: + For building samples from the open-source version of OpenVINO toolkit, see the `build instructions on GitHub `__ . To build the C or C++ sample applications for macOS, go to the ``/samples/c`` or ``/samples/cpp`` directory, respectively, and run the ``build_samples.sh`` script: - + .. code-block:: sh - + build_samples.sh - + Once the build is completed, you can find sample binaries in the following folders: - + * C samples: ``~/openvino_c_samples_build//Release`` * C++ samples: ``~/openvino_cpp_samples_build//Release`` - + You can also build the sample applications manually. Before proceeding, make sure you have OpenVINO™ environment set correctly. This can be done manually by: - + .. code-block:: sh - + cd / source setupvars.sh - + .. note:: - + If you have installed the product as a root user, switch to root mode before you continue: ``sudo -i`` - + 1. Navigate to a directory that you have write access to and create a samples build directory. This example uses a directory named ``build``: - + .. code-block:: sh - + mkdir build - - .. note:: - + + .. note:: + If you ran the Image Classification verification script during the installation, the C++ samples build directory was already created in your home directory: ``~/openvino_cpp_samples_build/`` - + 2. Go to the created directory: - + .. code-block:: sh - + cd build - + 3. Run CMake to generate the Make files for release configuration. For example, for C++ samples: - + .. code-block:: sh - + cmake -DCMAKE_BUILD_TYPE=Release /samples/cpp - + 4. Run ``make`` to build the samples: - + .. code-block:: sh - + make - + For the release configuration, the sample application binaries are in ``//Release/``; for the debug configuration — in ``//Debug/``. - + .. _select-sample: @@ -227,7 +242,7 @@ First, select a sample from the :doc:`Sample Overview -m -i -d - + .. tab-item:: Windows :sync: windows - + .. code-block:: bat - + python -m -i -d - + .. tab-item:: macOS :sync: macos - + .. code-block:: sh - + python -m -i -d - + .. tab-item:: C++ :sync: cpp - + .. tab-set:: - + .. 
tab-item:: Linux :sync: linux - + .. code-block:: sh - + -i -m -d - + .. tab-item:: Windows :sync: windows - + .. code-block:: bat - + -i -m -d - + .. tab-item:: macOS :sync: macos - + .. code-block:: sh - + -i -m -d @@ -373,7 +388,7 @@ The following command shows how to run the Image Classification Code Sample usin .. note:: - * Running inference on Intel® Processor Graphics (GPU) requires :doc:`additional hardware configuration steps `, as described earlier on this page. + * Running inference on Intel® Processor Graphics (GPU) requires :doc:`additional hardware configuration steps `, as described earlier on this page. * Running on GPU is not compatible with macOS. .. tab-set:: @@ -382,52 +397,52 @@ The following command shows how to run the Image Classification Code Sample usin :sync: python .. tab-set:: - + .. tab-item:: Linux :sync: linux - + .. code-block:: sh - + python classification_sample_async.py -m ~/ir/googlenet-v1.xml -i ~/Downloads/dog.bmp -d CPU .. tab-item:: Windows :sync: windows - + .. code-block:: bat - + python classification_sample_async.py -m %USERPROFILE%\Documents\ir\googlenet-v1.xml -i %USERPROFILE%\Downloads\dog.bmp -d CPU .. tab-item:: macOS :sync: macos - + .. code-block:: sh - + python classification_sample_async.py -m ~/ir/googlenet-v1.xml -i ~/Downloads/dog.bmp -d CPU .. tab-item:: C++ :sync: cpp .. tab-set:: - + .. tab-item:: Linux :sync: linux - + .. code-block:: sh - + ./classification_sample_async -i ~/Downloads/dog.bmp -m ~/ir/googlenet-v1.xml -d CPU .. tab-item:: Windows :sync: windows - + .. code-block:: bat - + .\classification_sample_async.exe -i %USERPROFILE%\Downloads\dog.bmp -m %USERPROFILE%\Documents\ir\googlenet-v1.xml -d CPU .. tab-item:: macOS :sync: macos - + .. code-block:: sh - + ./classification_sample_async -i ~/Downloads/dog.bmp -m ~/ir/googlenet-v1.xml -d CPU @@ -458,5 +473,5 @@ When the sample application is complete, you are given the label and confidence Other Samples ================================ -Articles in this section describe all sample applications provided with OpenVINO. They will give you more information on how each of them works, giving you a convenient starting point for your own application. +Articles in this section describe all sample applications provided with OpenVINO. They will give you more information on how each of them works, giving you a convenient starting point for your own application. diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst new file mode 100644 index 00000000000000..f081eb016ba67d --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst @@ -0,0 +1,267 @@ +.. {#openvino_sample_hello_classification} + +Hello Classification Sample +=========================== + + +.. meta:: + :description: Learn how to do inference of image classification + models using Synchronous Inference Request API (Python, C++, C). + + +This sample demonstrates how to do inference of image classification models using +Synchronous Inference Request API. Before using the sample, refer to the following requirements: + +- Models with only one input and output are supported. +- The sample accepts any file format supported by ``core.read_model``. +- The sample has been validated with: :doc:`alexnet `, + :doc:`googlenet-v1 ` models. +- To build the sample, use instructions available at :ref:`Build the Sample Applications ` + section in "Get Started with Samples" guide. 
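+
+Before looking at the full sources below, the following minimal sketch shows the
+synchronous inference flow that the sample implements. It is illustrative only,
+not the sample itself: the model path, device name, and input shape are
+assumptions.
+
+.. code-block:: python
+
+   import numpy as np
+   import openvino as ov
+
+   core = ov.Core()
+   # Hypothetical model path; any format supported by core.read_model works.
+   model = core.read_model("model.xml")
+   compiled_model = core.compile_model(model, "CPU")
+
+   # Dummy input; the shape is an assumption for a typical image model.
+   input_data = np.zeros((1, 3, 224, 224), dtype=np.float32)
+
+   # Synchronous inference: infer() blocks until the results are ready.
+   request = compiled_model.create_infer_request()
+   request.infer({0: input_data})
+   probabilities = request.get_output_tensor().data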
+
+How It Works
+####################
+
+At startup, the sample application reads command-line parameters, prepares input data,
+loads a specified model and image to the OpenVINO™ Runtime plugin, performs synchronous
+inference, and processes output data, logging each step in a standard output stream.
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/python/hello_classification/hello_classification.py
+            :language: python
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/cpp/hello_classification/main.cpp
+            :language: cpp
+
+   .. tab-item:: C
+      :sync: c
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/c/hello_classification/main.c
+            :language: c
+
+
+You can see the explicit description of each sample step at
+:doc:`Integration Steps `
+section of "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+Running
+####################
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         python hello_classification.py
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         hello_classification
+
+   .. tab-item:: C
+      :sync: c
+
+      .. code-block:: console
+
+         hello_classification_c
+
+To run the sample, you need to specify a model and an image:
+
+- You can get a model specific for your inference task from one of the model
+  repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub.
+- You can use images from the media files collection available at
+  `the storage `__.
+
+.. note::
+
+   - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR
+     channels order. If you trained your model to work with RGB order, you need
+     to manually rearrange the default channels order in the sample or demo
+     application or reconvert your model using the model conversion API with the
+     ``reverse_input_channels`` argument specified. For more information about
+     the argument, refer to the **When to Reverse Input Channels** section of
+     :doc:`Embedding Preprocessing Computation `.
+   - Before running the sample with a trained model, make sure the model is
+     converted to the intermediate representation (IR) format (\*.xml + \*.bin)
+     using the :doc:`model conversion API `.
+   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
+
+Example
+++++++++++++++++++++
+
+1. Download a pre-trained model.
+2. You can convert it by using:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: python
+
+            import openvino as ov
+
+            ov_model = ov.convert_model('./models/alexnet')
+            # or, when the model is a Python model object
+            ov_model = ov.convert_model(alexnet)
+
+      .. tab-item:: CLI
+         :sync: cli
+
+         .. code-block:: console
+
+            ovc ./models/alexnet
+
+3. Perform inference of an image, using a model on a ``GPU``, for example:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: console
+
+            python hello_classification.py ./models/alexnet/alexnet.xml ./images/banana.jpg GPU
+
+      .. tab-item:: C++
+         :sync: cpp
+
+         .. code-block:: console
+
+            hello_classification ./models/googlenet-v1.xml ./images/car.bmp GPU
+
+      .. tab-item:: C
+         :sync: c
+
+         .. code-block:: console
+
+            hello_classification_c alexnet.xml ./opt/intel/openvino/samples/scripts/car.png GPU
+
+Sample Output
+#############
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      The sample application logs each step in a standard output stream and
+      outputs top-10 inference results.
+
+      .. 
code-block:: console + + [ INFO ] Creating OpenVINO Runtime Core + [ INFO ] Reading the model: /models/alexnet/alexnet.xml + [ INFO ] Loading the model to the plugin + [ INFO ] Starting inference in synchronous mode + [ INFO ] Image path: /images/banana.jpg + [ INFO ] Top 10 results: + [ INFO ] class_id probability + [ INFO ] -------------------- + [ INFO ] 954 0.9703885 + [ INFO ] 666 0.0219518 + [ INFO ] 659 0.0033120 + [ INFO ] 435 0.0008246 + [ INFO ] 809 0.0004433 + [ INFO ] 502 0.0003852 + [ INFO ] 618 0.0002906 + [ INFO ] 910 0.0002848 + [ INFO ] 951 0.0002427 + [ INFO ] 961 0.0002213 + [ INFO ] + [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool + + .. tab-item:: C++ + :sync: cpp + + The application outputs top-10 inference results. + + .. code-block:: console + + [ INFO ] OpenVINO Runtime version ......... + [ INFO ] Build ........... + [ INFO ] + [ INFO ] Loading model files: /models/googlenet-v1.xml + [ INFO ] model name: GoogleNet + [ INFO ] inputs + [ INFO ] input name: data + [ INFO ] input type: f32 + [ INFO ] input shape: {1, 3, 224, 224} + [ INFO ] outputs + [ INFO ] output name: prob + [ INFO ] output type: f32 + [ INFO ] output shape: {1, 1000} + + Top 10 results: + + Image /images/car.bmp + + classid probability + ------- ----------- + 656 0.8139648 + 654 0.0550537 + 468 0.0178375 + 436 0.0165405 + 705 0.0111694 + 817 0.0105820 + 581 0.0086823 + 575 0.0077515 + 734 0.0064468 + 785 0.0043983 + + .. tab-item:: C + :sync: c + + The application outputs top-10 inference results. + + .. code-block:: console + + Top 10 results: + + Image /opt/intel/openvino/samples/scripts/car.png + + classid probability + ------- ----------- + 656 0.666479 + 654 0.112940 + 581 0.068487 + 874 0.033385 + 436 0.026132 + 817 0.016731 + 675 0.010980 + 511 0.010592 + 569 0.008178 + 717 0.006336 + + This sample is an API example, for any performance measurements use the dedicated benchmark_app tool. + + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO Samples ` +- :doc:`Convert a Model ` +- :doc:`C API Reference ` +- `Hello Classification Python Sample on Github `__ +- `Hello Classification C++ Sample on Github `__ +- `Hello Classification C Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_nv12_input_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_nv12_input_classification.rst new file mode 100644 index 00000000000000..c9a53ede7229d2 --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_nv12_input_classification.rst @@ -0,0 +1,218 @@ +.. {#openvino_sample_hello_nv12_input_classification} + +Hello NV12 Input Classification Sample +====================================== + + +.. meta:: + :description: Learn how to do inference of image + classification models with images in NV12 color format using + Synchronous Inference Request (C++) API. + + +This sample demonstrates how to execute an inference of image classification models +with images in NV12 color format using Synchronous Inference Request API. Before +using the sample, refer to the following requirements: + +- The sample accepts any file format supported by ``ov::Core::read_model``. 
+- The sample has been validated with the :doc:`alexnet ` model and
+  uncompressed images in the NV12 color format (\*.yuv)
+- To build the sample, use instructions available at :ref:`Build the Sample Applications `
+  section in "Get Started with Samples" guide.
+
+How It Works
+####################
+
+At startup, the sample application reads command-line parameters, loads the
+specified model and an image in the NV12 color format to an OpenVINO™ Runtime
+plugin. Then, the sample creates a synchronous inference request object. When
+inference is done, the application outputs data to the standard output stream.
+You can place labels in a ``.labels`` file next to the model to get human-readable output.
+
+.. tab-set::
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/cpp/hello_nv12_input_classification/main.cpp
+            :language: cpp
+
+   .. tab-item:: C
+      :sync: c
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/c/hello_nv12_input_classification/main.c
+            :language: c
+
+
+You can see the explicit description of each sample step at
+:doc:`Integration Steps `
+section of "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+Running
+####################
+
+.. tab-set::
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         hello_nv12_input_classification
+
+   .. tab-item:: C
+      :sync: c
+
+      .. code-block:: console
+
+         hello_nv12_input_classification_c
+
+
+To run the sample, you need to specify a model and an image:
+
+- You can get a model specific for your inference task from one of the model
+  repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub.
+- You can use images from the media files collection available at
+  `the storage `__.
+
+The sample accepts an uncompressed image in the NV12 color format. To run the
+sample, you need to convert your BGR/RGB image to NV12. To do this, you can use
+one of the widely available tools, such as FFmpeg or GStreamer. Using FFmpeg and
+the following command, you can convert an ordinary image to an uncompressed NV12 image:
+
+.. code-block:: sh
+
+   ffmpeg -i cat.jpg -pix_fmt nv12 cat.yuv
+
+
+.. note::
+
+   - Because the sample reads raw image files, you should provide a correct image
+     size along with the image path. The sample expects the logical size of the
+     image, not the buffer size. For example, for a 640x480 BGR/RGB image, the
+     corresponding NV12 logical image size is also 640x480, whereas the buffer
+     size is 640x720.
+   - By default, this sample expects that model input has BGR channels order. If
+     you trained your model to work with RGB order, you need to reconvert your
+     model using the model conversion API with the ``reverse_input_channels`` argument
+     specified. For more information about the argument, refer to the **When to Reverse
+     Input Channels** section of :doc:`Embedding Preprocessing Computation `.
+   - Before running the sample with a trained model, make sure the model is
+     converted to the intermediate representation (IR) format (\*.xml + \*.bin)
+     using the :doc:`model conversion API `.
+   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
+
+Example
++++++++
+
+1. Download a pre-trained model.
+2. You can convert it by using:
+
+   .. code-block:: console
+
+      ovc ./models/alexnet
+
+3. Perform inference of an NV12 image, using a model on a ``CPU``, for example:
+
+   .. tab-set::
+
+      .. tab-item:: C++
+         :sync: cpp
+
+         .. code-block:: console
+
+            hello_nv12_input_classification ./models/alexnet.xml ./images/cat.yuv 300x300 CPU
+
+      .. tab-item:: C
+         :sync: c
+
+
+         .. 
code-block:: console + + hello_nv12_input_classification_c ./models/alexnet.xml ./images/cat.yuv 300x300 CPU + + +Sample Output +############# + +.. tab-set:: + + .. tab-item:: C++ + :sync: cpp + + The application outputs top-10 inference results. + + .. code-block:: console + + [ INFO ] OpenVINO Runtime version ......... + [ INFO ] Build ........... + [ INFO ] + [ INFO ] Loading model files: \models\alexnet.xml + [ INFO ] model name: AlexNet + [ INFO ] inputs + [ INFO ] input name: data + [ INFO ] input type: f32 + [ INFO ] input shape: {1, 3, 227, 227} + [ INFO ] outputs + [ INFO ] output name: prob + [ INFO ] output type: f32 + [ INFO ] output shape: {1, 1000} + + Top 10 results: + + Image \images\car.yuv + + classid probability + ------- ----------- + 656 0.6668988 + 654 0.1125269 + 581 0.0679280 + 874 0.0340229 + 436 0.0257744 + 817 0.0169367 + 675 0.0110199 + 511 0.0106134 + 569 0.0083373 + 717 0.0061734 + + .. tab-item:: C + :sync: c + + The application outputs top-10 inference results. + + .. code-block:: console + + Top 10 results: + + Image ./cat.yuv + + classid probability + ------- ----------- + 435 0.091733 + 876 0.081725 + 999 0.069305 + 587 0.043726 + 666 0.038957 + 419 0.032892 + 285 0.030309 + 700 0.029941 + 696 0.021628 + 855 0.020339 + + This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool + + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO Samples ` +- :doc:`Convert a Model ` +- `API Reference `__ +- `Hello NV12 Input Classification C++ Sample on Github `__ +- `Hello NV12 Input Classification C Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst new file mode 100644 index 00000000000000..73294f95a49747 --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_query_device.rst @@ -0,0 +1,191 @@ +.. {#openvino_sample_hello_query_device} + +Hello Query Device Sample +========================= + + +.. meta:: + :description: Learn how to show metrics and default + configuration values of inference devices using Query + Device API feature (Python, C++). + + +This sample demonstrates how to show OpenVINO™ Runtime devices and prints their +metrics and default configuration values using :doc:`Query Device API feature `. +To build the sample, use instructions available at :ref:`Build the Sample Applications ` +section in "Get Started with Samples" guide. + +How It Works +#################### + +The sample queries all available OpenVINO™ Runtime devices and prints their +supported metrics and plugin configuration parameters. + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. scrollbox:: + + .. doxygensnippet:: samples/python/hello_query_device/hello_query_device.py + :language: python + + + .. tab-item:: C++ + :sync: cpp + + .. scrollbox:: + + .. doxygensnippet:: samples/cpp/hello_query_device/main.cpp + :language: cpp + + +Running +#################### + +The sample has no command-line parameters. To see the report, run the following command: + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python hello_query_device.py + + .. tab-item:: C++ + :sync: cpp + + .. 
code-block:: console + + hello_query_device + + + +Sample Output +#################### + +The application prints all available devices with their supported metrics and +default values for configuration parameters. +For example: + + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + [ INFO ] Available devices: + [ INFO ] CPU : + [ INFO ] SUPPORTED_METRICS: + [ INFO ] AVAILABLE_DEVICES: + [ INFO ] FULL_DEVICE_NAME: Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz + [ INFO ] OPTIMIZATION_CAPABILITIES: FP32, FP16, INT8, BIN + [ INFO ] RANGE_FOR_ASYNC_INFER_REQUESTS: 1, 1, 1 + [ INFO ] RANGE_FOR_STREAMS: 1, 8 + [ INFO ] IMPORT_EXPORT_SUPPORT: True + [ INFO ] + [ INFO ] SUPPORTED_CONFIG_KEYS (default values): + [ INFO ] CACHE_DIR: + [ INFO ] CPU_BIND_THREAD: NO + [ INFO ] CPU_THREADS_NUM: 0 + [ INFO ] CPU_THROUGHPUT_STREAMS: 1 + [ INFO ] DUMP_EXEC_GRAPH_AS_DOT: + [ INFO ] ENFORCE_BF16: NO + [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO + [ INFO ] PERFORMANCE_HINT: + [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 + [ INFO ] PERF_COUNT: NO + [ INFO ] + [ INFO ] GNA : + [ INFO ] SUPPORTED_METRICS: + [ INFO ] AVAILABLE_DEVICES: GNA_SW + [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 1 + [ INFO ] FULL_DEVICE_NAME: GNA_SW + [ INFO ] GNA_LIBRARY_FULL_VERSION: 3.0.0.1455 + [ INFO ] IMPORT_EXPORT_SUPPORT: True + [ INFO ] + [ INFO ] SUPPORTED_CONFIG_KEYS (default values): + [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO + [ INFO ] GNA_COMPACT_MODE: YES + [ INFO ] GNA_COMPILE_TARGET: + [ INFO ] GNA_DEVICE_MODE: GNA_SW_EXACT + [ INFO ] GNA_EXEC_TARGET: + [ INFO ] GNA_FIRMWARE_MODEL_IMAGE: + [ INFO ] GNA_FIRMWARE_MODEL_IMAGE_GENERATION: + [ INFO ] GNA_LIB_N_THREADS: 1 + [ INFO ] GNA_PRECISION: I16 + [ INFO ] GNA_PWL_MAX_ERROR_PERCENT: 1.000000 + [ INFO ] GNA_PWL_UNIFORM_DESIGN: NO + [ INFO ] GNA_SCALE_FACTOR: 1.000000 + [ INFO ] GNA_SCALE_FACTOR_0: 1.000000 + [ INFO ] LOG_LEVEL: LOG_NONE + [ INFO ] PERF_COUNT: NO + [ INFO ] SINGLE_THREAD: YES + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + [ INFO ] OpenVINO Runtime version ......... + [ INFO ] Build ........... 
+ [ INFO ] + [ INFO ] Available devices: + [ INFO ] CPU + [ INFO ] SUPPORTED_METRICS: + [ INFO ] AVAILABLE_DEVICES : [ ] + [ INFO ] FULL_DEVICE_NAME : Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz + [ INFO ] OPTIMIZATION_CAPABILITIES : [ FP32 FP16 INT8 BIN ] + [ INFO ] RANGE_FOR_ASYNC_INFER_REQUESTS : { 1, 1, 1 } + [ INFO ] RANGE_FOR_STREAMS : { 1, 8 } + [ INFO ] IMPORT_EXPORT_SUPPORT : true + [ INFO ] SUPPORTED_CONFIG_KEYS (default values): + [ INFO ] CACHE_DIR : "" + [ INFO ] CPU_BIND_THREAD : NO + [ INFO ] CPU_THREADS_NUM : 0 + [ INFO ] CPU_THROUGHPUT_STREAMS : 1 + [ INFO ] DUMP_EXEC_GRAPH_AS_DOT : "" + [ INFO ] ENFORCE_BF16 : NO + [ INFO ] EXCLUSIVE_ASYNC_REQUESTS : NO + [ INFO ] PERFORMANCE_HINT : "" + [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS : 0 + [ INFO ] PERF_COUNT : NO + [ INFO ] + [ INFO ] GNA + [ INFO ] SUPPORTED_METRICS: + [ INFO ] AVAILABLE_DEVICES : [ GNA_SW_EXACT ] + [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS : 1 + [ INFO ] FULL_DEVICE_NAME : GNA_SW_EXACT + [ INFO ] GNA_LIBRARY_FULL_VERSION : 3.0.0.1455 + [ INFO ] IMPORT_EXPORT_SUPPORT : true + [ INFO ] SUPPORTED_CONFIG_KEYS (default values): + [ INFO ] EXCLUSIVE_ASYNC_REQUESTS : NO + [ INFO ] GNA_COMPACT_MODE : YES + [ INFO ] GNA_COMPILE_TARGET : "" + [ INFO ] GNA_DEVICE_MODE : GNA_SW_EXACT + [ INFO ] GNA_EXEC_TARGET : "" + [ INFO ] GNA_FIRMWARE_MODEL_IMAGE : "" + [ INFO ] GNA_FIRMWARE_MODEL_IMAGE_GENERATION : "" + [ INFO ] GNA_LIB_N_THREADS : 1 + [ INFO ] GNA_PRECISION : I16 + [ INFO ] GNA_PWL_MAX_ERROR_PERCENT : 1.000000 + [ INFO ] GNA_PWL_UNIFORM_DESIGN : NO + [ INFO ] GNA_SCALE_FACTOR : 1.000000 + [ INFO ] GNA_SCALE_FACTOR_0 : 1.000000 + [ INFO ] LOG_LEVEL : LOG_NONE + [ INFO ] PERF_COUNT : NO + [ INFO ] SINGLE_THREAD : YES + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO™ Toolkit Samples ` +- `Hello Query Device Python Sample on Github `__ +- `Hello Query Device C++ Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_reshape_ssd.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_reshape_ssd.rst new file mode 100644 index 00000000000000..0b516c797a6d57 --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_reshape_ssd.rst @@ -0,0 +1,213 @@ +.. {#openvino_sample_hello_reshape_ssd} + +Hello Reshape SSD Sample +======================== + + +.. meta:: + :description: Learn how to do inference of object detection + models using shape inference feature and Synchronous + Inference Request API (Python, C++). + + +This sample demonstrates how to do synchronous inference of object detection models +using :doc:`Shape Inference feature `. Before +using the sample, refer to the following requirements: + +- Models with only one input and output are supported. +- The sample accepts any file format supported by ``core.read_model``. +- The sample has been validated with: :doc:`mobilenet-ssd `, + :doc:`person-detection-retail-0013 ` + models and the NCHW layout format. +- To build the sample, use instructions available at :ref:`Build the Sample Applications ` + section in "Get Started with Samples" guide. + +How It Works +#################### + +At startup, the sample application reads command-line parameters, prepares input data, loads a specified model and image to the OpenVINO™ Runtime plugin, performs synchronous inference, and processes output data. +As a result, the program creates an output image, logging each step in a standard output stream. + +.. 
tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/python/hello_reshape_ssd/hello_reshape_ssd.py
+            :language: python
+
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/cpp/hello_reshape_ssd/main.cpp
+            :language: cpp
+
+
+You can see the explicit description of
+each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+Running
+####################
+
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         python hello_reshape_ssd.py
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         hello_reshape_ssd
+
+
+To run the sample, you need to specify a model and an image:
+
+- You can get a model specific for your inference task from one of the model
+  repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub.
+- You can use images from the media files collection available at
+  `the storage `__.
+
+.. note::
+
+   - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels
+     order. If you trained your model to work with RGB order, you need to manually
+     rearrange the default channels order in the sample or demo application or
+     reconvert your model using the model conversion API with the ``reverse_input_channels``
+     argument specified. For more information about the argument, refer to the
+     **When to Reverse Input Channels** section of
+     :doc:`Embedding Preprocessing Computation `.
+   - Before running the sample with a trained model, make sure the model is
+     converted to the intermediate representation (IR) format (\*.xml + \*.bin)
+     using :doc:`model conversion API `.
+   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
+
+Example
+++++++++++++++++++++
+
+1. Download a pre-trained model.
+2. You can convert it by using:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: python
+
+            import openvino as ov
+
+            ov_model = ov.convert_model('./test_data/models/mobilenet-ssd')
+            # or, when the model is a Python model object, for example, mobilenet_ssd
+            ov_model = ov.convert_model(mobilenet_ssd)
+
+      .. tab-item:: CLI
+         :sync: cli
+
+         .. code-block:: console
+
+            ovc ./test_data/models/mobilenet-ssd
+
+3. Perform inference of an image, using a model on a ``GPU``, for example:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: console
+
+            python hello_reshape_ssd.py ./test_data/models/mobilenet-ssd.xml banana.jpg GPU
+
+      .. tab-item:: C++
+         :sync: cpp
+
+         .. code-block:: console
+
+            hello_reshape_ssd ./models/person-detection-retail-0013.xml person_detection.bmp GPU
+
+
+Sample Output
+####################
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      The sample application logs each step in a standard output stream and
+      creates an output image, drawing bounding boxes for inference results
+      with over 50% confidence.
+
+      .. code-block:: console
+
+         [ INFO ] Creating OpenVINO Runtime Core
+         [ INFO ] Reading the model: C:/test_data/models/mobilenet-ssd.xml
+         [ INFO ] Reshaping the model to the height and width of the input image
+         [ INFO ] Loading the model to the plugin
+         [ INFO ] Starting inference in synchronous mode
+         [ INFO ] Found: class_id = 52, confidence = 0.98, coords = (21, 98), (276, 210)
+         [ INFO ] Image out.bmp was created!
+         [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
+
+
+   .. 
tab-item:: C++
+      :sync: cpp
+
+      The application renders an image with detected objects enclosed in rectangles.
+      It outputs the list of classes of the detected objects along with the
+      respective confidence values and the coordinates of the rectangles to the
+      standard output stream.
+
+      .. code-block:: console
+
+         [ INFO ] OpenVINO Runtime version .........
+         [ INFO ] Build ...........
+         [ INFO ]
+         [ INFO ] Loading model files: \models\person-detection-retail-0013.xml
+         [ INFO ] model name: ResMobNet_v4 (LReLU) with single SSD head
+         [ INFO ] inputs
+         [ INFO ] input name: data
+         [ INFO ] input type: f32
+         [ INFO ] input shape: {1, 3, 320, 544}
+         [ INFO ] outputs
+         [ INFO ] output name: detection_out
+         [ INFO ] output type: f32
+         [ INFO ] output shape: {1, 1, 200, 7}
+         Reshape network to the image size = [960x1699]
+         [ INFO ] model name: ResMobNet_v4 (LReLU) with single SSD head
+         [ INFO ] inputs
+         [ INFO ] input name: data
+         [ INFO ] input type: f32
+         [ INFO ] input shape: {1, 3, 960, 1699}
+         [ INFO ] outputs
+         [ INFO ] output name: detection_out
+         [ INFO ] output type: f32
+         [ INFO ] output shape: {1, 1, 200, 7}
+         [0,1] element, prob = 0.716309, (852,187)-(983,520)
+         The resulting image was saved in the file: hello_reshape_ssd_output.bmp
+
+         This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
+
+
+Additional Resources
+####################
+
+- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
+- :doc:`Get Started with Samples `
+- :doc:`Using OpenVINO Samples `
+- :doc:`Convert a Model `
+- `Hello Reshape SSD Python Sample on Github `__
+- `Hello Reshape SSD C++ Sample on Github `__
+
diff --git a/docs/articles_en/learn_openvino/openvino_samples/image_classification_async.rst b/docs/articles_en/learn_openvino/openvino_samples/image_classification_async.rst
new file mode 100644
index 00000000000000..2dad59f0ee2f97
--- /dev/null
+++ b/docs/articles_en/learn_openvino/openvino_samples/image_classification_async.rst
@@ -0,0 +1,334 @@
+.. {#openvino_sample_image_classification_async}
+
+Image Classification Async Sample
+=================================
+
+
+.. meta::
+   :description: Learn how to do inference of image classification models
+                 using Asynchronous Inference Request API (Python, C++).
+
+
+This sample demonstrates how to do inference of image classification models
+using Asynchronous Inference Request API. Before using the sample, refer to the
+following requirements:
+
+- Models with only one input and output are supported.
+- The sample accepts any file format supported by ``core.read_model``.
+- The sample has been validated with: :doc:`alexnet `, :doc:`googlenet-v1 ` models.
+- To build the sample, use instructions available at :ref:`Build the Sample Applications `
+  section in "Get Started with Samples" guide.
+
+
+How It Works
+####################
+
+At startup, the sample application reads command-line parameters, prepares input data, and
+loads a specified model and an image to the OpenVINO™ Runtime plugin.
+The batch size of the model is set according to the number of images read. Batch
+mode is independent of the asynchronous mode, and the asynchronous mode works
+efficiently with any batch size.
+
+Then, the sample creates an inference request object and assigns a completion callback
+to it. Within the completion callback handler, the inference request is started again.
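+
+The snippet below is a minimal, illustrative sketch of such a callback-driven
+flow, written with ``ov.AsyncInferQueue`` rather than the manual request
+re-submission the sample performs; the model path, device, and input data are
+assumptions.
+
+.. code-block:: python
+
+   import numpy as np
+   import openvino as ov
+
+   core = ov.Core()
+   compiled_model = core.compile_model("model.xml", "CPU")  # hypothetical model
+
+   # A pool of infer requests that run asynchronously.
+   infer_queue = ov.AsyncInferQueue(compiled_model, 4)
+
+   def completion_callback(request, image_index):
+       # Invoked when a request finishes; read its results here.
+       print(image_index, request.get_output_tensor().data.argmax())
+
+   infer_queue.set_callback(completion_callback)
+
+   # Dummy batch of images; the shape is an assumption for a typical image model.
+   images = [np.zeros((1, 3, 224, 224), dtype=np.float32) for _ in range(10)]
+   for i, image in enumerate(images):
+       infer_queue.start_async({0: image}, userdata=i)
+   infer_queue.wait_all()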
+
+After that, the application starts inference for the first infer request and waits
+until the 10th inference request execution has completed.
+The asynchronous mode can increase the throughput of image processing.
+
+When inference is done, the application outputs data to the standard output stream.
+You can place labels in a ``.labels`` file next to the model to get human-readable output.
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/python/classification_sample_async/classification_sample_async.py
+            :language: python
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. scrollbox::
+
+         .. doxygensnippet:: samples/cpp/classification_sample_async/main.cpp
+            :language: cpp
+
+
+You can see the explicit description of each sample step at
+:doc:`Integration Steps `
+section of "Integrate OpenVINO™ Runtime with Your Application" guide.
+
+
+Running
+####################
+
+Run the application with the ``-h`` option to see the usage message:
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      .. code-block:: console
+
+         python classification_sample_async.py -h
+
+      Usage message:
+
+      .. code-block:: console
+
+         usage: classification_sample_async.py [-h] -m MODEL -i INPUT [INPUT ...]
+                                               [-d DEVICE]
+
+         Options:
+           -h, --help            Show this help message and exit.
+           -m MODEL, --model MODEL
+                                 Required. Path to an .xml or .onnx file with a trained
+                                 model.
+           -i INPUT [INPUT ...], --input INPUT [INPUT ...]
+                                 Required. Path to an image file(s).
+           -d DEVICE, --device DEVICE
+                                 Optional. Specify the target device to infer on; CPU,
+                                 GPU or HETERO: is acceptable. The sample
+                                 will look for a suitable plugin for device specified.
+                                 Default value is CPU.
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      .. code-block:: console
+
+         classification_sample_async -h
+
+      Usage instructions:
+
+      .. code-block:: console
+
+         [ INFO ] OpenVINO Runtime version .........
+         [ INFO ] Build ...........
+
+         classification_sample_async [OPTION]
+         Options:
+
+             -h                      Print usage instructions.
+             -m ""           Required. Path to an .xml file with a trained model.
+             -i ""             Required. Path to a folder with images or path to image files: a .ubyte file for LeNet and a .bmp file for other models.
+             -d ""      Optional. Specify the target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:" format to specify the HETERO plugin. Sample will look for a suitable plugin for the device specified.
+
+         Available target devices:
+
+
+To run the sample, you need to specify a model and an image:
+
+- You can get a model specific for your inference task from one of the model
+  repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub.
+- You can use images from the media files collection available at
+  `the storage `__.
+
+
+.. note::
+
+   - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the model conversion API with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
+
+   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `.
+
+   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
+
+   - The sample supports NCHW model layout only.
+
+   - When you specify a single-value option multiple times, only the last value will be used. For example, the ``-m`` flag:
+
+     .. tab-set::
+
+        .. tab-item:: Python
+           :sync: python
+
+           .. code-block:: console
+
+              python classification_sample_async.py -m model.xml -m model2.xml
+
+        .. tab-item:: C++
+           :sync: cpp
+
+           .. code-block:: console
+
+              ./classification_sample_async -m model.xml -m model2.xml
+
+
+Example
+++++++++++++++++++++
+
+
+1. Download a pre-trained model.
+2. You can convert it by using:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: python
+
+            import openvino as ov
+
+            ov_model = ov.convert_model('./models/alexnet')
+            # or, when the model is a Python model object
+            ov_model = ov.convert_model(alexnet)
+
+      .. tab-item:: CLI
+         :sync: cli
+
+         .. code-block:: console
+
+            ovc ./models/alexnet
+
+3. Perform inference of image files, using a model on a ``GPU``, for example:
+
+   .. tab-set::
+
+      .. tab-item:: Python
+         :sync: python
+
+         .. code-block:: console
+
+            python classification_sample_async.py -m ./models/alexnet.xml -i ./test_data/images/banana.jpg ./test_data/images/car.bmp -d GPU
+
+      .. tab-item:: C++
+         :sync: cpp
+
+         .. code-block:: console
+
+            classification_sample_async -m ./models/googlenet-v1.xml -i ./images/dog.bmp -d GPU
+
+
+Sample Output
+####################
+
+.. tab-set::
+
+   .. tab-item:: Python
+      :sync: python
+
+      The sample application logs each step in a standard output stream and
+      outputs top-10 inference results.
+
+      .. code-block:: console
+
+         [ INFO ] Creating OpenVINO Runtime Core
+         [ INFO ] Reading the model: C:/test_data/models/alexnet.xml
+         [ INFO ] Loading the model to the plugin
+         [ INFO ] Starting inference in asynchronous mode
+         [ INFO ] Image path: /test_data/images/banana.jpg
+         [ INFO ] Top 10 results:
+         [ INFO ] class_id probability
+         [ INFO ] --------------------
+         [ INFO ] 954      0.9707602
+         [ INFO ] 666      0.0216788
+         [ INFO ] 659      0.0032558
+         [ INFO ] 435      0.0008082
+         [ INFO ] 809      0.0004359
+         [ INFO ] 502      0.0003860
+         [ INFO ] 618      0.0002867
+         [ INFO ] 910      0.0002866
+         [ INFO ] 951      0.0002410
+         [ INFO ] 961      0.0002193
+         [ INFO ]
+         [ INFO ] Image path: /test_data/images/car.bmp
+         [ INFO ] Top 10 results:
+         [ INFO ] class_id probability
+         [ INFO ] --------------------
+         [ INFO ] 656      0.5120340
+         [ INFO ] 874      0.1142275
+         [ INFO ] 654      0.0697167
+         [ INFO ] 436      0.0615163
+         [ INFO ] 581      0.0552262
+         [ INFO ] 705      0.0304179
+         [ INFO ] 675      0.0151660
+         [ INFO ] 734      0.0151582
+         [ INFO ] 627      0.0148493
+         [ INFO ] 757      0.0120964
+         [ INFO ]
+         [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
+
+   .. tab-item:: C++
+      :sync: cpp
+
+      The sample application logs each step in a standard output stream and
+      outputs top-10 inference results.
+
+      .. code-block:: console
+
+         [ INFO ] OpenVINO Runtime version .........
+         [ INFO ] Build ...........
+ [ INFO ] + [ INFO ] Parsing input parameters + [ INFO ] Files were added: 1 + [ INFO ] /images/dog.bmp + [ INFO ] Loading model files: + [ INFO ] /models/googlenet-v1.xml + [ INFO ] model name: GoogleNet + [ INFO ] inputs + [ INFO ] input name: data + [ INFO ] input type: f32 + [ INFO ] input shape: {1, 3, 224, 224} + [ INFO ] outputs + [ INFO ] output name: prob + [ INFO ] output type: f32 + [ INFO ] output shape: {1, 1000} + [ INFO ] Read input images + [ INFO ] Set batch size 1 + [ INFO ] model name: GoogleNet + [ INFO ] inputs + [ INFO ] input name: data + [ INFO ] input type: u8 + [ INFO ] input shape: {1, 224, 224, 3} + [ INFO ] outputs + [ INFO ] output name: prob + [ INFO ] output type: f32 + [ INFO ] output shape: {1, 1000} + [ INFO ] Loading model to the device GPU + [ INFO ] Create infer request + [ INFO ] Start inference (asynchronous executions) + [ INFO ] Completed 1 async request execution + [ INFO ] Completed 2 async request execution + [ INFO ] Completed 3 async request execution + [ INFO ] Completed 4 async request execution + [ INFO ] Completed 5 async request execution + [ INFO ] Completed 6 async request execution + [ INFO ] Completed 7 async request execution + [ INFO ] Completed 8 async request execution + [ INFO ] Completed 9 async request execution + [ INFO ] Completed 10 async request execution + [ INFO ] Completed async requests execution + + Top 10 results: + + Image /images/dog.bmp + + classid probability + ------- ----------- + 156 0.8935547 + 218 0.0608215 + 215 0.0217133 + 219 0.0105667 + 212 0.0018835 + 217 0.0018730 + 152 0.0018730 + 157 0.0015745 + 154 0.0012817 + 220 0.0010099 + + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO™ Toolkit Samples ` +- :doc:`Convert a Model ` +- `Image Classification Async Python Sample on Github `__ +- `Image Classification Async C++ Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/model_creation.rst b/docs/articles_en/learn_openvino/openvino_samples/model_creation.rst new file mode 100644 index 00000000000000..1aaf9a1e3bffb1 --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/model_creation.rst @@ -0,0 +1,299 @@ +.. {#openvino_sample_model_creation} + +Model Creation Sample +===================== + + +.. meta:: + :description: Learn how to create a model on the fly with a + provided weights file and infer it later using Synchronous + Inference Request API (Python, C++). + + +This sample demonstrates how to run inference using a :doc:`model ` +built on the fly that uses weights from the LeNet classification model, which is +known to work well on digit classification tasks. You do not need an XML file, +the model is created from the source code on the fly. Before using the sample, +refer to the following requirements: + +- The sample accepts a model weights file (\*.bin). +- The sample has been validated with a LeNet model. +- To build the sample, use instructions available at :ref:`Build the Sample Applications ` + section in "Get Started with Samples" guide. + + +How It Works +#################### + +At startup, the sample application reads command-line parameters, :doc:`builds a model ` +and passes the weights file. Then, it loads the model and input data to the OpenVINO™ +Runtime plugin. Finally, it performs synchronous inference and processes output +data, logging each step in a standard output stream. + +.. tab-set:: + + .. 
tab-item:: Python + :sync: python + + .. scrollbox:: + + .. doxygensnippet:: samples/python/model_creation_sample/model_creation_sample.py + :language: python + + .. tab-item:: C++ + :sync: cpp + + .. scrollbox:: + + .. doxygensnippet:: samples/cpp/model_creation_sample/main.cpp + :language: cpp + + +You can see the explicit description of each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. + +Running +#################### + +To run the sample, you need to specify model weights and a device. + + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python model_creation_sample.py + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + model_creation_sample + + +.. note:: + + - This sample supports models with FP32 weights only. + - The ``lenet.bin`` weights file is generated by + :doc:`model conversion API ` + from the public LeNet model, with the ``input_shape [64,1,28,28]`` parameter specified. + - The original model is available in the + `Caffe repository `__ on GitHub. + +Example +++++++++++++++++++++ + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python model_creation_sample.py lenet.bin GPU + + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + model_creation_sample lenet.bin GPU + + +Sample Output +#################### + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + The sample application logs each step in a standard output stream and outputs 10 inference results. + + .. code-block:: console + + [ INFO ] Creating OpenVINO Runtime Core + [ INFO ] Loading the model using ngraph function with weights from lenet.bin + [ INFO ] Loading the model to the plugin + [ INFO ] Starting inference in synchronous mode + [ INFO ] Top 1 results: + [ INFO ] Image 0 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 0 1.0000000 0 + [ INFO ] + [ INFO ] Image 1 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 1 1.0000000 1 + [ INFO ] + [ INFO ] Image 2 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 2 1.0000000 2 + [ INFO ] + [ INFO ] Image 3 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 3 1.0000000 3 + [ INFO ] + [ INFO ] Image 4 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 4 1.0000000 4 + [ INFO ] + [ INFO ] Image 5 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 5 1.0000000 5 + [ INFO ] + [ INFO ] Image 6 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 6 1.0000000 6 + [ INFO ] + [ INFO ] Image 7 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 7 1.0000000 7 + [ INFO ] + [ INFO ] Image 8 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 8 1.0000000 8 + [ INFO ] + [ INFO ] Image 9 + [ INFO ] + [ INFO ] classid probability label + [ INFO ] ------------------------- + [ INFO ] 9 1.0000000 9 + [ INFO ] + [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool + + .. tab-item:: C++ + :sync: cpp + + The sample application logs each step in a standard output stream and outputs top-10 inference results. + + .. 
code-block:: console + + [ INFO ] OpenVINO Runtime version ......... + [ INFO ] Build ........... + [ INFO ] + [ INFO ] Device info: + [ INFO ] GPU + [ INFO ] Intel GPU plugin version ......... + [ INFO ] Build ........... + [ INFO ] + [ INFO ] + [ INFO ] Create model from weights: lenet.bin + [ INFO ] model name: lenet + [ INFO ] inputs + [ INFO ] input name: NONE + [ INFO ] input type: f32 + [ INFO ] input shape: {64, 1, 28, 28} + [ INFO ] outputs + [ INFO ] output name: output_tensor + [ INFO ] output type: f32 + [ INFO ] output shape: {64, 10} + [ INFO ] Batch size is 10 + [ INFO ] model name: lenet + [ INFO ] inputs + [ INFO ] input name: NONE + [ INFO ] input type: u8 + [ INFO ] input shape: {10, 28, 28, 1} + [ INFO ] outputs + [ INFO ] output name: output_tensor + [ INFO ] output type: f32 + [ INFO ] output shape: {10, 10} + [ INFO ] Compiling a model for the GPU device + [ INFO ] Create infer request + [ INFO ] Combine images in batch and set to input tensor + [ INFO ] Start sync inference + [ INFO ] Processing output tensor + + Top 1 results: + + Image 0 + + classid probability label + ------- ----------- ----- + 0 1.0000000 0 + + Image 1 + + classid probability label + ------- ----------- ----- + 1 1.0000000 1 + + Image 2 + + classid probability label + ------- ----------- ----- + 2 1.0000000 2 + + Image 3 + + classid probability label + ------- ----------- ----- + 3 1.0000000 3 + + Image 4 + + classid probability label + ------- ----------- ----- + 4 1.0000000 4 + + Image 5 + + classid probability label + ------- ----------- ----- + 5 1.0000000 5 + + Image 6 + + classid probability label + ------- ----------- ----- + 6 1.0000000 6 + + Image 7 + + classid probability label + ------- ----------- ----- + 7 1.0000000 7 + + Image 8 + + classid probability label + ------- ----------- ----- + 8 1.0000000 8 + + Image 9 + + classid probability label + ------- ----------- ----- + 9 1.0000000 9 + + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO Samples ` +- :doc:`Convert a Model ` +- `Model Creation Python Sample on Github `__ +- `Model Creation C++ Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.rst b/docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.rst deleted file mode 100644 index f77ddae387f3d9..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_benchmark_tool.rst +++ /dev/null @@ -1,501 +0,0 @@ -.. {#openvino_inference_engine_tools_benchmark_tool_README} - -Benchmark Python Tool -===================== - - -.. meta:: - :description: Learn how to use the Benchmark Python Tool to - estimate deep learning inference performance on supported - devices. - - -This page demonstrates how to use the Benchmark Python Tool to estimate deep learning inference performance on supported devices. - -.. note:: - - This page describes usage of the Python implementation of the Benchmark Tool. For the C++ implementation, refer to the :doc:`Benchmark C++ Tool ` page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. - -Basic Usage -#################### - -The Python benchmark_app is automatically installed when you install OpenVINO Developer Tools using :doc:`PyPI `. 
Before running ``benchmark_app``, make sure the ``openvino_env`` virtual environment is activated, and navigate to the directory where your model is located.

The benchmarking application works with models in the OpenVINO IR (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats.
Make sure to :doc:`convert your models ` if necessary.

To run benchmarking with default options on a model, use the following command:

.. code-block:: sh

   benchmark_app -m model.xml


By default, the application loads the specified model onto the CPU and performs inferencing on batches of randomly-generated data inputs for 60 seconds. As it loads, it prints information about the benchmark parameters. When benchmarking is completed, it reports the minimum, average, and maximum inferencing latency and the average throughput.

You may be able to improve benchmark results beyond the default configuration by configuring some of the execution parameters for your model. For example, you can use "throughput" or "latency" performance hints to optimize the runtime for higher FPS or reduced inferencing time. Read on to learn more about the configuration options available with benchmark_app.

Configuration Options
#####################

The benchmark app provides various options for configuring execution parameters. This section covers key configuration options for easily tuning benchmarking to achieve better performance on your device. A list of all configuration options is given in the :ref:`Advanced Usage ` section.

Performance hints: latency and throughput
+++++++++++++++++++++++++++++++++++++++++

The benchmark app allows users to provide high-level "performance hints" for setting latency-focused or throughput-focused inference modes. This hint causes the runtime to automatically adjust runtime parameters, such as the number of processing streams and the inference batch size, to prioritize reduced latency or high throughput.

The performance hints do not require any device-specific settings and are completely portable between devices. Parameters are automatically configured based on whichever device is being used. This allows users to easily port applications between hardware targets without having to re-determine the best runtime parameters for the new device.

If not specified, throughput is used as the default. To set the hint explicitly, use ``-hint latency`` or ``-hint throughput`` when running benchmark_app:

.. code-block:: sh

   benchmark_app -m model.xml -hint latency
   benchmark_app -m model.xml -hint throughput


.. note::

   It is up to the user to ensure the environment on which the benchmark is running is optimized for maximum performance. Otherwise, different results may occur when using the application in different environment settings (such as power optimization settings, processor overclocking, or thermal throttling).
   Specifying a flag that takes only a single value, such as ``-m``, multiple times (for example, ``benchmark_app -m model.xml -m model2.xml``) results in only the last value being used.
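If you prefer to set device-specific options such as the number of streams yourself, the option reference below recommends disabling the hint. An illustrative combination (the model path and stream count are placeholders):

.. code-block:: sh

   benchmark_app -m model.xml -hint none -nstreams 4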
Latency
--------------------

Latency is the amount of time it takes to process a single inference request. In applications where data needs to be inferenced and acted on as quickly as possible (such as autonomous driving), low latency is desirable. For conventional devices, lower latency is achieved by reducing the number of parallel processing streams so the system can utilize as many resources as possible to quickly calculate each inference request. However, advanced devices like multi-socket CPUs and modern GPUs are capable of running multiple inference requests while delivering the same latency.

When benchmark_app is run with ``-hint latency``, it determines the optimal number of parallel inference requests for minimizing latency while still maximizing the parallelization capabilities of the hardware. It automatically sets the number of processing streams and the inference batch size to achieve the best latency.

Throughput
--------------------

Throughput is the amount of data an inferencing pipeline can process at once, and it is usually measured in frames per second (FPS) or inferences per second. In applications where large amounts of data need to be inferenced simultaneously (such as multi-camera video streams), high throughput is needed. To achieve high throughput, the runtime focuses on fully saturating the device with enough data to process. It utilizes as much memory and as many parallel streams as possible to maximize the amount of data that can be processed simultaneously.

When benchmark_app is run with ``-hint throughput``, it maximizes the number of parallel inference requests to utilize all the threads available on the device. On GPU, it automatically sets the inference batch size to fill up the available GPU memory.

For more information on performance hints, see the :doc:`High-level Performance Hints ` page. For more details on optimal runtime configurations and how they are automatically determined using performance hints, see :doc:`Runtime Inference Optimizations `.


Device
++++++++++++++++++++

To set which device benchmarking runs on, use the ``-d `` argument. This will tell benchmark_app to run benchmarking on that specific device. The benchmark app supports CPU, GPU, and GNA devices. In order to use the GPU, the system must have the appropriate drivers installed. If no device is specified, benchmark_app will default to using the CPU.

For example, to run benchmarking on GPU, use:

.. code-block:: sh

   benchmark_app -m model.xml -d GPU


You may also specify ``AUTO`` as the device, in which case benchmark_app will automatically select the best device for benchmarking and support it with the CPU at the model loading stage. This may result in increased performance; therefore, it should be used purposefully. For more information, see the :doc:`Automatic device selection ` page.

(Note: If the latency or throughput hint is set, it will automatically configure streams and batch sizes for optimal performance based on the specified device.)

Number of iterations
++++++++++++++++++++

By default, the benchmarking app will run for a predefined duration, repeatedly performing inferencing with the model and measuring the resulting inference speed. There are several options for setting the number of inference iterations:

* Explicitly specify the number of iterations the model runs, using the ``-niter`` option.
* Set how much time the app runs for, using the ``-t`` option.
* Set both of them (execution will continue until both conditions are met), as shown in the example below.
* If neither ``-niter`` nor ``-t`` is specified, the app will run for a predefined duration that depends on the device.

The more iterations a model runs, the better the statistics will be for determining average latency and throughput.
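For instance, an illustrative run (the model path is a placeholder) that keeps inferencing until both 200 iterations and 30 seconds have elapsed:

.. code-block:: sh

   benchmark_app -m model.xml -niter 200 -t 30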
Inputs
++++++++++++++++++++

The benchmark tool runs benchmarking on user-provided input images in ``.jpg``, ``.bmp``, or ``.png`` format. Use the ``-i`` option to specify the path to an image or a folder of images. For example, to run benchmarking on an image named ``test1.jpg``, use:

.. code-block:: sh

   ./benchmark_app -m model.xml -i test1.jpg


The tool will repeatedly loop through the provided inputs and run inferencing on them for the specified amount of time or number of iterations. If the ``-i`` flag is not used, the tool will automatically generate random data to fit the input shape of the model.

Examples
++++++++++++++++++++

For more usage examples (and step-by-step instructions on how to set up a model for benchmarking), see the :ref:`Examples of Running the Tool ` section.

.. _advanced-usage-python-benchmark:

Advanced Usage
####################

.. note::

   By default, OpenVINO samples, tools, and demos expect input with BGR channel order. If you trained your model to work with RGB order, you need to manually rearrange the default channel order in the sample or demo application, or reconvert your model using Model Conversion API with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the "When to Reverse Input Channels" section of "Converting a Model to Intermediate Representation (IR)".


Per-layer performance and logging
+++++++++++++++++++++++++++++++++

The application also collects per-layer Performance Measurement (PM) counters for each executed infer request if you enable statistics dumping by setting the ``-report_type`` parameter to one of the possible values:

* The ``no_counters`` report includes the specified configuration options, the resulting FPS, and latency.
* The ``average_counters`` report extends the ``no_counters`` report and additionally includes average PM counter values for each layer from the network.
* The ``detailed_counters`` report extends the ``average_counters`` report and additionally includes per-layer PM counters and latency for each executed infer request.

Depending on the type, the report is stored in a ``benchmark_no_counters_report.csv``, ``benchmark_average_counters_report.csv``, or ``benchmark_detailed_counters_report.csv`` file located in the path specified by ``-report_folder``. The application also saves serialized executable graph information to an XML file if you specify a path to it with the ``-exec_graph_path`` parameter.

.. _all-configuration-options-python-benchmark:

All configuration options
+++++++++++++++++++++++++

Running the application with the ``-h`` or ``--help`` option yields the following usage message:

.. scrollbox::

   ..
code-block:: sh - - [Step 1/11] Parsing and validating input arguments - [ INFO ] Parsing input parameters - usage: benchmark_app.py [-h [HELP]] [-i PATHS_TO_INPUT [PATHS_TO_INPUT ...]] -m PATH_TO_MODEL [-d TARGET_DEVICE] - [-hint {throughput,cumulative_throughput,latency,none}] [-niter NUMBER_ITERATIONS] [-t TIME] [-b BATCH_SIZE] [-shape SHAPE] - [-data_shape DATA_SHAPE] [-layout LAYOUT] [-extensions EXTENSIONS] [-c PATH_TO_CLDNN_CONFIG] [-cdir CACHE_DIR] [-lfile [LOAD_FROM_FILE]] - [-api {sync,async}] [-nireq NUMBER_INFER_REQUESTS] [-nstreams NUMBER_STREAMS] [-inference_only [INFERENCE_ONLY]] - [-infer_precision INFER_PRECISION] [-ip {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}] - [-op {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}] [-iop INPUT_OUTPUT_PRECISION] [--mean_values [R,G,B]] [--scale_values [R,G,B]] - [-nthreads NUMBER_THREADS] [-pin {YES,NO,NUMA,HYBRID_AWARE}] [-latency_percentile LATENCY_PERCENTILE] - [-report_type {no_counters,average_counters,detailed_counters}] [-report_folder REPORT_FOLDER] [-pc [PERF_COUNTS]] - [-pcsort {no_sort,sort,simple_sort}] [-pcseq [PCSEQ]] [-exec_graph_path EXEC_GRAPH_PATH] [-dump_config DUMP_CONFIG] [-load_config LOAD_CONFIG] - - Options: - -h [HELP], --help [HELP] - Show this help message and exit. - - -i PATHS_TO_INPUT [PATHS_TO_INPUT ...], --paths_to_input PATHS_TO_INPUT [PATHS_TO_INPUT ...] - Optional. Path to a folder with images and/or binaries or to specific image or binary file.It is also allowed to map files to model inputs: - input_1:file_1/dir1,file_2/dir2,input_4:file_4/dir4 input_2:file_3/dir3 Currently supported data types: bin, npy. If OPENCV is enabled, this - functionalityis extended with the following data types: bmp, dib, jpeg, jpg, jpe, jp2, png, pbm, pgm, ppm, sr, ras, tiff, tif. - - -m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL - Required. Path to an .xml/.onnx file with a trained model or to a .blob file with a trained compiled model. - - -d TARGET_DEVICE, --target_device TARGET_DEVICE - Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU. Use '-d HETERO:' format to specify HETERO plugin. Use '-d MULTI:' format to specify MULTI plugin. The - application looks for a suitable plugin for the specified device. - - -hint {throughput,cumulative_throughput,latency,none}, --perf_hint {throughput,cumulative_throughput,latency,none} - Optional. Performance hint (latency or throughput or cumulative_throughput or none). Performance hint allows the OpenVINO device to select the - right model-specific settings. 'throughput': device performance mode will be set to THROUGHPUT. 'cumulative_throughput': device performance - mode will be set to CUMULATIVE_THROUGHPUT. 'latency': device performance mode will be set to LATENCY. 'none': no device performance mode will - be set. Using explicit 'nstreams' or other device-specific options, please set hint to 'none' - - -niter NUMBER_ITERATIONS, --number_iterations NUMBER_ITERATIONS - Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device. - - -t TIME, --time TIME Optional. Time in seconds to execute topology. - - -api {sync,async}, --api_type {sync,async} - Optional. Enable using sync/async API. Default value is async. - - - Input shapes: - -b BATCH_SIZE, --batch_size BATCH_SIZE - Optional. Batch size value. If not specified, the batch size value is determined from Intermediate Representation - - -shape SHAPE Optional. Set shape for input. 
For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size. This parameter - affect model Parameter shape, can be dynamic. For dynamic dimesions use symbol `?`, `-1` or range `low.. up`. - - -data_shape DATA_SHAPE - Optional. Optional if model shapes are all static (original ones or set by -shape).Required if at least one input shape is dynamic and input - images are not provided.Set shape for input tensors. For example, "input1[1,3,224,224][1,3,448,448],input2[1,4][1,8]" or - "[1,3,224,224][1,3,448,448] in case of one input size. - - -layout LAYOUT Optional. Prompts how model layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input - size. - - - Advanced options: - -extensions EXTENSIONS, --extensions EXTENSIONS - Optional. Path or a comma-separated list of paths to libraries (.so or .dll) with extensions. - - -c PATH_TO_CLDNN_CONFIG, --path_to_cldnn_config PATH_TO_CLDNN_CONFIG - Optional. Required for GPU custom kernels. Absolute path to an .xml file with the kernels description. - - -cdir CACHE_DIR, --cache_dir CACHE_DIR - Optional. Enable model caching to specified directory - - -lfile [LOAD_FROM_FILE], --load_from_file [LOAD_FROM_FILE] - Optional. Loads model from file directly without read_model. - - -nireq NUMBER_INFER_REQUESTS, --number_infer_requests NUMBER_INFER_REQUESTS - Optional. Number of infer requests. Default value is determined automatically for device. - - -nstreams NUMBER_STREAMS, --number_streams NUMBER_STREAMS - Optional. Number of streams to use for inference on the CPU/GPU (for HETERO and MULTI device cases use format - :,: or just ). Default value is determined automatically for a device. Please note that - although the automatic selection usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very - small models. Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams - should be set to 1. See samples README for more details. - - -inference_only [INFERENCE_ONLY], --inference_only [INFERENCE_ONLY] - Optional. If true inputs filling only once before measurements (default for static models), else inputs filling is included into loop - measurement (default for dynamic models) - - -infer_precision INFER_PRECISION - Optional. Specifies the inference precision. Example #1: '-infer_precision bf16'. Example #2: '-infer_precision CPU:bf16,GPU:f32' - - -exec_graph_path EXEC_GRAPH_PATH, --exec_graph_path EXEC_GRAPH_PATH - Optional. Path to a file where to store executable graph information serialized. - - - Preprocessing options: - -ip {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}, --input_precision {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64} - Optional. Specifies precision for all input layers of the model. - - -op {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64}, --output_precision {bool,f16,f32,f64,i8,i16,i32,i64,u8,u16,u32,u64} - Optional. Specifies precision for all output layers of the model. - - -iop INPUT_OUTPUT_PRECISION, --input_output_precision INPUT_OUTPUT_PRECISION - Optional. Specifies precision for input and output layers by name. Example: -iop "input:f16, output:f16". Notice that quotes are required. - Overwrites precision from ip and op options for specified layers. - - --mean_values [R,G,B] - Optional. Mean values to be used for the input image per channel. Values to be provided in the [R,G,B] format. 
Can be defined for desired input - of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the - original model was trained. Applying the values affects performance and may cause type conversion - - --scale_values [R,G,B] - Optional. Scale values to be used for the input image per channel. Values are provided in the [R,G,B] format. Can be defined for desired input - of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". The exact meaning and order of channels depend on how the - original model was trained. If both --mean_values and --scale_values are specified, the mean is subtracted first and then scale is applied - regardless of the order of options in command line. Applying the values affects performance and may cause type conversion - - - Device-specific performance options: - -nthreads NUMBER_THREADS, --number_threads NUMBER_THREADS - Number of threads to use for inference on the CPU, GNA (including HETERO and MULTI cases). - - -pin {YES,NO,NUMA,HYBRID_AWARE}, --infer_threads_pinning {YES,NO,NUMA,HYBRID_AWARE} - Optional. Enable threads->cores ('YES' which is OpenVINO runtime's default for conventional CPUs), threads->(NUMA)nodes ('NUMA'), - threads->appropriate core types ('HYBRID_AWARE', which is OpenVINO runtime's default for Hybrid CPUs) or completely disable ('NO') CPU threads - pinning for CPU-involved inference. - - - Statistics dumping options: - -latency_percentile LATENCY_PERCENTILE, --latency_percentile LATENCY_PERCENTILE - Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median). - - -report_type {no_counters,average_counters,detailed_counters}, --report_type {no_counters,average_counters,detailed_counters} - Optional. Enable collecting statistics report. "no_counters" report contains configuration options specified, resulting FPS and latency. - "average_counters" report extends "no_counters" report and additionally includes average PM counters values for each layer from the model. - "detailed_counters" report extends "average_counters" report and additionally includes per-layer PM counters and latency for each executed - infer request. - - -report_folder REPORT_FOLDER, --report_folder REPORT_FOLDER - Optional. Path to a folder where statistics report is stored. - - -json_stats [JSON_STATS], --json_stats [JSON_STATS] - Optional. Enables JSON-based statistics output (by default reporting system will use CSV format). Should be used together with -report_folder option. - - -pc [PERF_COUNTS], --perf_counts [PERF_COUNTS] - Optional. Report performance counters. - - -pcsort {no_sort,sort,simple_sort}, --perf_counts_sort {no_sort,sort,simple_sort} - Optional. Report performance counters and analysis the sort hotpoint opts. sort: Analysis opts time cost, print by hotpoint order no_sort: - Analysis opts time cost, print by normal order simple_sort: Analysis opts time cost, only print EXECUTED opts by normal order - - -pcseq [PCSEQ], --pcseq [PCSEQ] - Optional. Report latencies for each shape in -data_shape sequence. - - -dump_config DUMP_CONFIG - Optional. Path to JSON file to dump OpenVINO parameters, which were set by application. - - -load_config LOAD_CONFIG - Optional. Path to JSON file to load custom OpenVINO parameters. - Please note, command line parameters have higher priority then parameters from configuration file. - Example 1: a simple JSON file for HW device with primary properties. 
      {
         "CPU": {"NUM_STREAMS": "3", "PERF_COUNT": "NO"}
      }
      Example 2: a simple JSON file for meta device(AUTO/MULTI) with HW device properties.
      {
         "AUTO": {
            "PERFORMANCE_HINT": "THROUGHPUT",
            "PERF_COUNT": "NO",
            "DEVICE_PROPERTIES": "{CPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:3},GPU:{INFERENCE_PRECISION_HINT:f32,NUM_STREAMS:5}}"
         }
      }


Running the application with an empty list of options yields the usage message given above and an error message.

More information on inputs
++++++++++++++++++++++++++

The benchmark tool supports topologies with one or more inputs. If a topology is not data-sensitive, you can skip the input parameter, and the inputs will be filled with random values. If a model has only image input(s), provide a folder with images or a path to an image as input. If a model has some specific input(s) (besides images), prepare binary file(s) or NumPy array(s) filled with data of the appropriate precision and provide a path to them as input. If a model has mixed input types, the input folder should contain all required files. Image inputs are filled with image files one by one. Binary inputs are filled with binary files one by one.

.. _examples-of-running-the-tool-python:

Examples of Running the Tool
############################

This section provides step-by-step instructions on how to run the Benchmark Tool with the ``asl-recognition`` Intel model on CPU or GPU devices. It uses random data as the input.

.. note::

   Internet access is required to execute the following steps successfully. If you have access to the Internet through a proxy server only, please make sure that it is configured in your OS environment.

1. Install OpenVINO Development Tools (if it hasn't been installed already):

   .. code-block:: sh

      pip install openvino-dev


2. Download the model using ``omz_downloader``, specifying the model name and the directory to download the model to:

   .. code-block:: sh

      omz_downloader --name asl-recognition-0004 --precisions FP16 --output_dir omz_models


3. Run the tool, specifying the location of the model .xml file, the device to perform inference on, and a performance hint. The following commands demonstrate how to run the Benchmark Tool in latency mode on CPU and in throughput mode on GPU:

   * On CPU (latency mode):

     .. code-block:: sh

        benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency


   * On GPU (throughput mode):

     .. code-block:: sh

        benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d GPU -hint throughput


The application outputs the number of executed iterations, the total duration of execution, latency, and throughput.
Additionally, if you set the ``-report_type`` parameter, the application outputs a statistics report. If you set the ``-pc`` parameter, the application outputs performance counters. If you set ``-exec_graph_path``, the application reports serialized executable graph information. All measurements, including per-layer PM counters, are reported in milliseconds.

An example of the information output when running benchmark_app on CPU in latency mode is shown below:

.. code-block:: sh

   benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency


..
code-block:: sh - - [Step 1/11] Parsing and validating input arguments - [ INFO ] Parsing input parameters - [ INFO ] Input command: /home/openvino/tools/benchmark_tool/benchmark_app.py -m omz_models/intel/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -hint latency - [Step 2/11] Loading OpenVINO Runtime - [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align - [ INFO ] - [ INFO ] Device info: - [ INFO ] CPU - [ INFO ] Build ................................. 2022.3.0-7750-c1109a7317e-feature/py_cpp_align - [ INFO ] - [ INFO ] - [Step 3/11] Setting device configuration - [Step 4/11] Reading model files - [ INFO ] Loading model files - [ INFO ] Read model took 147.82 ms - [ INFO ] Original model I/O parameters: - [ INFO ] Model inputs: - [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} - [ INFO ] Model outputs: - [ INFO ] output (node: output) : f32 / [...] / {1,100} - [Step 5/11] Resizing model to match image sizes and given batch - [ INFO ] Model batch size: 1 - [Step 6/11] Configuring input of the model - [ INFO ] Model inputs: - [ INFO ] input (node: input) : f32 / [N,C,D,H,W] / {1,3,16,224,224} - [ INFO ] Model outputs: - [ INFO ] output (node: output) : f32 / [...] / {1,100} - [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 974.64 ms - [Step 8/11] Querying optimal runtime parameters - [ INFO ] Model: - [ INFO ] NETWORK_NAME: torch-jit-export - [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 2 - [ INFO ] NUM_STREAMS: 2 - [ INFO ] AFFINITY: Affinity.CORE - [ INFO ] INFERENCE_NUM_THREADS: 0 - [ INFO ] PERF_COUNT: False - [ INFO ] INFERENCE_PRECISION_HINT: - [ INFO ] PERFORMANCE_HINT: PerformanceMode.LATENCY - [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 - [Step 9/11] Creating infer requests and preparing input tensors - [ WARNING ] No input files were given for input 'input'!. This input will be filled with random values! - [ INFO ] Fill input 'input' with random values - [Step 10/11] Measuring performance (Start inference asynchronously, 2 inference requests, limits: 60000 ms duration) - [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 38.41 ms - [Step 11/11] Dumping statistics report - [ INFO ] Count: 5380 iterations - [ INFO ] Duration: 60036.78 ms - [ INFO ] Latency: - [ INFO ] Median: 22.04 ms - [ INFO ] Average: 22.09 ms - [ INFO ] Min: 20.78 ms - [ INFO ] Max: 33.51 ms - [ INFO ] Throughput: 89.61 FPS - - -The Benchmark Tool can also be used with dynamically shaped networks to measure expected inference time for various input data shapes. See the ``-shape`` and ``-data_shape`` argument descriptions in the :ref:`All configuration options ` section to learn more about using dynamic shapes. Here is a command example for using benchmark_app with dynamic networks and a portion of the resulting output: - -.. code-block:: sh - - benchmark_app -m omz_models/intel/asl-recognition-0004/FP16/asl-recognition-0004.xml -d CPU -shape [-1,3,16,224,224] -data_shape [1,3,16,224,224][2,3,16,224,224][4,3,16,224,224] -pcseq - - -.. code-block:: sh - - [Step 9/11] Creating infer requests and preparing input tensors - [ WARNING ] No input files were given for input 'input'!. This input will be filled with random values! 
   [ INFO ] Fill input 'input' with random values
   [ INFO ] Defined 3 tensor groups:
   [ INFO ]         input: {1, 3, 16, 224, 224}
   [ INFO ]         input: {2, 3, 16, 224, 224}
   [ INFO ]         input: {4, 3, 16, 224, 224}
   [Step 10/11] Measuring performance (Start inference asynchronously, 11 inference requests, limits: 60000 ms duration)
   [ INFO ] Benchmarking in full mode (inputs filling are included in measurement loop).
   [ INFO ] First inference took 201.15 ms
   [Step 11/11] Dumping statistics report
   [ INFO ] Count:            2811 iterations
   [ INFO ] Duration:         60271.71 ms
   [ INFO ] Latency:
   [ INFO ]    Median:        207.70 ms
   [ INFO ]    Average:       234.56 ms
   [ INFO ]    Min:           85.73 ms
   [ INFO ]    Max:           773.55 ms
   [ INFO ] Latency for each data shape group:
   [ INFO ] 1. input: {1, 3, 16, 224, 224}
   [ INFO ]    Median:        118.08 ms
   [ INFO ]    Average:       115.05 ms
   [ INFO ]    Min:           85.73 ms
   [ INFO ]    Max:           339.25 ms
   [ INFO ] 2. input: {2, 3, 16, 224, 224}
   [ INFO ]    Median:        207.25 ms
   [ INFO ]    Average:       205.16 ms
   [ INFO ]    Min:           166.98 ms
   [ INFO ]    Max:           545.55 ms
   [ INFO ] 3. input: {4, 3, 16, 224, 224}
   [ INFO ]    Median:        384.16 ms
   [ INFO ]    Average:       383.48 ms
   [ INFO ]    Min:           305.51 ms
   [ INFO ]    Max:           773.55 ms
   [ INFO ] Throughput:       108.82 FPS


See Also
####################

* :doc:`Using OpenVINO Samples `
* :doc:`Convert a Model `
* :doc:`Model Downloader `

diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst
deleted file mode 100644
index 546a95af34dc56..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst
+++ /dev/null
@@ -1,401 +0,0 @@
.. {#openvino_inference_engine_ie_bridges_python_sample_speech_sample_README}

Automatic Speech Recognition Python Sample
==========================================

.. meta::
   :description: Learn how to infer an acoustic model based on Kaldi
                 neural networks and speech feature vectors using Asynchronous
                 Inference Request (Python) API.

.. note::

   This sample is now deprecated and will be removed with OpenVINO 2024.0.
   The sample was mainly designed to demonstrate the features of the GNA plugin
   and the use of models produced by the Kaldi framework. OpenVINO support for
   these components is now deprecated and will be discontinued, making the sample
   redundant.

This sample demonstrates how to do synchronous inference of an acoustic model based on Kaldi* neural models and speech feature vectors.

The sample works with Kaldi ARK or NumPy* uncompressed NPZ files, so it does not cover an end-to-end speech recognition scenario (speech to text): additional preprocessing (feature extraction) is required to get a feature vector from a speech signal, as well as postprocessing (decoding) to produce text from scores.

.. tab-set::
   .. tab-item:: Requirements

      +-----------------------------+-----------------------------------------------------------------------------+
      | Options                     | Values                                                                      |
      +=============================+=============================================================================+
      | Validated Models            | Acoustic model based on Kaldi* neural models                                |
      |                             | (see the :ref:`Model Preparation ` section)                                 |
      +-----------------------------+-----------------------------------------------------------------------------+
      | Model Format                | OpenVINO™ toolkit Intermediate Representation (.xml + .bin)                 |
      +-----------------------------+-----------------------------------------------------------------------------+
      | Supported devices           | See the :ref:`Execution Modes ` section below and                           |
      |                             | :doc:`List Supported Devices `                                              |
      +-----------------------------+-----------------------------------------------------------------------------+
      | Other language realization  | :doc:`C++ `                                                                 |
      +-----------------------------+-----------------------------------------------------------------------------+

   .. tab-item:: Python API

      The Automatic Speech Recognition Python sample application demonstrates how to use the following Python API in applications:

      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | Feature                 | API                                                      | Description                                                            |
      +=========================+==========================================================+========================================================================+
      | Import/Export Model     | ``openvino.runtime.Core.import_model``,                  | The GNA plugin supports loading and saving of the GNA-optimized model  |
      |                         | ``openvino.runtime.CompiledModel.export_model``          |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | Model Operations        | ``openvino.runtime.Model.add_outputs``,                  | Manage the model: configure the batch size and the input and           |
      |                         | ``openvino.runtime.set_batch``,                          | output tensors                                                         |
      |                         | ``openvino.runtime.CompiledModel.inputs``,               |                                                                        |
      |                         | ``openvino.runtime.CompiledModel.outputs``,              |                                                                        |
      |                         | ``openvino.runtime.ConstOutput.any_name``                |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | Synchronous Infer       | ``openvino.runtime.CompiledModel.create_infer_request``, | Do synchronous inference                                               |
      |                         | ``openvino.runtime.InferRequest.infer``                  |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | InferRequest Operations | ``openvino.runtime.InferRequest.get_input_tensor``,      | Get info about the model using the infer request API                   |
      |                         | ``openvino.runtime.InferRequest.model_outputs``,         |                                                                        |
      |                         | ``openvino.runtime.InferRequest.model_inputs``           |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | InferRequest Operations | ``openvino.runtime.InferRequest.query_state``,           | Get and reset the CompiledModel state control                          |
      |                         | ``openvino.runtime.VariableState.reset``                 |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+
      | Profiling               | ``openvino.runtime.InferRequest.profiling_info``,        | Get infer request profiling info                                       |
      |                         | ``openvino.runtime.ProfilingInfo.real_time``             |                                                                        |
      +-------------------------+----------------------------------------------------------+------------------------------------------------------------------------+

      Basic OpenVINO™ Runtime API is covered by the :doc:`Hello Classification Python Sample `.

   .. tab-item:: Sample Code
      .. doxygensnippet:: samples/python/speech_sample/speech_sample.py
         :language: python

How It Works
############

At startup, the sample application reads command-line parameters, loads a specified model and input data to the OpenVINO™ Runtime plugin, and performs synchronous inference on all speech utterances stored in the input file, logging each step in a standard output stream.

You can see the explicit description of each sample step in the :doc:`Integration Steps ` section of the "Integrate OpenVINO™ Runtime with Your Application" guide.


GNA-specific details
####################

Quantization
++++++++++++

If the GNA device is selected (for example, using the ``-d GNA`` flag), the GNA OpenVINO™ Runtime plugin quantizes the model and the input feature vector sequence to integer representation before performing inference.

Several neural model quantization modes are supported:

- *static* - The first utterance in the input file is scanned for dynamic range. The scale factor (floating point scalar multiplier) required to scale the maximum input value of the first utterance to 16384 (15 bits) is used for all subsequent inputs. The neural model is quantized to accommodate the scaled input dynamic range.
- *user-defined* - The user may specify a scale factor via the ``-sf`` flag that will be used for static quantization.

The ``-qb`` flag provides a hint to the GNA plugin regarding the preferred target weight resolution for all layers.
For example, when ``-qb 8`` is specified, the plugin will use 8-bit weights wherever possible in the model.

.. note::

   It is not always possible to use 8-bit weights due to GNA hardware limitations. For example, convolutional layers always use 16-bit weights (GNA hardware versions 1 and 2). This limitation will be removed in GNA hardware version 3 and higher.

.. _execution-modes-speech-python:

Execution Modes
+++++++++++++++

Several execution modes are supported via the ``-d`` flag:

- ``CPU`` - All calculations are performed on the CPU device using the CPU plugin.
- ``GPU`` - All calculations are performed on the GPU device using the GPU plugin.
- ``NPU`` - All calculations are performed on the NPU device using the NPU plugin.
- ``GNA_AUTO`` - GNA hardware is used if available and the driver is installed. Otherwise, the GNA device is emulated in fast-but-not-bit-exact mode.
- ``GNA_HW`` - GNA hardware is used if available and the driver is installed. Otherwise, an error will occur.
- ``GNA_SW`` - Deprecated. The GNA device is emulated in fast-but-not-bit-exact mode.
- ``GNA_SW_FP32`` - Substitutes parameters and calculations from low precision to floating point (FP32).
- ``GNA_SW_EXACT`` - The GNA device is emulated in bit-exact mode.

Loading and Saving Models
+++++++++++++++++++++++++

The GNA plugin supports loading and saving of the GNA-optimized model (non-IR) via the ``-rg`` and ``-wg`` flags, which makes it possible to avoid the cost of full model quantization at run time.
The GNA plugin also supports export of firmware-compatible embedded model images for the Intel® Speech Enabling Developer Kit and the Amazon Alexa* Premium Far-Field Voice Development Kit via the ``-we`` flag (save only).
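For example, an illustrative invocation (reusing the model and input file names from the Speech Inference section below) that quantizes an IR model with 8-bit weights and writes the GNA-optimized model for later reuse via ``-rg``:

.. code-block:: sh

   python speech_sample.py -m wsj_dnn5b.xml -i dev93_10.ark -d GNA_AUTO -qb 8 -wg wsj_dnn5b.gna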
- -In addition to performing inference directly from a GNA model file, these options make it possible to: - -- Convert from IR format to GNA format model file (``-m``, ``-wg``) -- Convert from IR format to embedded format model file (``-m``, ``-we``) -- Convert from GNA format to embedded format model file (``-rg``, ``-we``) - -Running -####### - -Run the application with the ``-h`` option to see the usage message: - -.. code-block:: sh - - python speech_sample.py -h - -Usage message: - -.. code-block:: console - - usage: speech_sample.py [-h] (-m MODEL | -rg IMPORT_GNA_MODEL) -i INPUT [-o OUTPUT] [-r REFERENCE] [-d DEVICE] [-bs [1-8]] - [-layout LAYOUT] [-qb [8, 16]] [-sf SCALE_FACTOR] [-wg EXPORT_GNA_MODEL] - [-we EXPORT_EMBEDDED_GNA_MODEL] [-we_gen [GNA1, GNA3]] - [--exec_target [GNA_TARGET_2_0, GNA_TARGET_3_0]] [-pc] [-a [CORE, ATOM]] [-iname INPUT_LAYERS] - [-oname OUTPUT_LAYERS] [-cw_l CONTEXT_WINDOW_LEFT] [-cw_r CONTEXT_WINDOW_RIGHT] [-pwl_me PWL_ME] - - optional arguments: - -m MODEL, --model MODEL - Path to an .xml file with a trained model (required if -rg is missing). - -rg IMPORT_GNA_MODEL, --import_gna_model IMPORT_GNA_MODEL - Read GNA model from file using path/filename provided (required if -m is missing). - - Options: - -h, --help Show this help message and exit. - -i INPUT, --input INPUT - Required. Path(s) to input file(s). - Usage for a single file/layer: or . - Example of usage for several files/layers: :=,:=. - -o OUTPUT, --output OUTPUT - Optional. Output file name(s) to save scores (inference results). - Usage for a single file/layer: or . - Example of usage for several files/layers: :=,:=. - -r REFERENCE, --reference REFERENCE - Read reference score file(s) and compare inference results with reference scores. - Usage for a single file/layer: or . - Example of usage for several files/layers: :=,:=. - -d DEVICE, --device DEVICE - Optional. Specify a target device to infer on. CPU, GPU, NPU, GNA_AUTO, GNA_HW, GNA_SW_FP32, - GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. - HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified. - Default value is CPU. - -bs [1-8], --batch_size [1-8] - Optional. Batch size 1-8. - -layout LAYOUT Optional. Custom layout in format: "input0[value0],input1[value1]" or "[value]" (applied to all - inputs) - -qb [8, 16], --quantization_bits [8, 16] - Optional. Weight resolution in bits for GNA quantization: 8 or 16 (default 16). - -sf SCALE_FACTOR, --scale_factor SCALE_FACTOR - Optional. User-specified input scale factor for GNA quantization. - If the model contains multiple inputs, provide scale factors by separating them with commas. - For example: :,: or just to be applied to all inputs. - -wg EXPORT_GNA_MODEL, --export_gna_model EXPORT_GNA_MODEL - Optional. Write GNA model to file using path/filename provided. - -we EXPORT_EMBEDDED_GNA_MODEL, --export_embedded_gna_model EXPORT_EMBEDDED_GNA_MODEL - Optional. Write GNA embedded model to file using path/filename provided. - -we_gen [GNA1, GNA3], --embedded_gna_configuration [GNA1, GNA3] - Optional. GNA generation configuration string for embedded export. Can be GNA1 (default) or GNA3. - --exec_target [GNA_TARGET_2_0, GNA_TARGET_3_0] - Optional. Specify GNA execution target generation. By default, generation corresponds to the GNA HW - available in the system or the latest fully supported generation by the software. See the GNA - Plugin's GNA_EXEC_TARGET config option description. 
   -pc, --performance_counter
                         Optional. Enables performance report (specify -a to ensure arch accurate results).
   -a [CORE, ATOM], --arch [CORE, ATOM]
                         Optional. Specify architecture. CORE, ATOM with the combination of -pc.
   -cw_l CONTEXT_WINDOW_LEFT, --context_window_left CONTEXT_WINDOW_LEFT
                         Optional. Number of frames for left context windows (default is 0). Works only with context window
                         models. If you use the cw_l or cw_r flag, then batch size argument is ignored.
   -cw_r CONTEXT_WINDOW_RIGHT, --context_window_right CONTEXT_WINDOW_RIGHT
                         Optional. Number of frames for right context windows (default is 0). Works only with context window
                         models. If you use the cw_l or cw_r flag, then batch size argument is ignored.
   -pwl_me PWL_ME        Optional. The maximum percent of error for PWL function. The value must be in <0, 100> range. The
                         default value is 1.0.


.. _model-preparation-speech-python:

Model Preparation
#################

You can use the following model conversion command to convert a Kaldi nnet1 or nnet2 neural model to the OpenVINO™ toolkit Intermediate Representation format:

.. code-block:: sh

   mo --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir 

The following pre-trained models are available:

- rm_cnn4a_smbr
- rm_lstm4f
- wsj_dnn5b_smbr

All of them can be downloaded from the storage.

Speech Inference
################

You can do inference on Intel® processors with the GNA co-processor (or emulation library):

.. code-block:: sh

   python speech_sample.py -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_scores_10.ark -d GNA_AUTO -o result.npz


.. note::

   - Before running the sample with a trained model, make sure the model is converted to the Intermediate Representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `.
   - The sample supports input and output in the NumPy file format (.npz).
   - Specifying a flag that takes only a single value, such as ``-m``, multiple times (for example, ``python classification_sample_async.py -m model.xml -m model2.xml``) results in only the last value being used.

Sample Output
#############

The sample application logs each step in a standard output stream.

..
code-block:: sh - - [ INFO ] Creating OpenVINO Runtime Core - [ INFO ] Reading the model: /models/wsj_dnn5b_smbr_fp32.xml - [ INFO ] Using scale factor(s) calculated from first utterance - [ INFO ] For input 0 using scale factor of 2175.4322418 - [ INFO ] Loading the model to the plugin - [ INFO ] Starting inference in synchronous mode - [ INFO ] - [ INFO ] Utterance 0: - [ INFO ] Total time in Infer (HW and SW): 6326.06ms - [ INFO ] Frames in utterance: 1294 - [ INFO ] Average Infer time per frame: 4.89ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7051840 - [ INFO ] avg error: 0.0448388 - [ INFO ] avg rms error: 0.0582387 - [ INFO ] stdev error: 0.0371650 - [ INFO ] - [ INFO ] Utterance 1: - [ INFO ] Total time in Infer (HW and SW): 4526.57ms - [ INFO ] Frames in utterance: 1005 - [ INFO ] Average Infer time per frame: 4.50ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7575974 - [ INFO ] avg error: 0.0452166 - [ INFO ] avg rms error: 0.0586013 - [ INFO ] stdev error: 0.0372769 - [ INFO ] - [ INFO ] Utterance 2: - [ INFO ] Total time in Infer (HW and SW): 6636.56ms - [ INFO ] Frames in utterance: 1471 - [ INFO ] Average Infer time per frame: 4.51ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7191710 - [ INFO ] avg error: 0.0472226 - [ INFO ] avg rms error: 0.0612991 - [ INFO ] stdev error: 0.0390846 - [ INFO ] - [ INFO ] Utterance 3: - [ INFO ] Total time in Infer (HW and SW): 3927.01ms - [ INFO ] Frames in utterance: 845 - [ INFO ] Average Infer time per frame: 4.65ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7436461 - [ INFO ] avg error: 0.0477581 - [ INFO ] avg rms error: 0.0621334 - [ INFO ] stdev error: 0.0397457 - [ INFO ] - [ INFO ] Utterance 4: - [ INFO ] Total time in Infer (HW and SW): 3891.49ms - [ INFO ] Frames in utterance: 855 - [ INFO ] Average Infer time per frame: 4.55ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7071600 - [ INFO ] avg error: 0.0449147 - [ INFO ] avg rms error: 0.0585048 - [ INFO ] stdev error: 0.0374897 - [ INFO ] - [ INFO ] Utterance 5: - [ INFO ] Total time in Infer (HW and SW): 3378.61ms - [ INFO ] Frames in utterance: 699 - [ INFO ] Average Infer time per frame: 4.83ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.8870468 - [ INFO ] avg error: 0.0479243 - [ INFO ] avg rms error: 0.0625490 - [ INFO ] stdev error: 0.0401951 - [ INFO ] - [ INFO ] Utterance 6: - [ INFO ] Total time in Infer (HW and SW): 4034.31ms - [ INFO ] Frames in utterance: 790 - [ INFO ] Average Infer time per frame: 5.11ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.7648273 - [ INFO ] avg error: 0.0482702 - [ INFO ] avg rms error: 0.0629734 - [ INFO ] stdev error: 0.0404429 - [ INFO ] - [ INFO ] Utterance 7: - [ INFO ] Total time in Infer (HW and SW): 2854.04ms - [ INFO ] Frames in utterance: 622 - [ INFO ] Average Infer time per frame: 4.59ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] 
max error: 0.7389560 - [ INFO ] avg error: 0.0465543 - [ INFO ] avg rms error: 0.0604941 - [ INFO ] stdev error: 0.0386294 - [ INFO ] - [ INFO ] Utterance 8: - [ INFO ] Total time in Infer (HW and SW): 2493.28ms - [ INFO ] Frames in utterance: 548 - [ INFO ] Average Infer time per frame: 4.55ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.6680136 - [ INFO ] avg error: 0.0439341 - [ INFO ] avg rms error: 0.0574614 - [ INFO ] stdev error: 0.0370353 - [ INFO ] - [ INFO ] Utterance 9: - [ INFO ] Total time in Infer (HW and SW): 1654.67ms - [ INFO ] Frames in utterance: 368 - [ INFO ] Average Infer time per frame: 4.50ms - [ INFO ] - [ INFO ] Output blob name: affinetransform14 - [ INFO ] Number scores per frame: 3425 - [ INFO ] - [ INFO ] max error: 0.6550579 - [ INFO ] avg error: 0.0467643 - [ INFO ] avg rms error: 0.0605045 - [ INFO ] stdev error: 0.0383914 - [ INFO ] - [ INFO ] Total sample time: 39722.60ms - [ INFO ] File result.npz was created! - [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.rst deleted file mode 100644 index 09e8d0518b5294..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_bert_benchmark.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_python_sample_bert_benchmark_README} - -Bert Benchmark Python Sample -============================ - - -.. meta:: - :description: Learn how to estimate performance of a Bert model using Asynchronous Inference Request (Python) API. - - -This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. Unlike :doc:`demos ` this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. - -The following Python API is used in the application: - -.. tab-set:: - - .. tab-item:: Python API - - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Feature | API | Description | - +================================+=================================================+==============================================+ - | OpenVINO Runtime Version | [openvino.runtime.get_version] | Get Openvino API version. | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Basic Infer Flow | [openvino.runtime.Core], | Common API to do inference: compile a model. | - | | [openvino.runtime.Core.compile_model] | | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Asynchronous Infer | [openvino.runtime.AsyncInferQueue], | Do asynchronous inference. 
|
-      |                                | [openvino.runtime.AsyncInferQueue.start_async], |                                              |
-      |                                | [openvino.runtime.AsyncInferQueue.wait_all]     |                                              |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Model Operations               | [openvino.runtime.CompiledModel.inputs]         | Get inputs of a model.                       |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/python/benchmark/bert_benchmark/bert_benchmark.py
-         :language: python
-
-How It Works
-####################
-
-The sample downloads a model and a tokenizer, exports the model to ONNX, reads the exported model and reshapes it to enforce dynamic input shapes, compiles the resulting model, downloads a dataset, and runs benchmarking on the dataset.
-
-You can see the explicit description of
-each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Running
-####################
-
-Install the ``openvino`` Python package:
-
-.. code-block:: sh
-
-   python -m pip install openvino
-
-
-Install packages from ``requirements.txt``:
-
-.. code-block:: sh
-
-   python -m pip install -r requirements.txt
-
-
-Run the sample:
-
-.. code-block:: sh
-
-   python bert_benchmark.py
-
-
-Sample Output
-####################
-
-The sample outputs how long it takes to process a dataset.
-
-See Also
-####################
-
-* :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-* :doc:`Using OpenVINO Samples `
-* :doc:`Model Downloader `
-* :doc:`Convert a Model `
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst
deleted file mode 100644
index 91b894f9b43286..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-.. {#openvino_inference_engine_ie_bridges_python_sample_hello_classification_README}
-
-Hello Classification Python Sample
-==================================
-
-
-.. meta::
-   :description: Learn how to do inference of image classification
-                 models using Synchronous Inference Request (Python) API.
-
-
-This sample demonstrates how to do inference of image classification models using Synchronous Inference Request API.
-
-Models with only 1 input and output are supported.
-
-.. tab-set::
-
-   ..
tab-item:: Requirements - - +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Options | Values | - +===================================+===================================================================================================================================================================+ - | Validated Models | :doc:`alexnet `, :doc:`googlenet-v1 ` | - +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Format | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx) | - +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ `, :doc:`C ` | - +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - .. tab-item:: Python API - - The following Python API is used in the application: - - +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Feature | API | Description | - +=============================+===========================================================================================================================================================================================================================================+============================================================================================================================================================================================+ - | Basic Infer Flow | `openvino.runtime.Core `__ , | | - | | `openvino.runtime.Core.read_model `__ , | | - | | `openvino.runtime.Core.compile_model `__ | Common API to do inference | - +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Synchronous Infer | `openvino.runtime.CompiledModel.infer_new_request `__ | Do synchronous inference | - 
+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Operations | `openvino.runtime.Model.inputs `__ , | Managing of model | - | | `openvino.runtime.Model.outputs `__ | | - +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Preprocessing | `openvino.preprocess.PrePostProcessor `__ , | Set image of the original size as input for a model with other input size. Resize and layout conversions will be performed automatically by the corresponding plugin just before inference | - | | `openvino.preprocess.InputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_layout `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_spatial_static_shape `__ , | | - | | `openvino.preprocess.PreProcessSteps.resize `__ , | | - | | `openvino.preprocess.InputModelInfo.set_layout `__ , | | - | | `openvino.preprocess.OutputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.PrePostProcessor.build `__ | | - +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/python/hello_classification/hello_classification.py - :language: python - -How It Works -############ - -At startup, the sample application reads command-line parameters, prepares input data, loads a specified model and image to the OpenVINO™ Runtime plugin, performs synchronous inference, and processes output data, logging each step in a standard output stream. - -You can see the explicit description of each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Running -####### - -.. code-block:: console - - python hello_classification.py - -To run the sample, you need to specify a model and image: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. -- You can use images from the media files collection available at `the storage `__. - -.. note:: - - - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` argument specified. 
For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `. - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - -Example -+++++++ - -1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools: - - .. code-block:: console - - python -m pip install openvino-dev[caffe] - -2. Download a pre-trained model: - - .. code-block:: console - - omz_downloader --name alexnet - -3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter: - - .. code-block:: console - - omz_converter --name alexnet - -4. Perform inference of ``banana.jpg`` using the ``alexnet`` model on a ``GPU``, for example: - - .. code-block:: console - - python hello_classification.py alexnet.xml banana.jpg GPU - -Sample Output -############# - -The sample application logs each step in a standard output stream and outputs top-10 inference results. - -.. code-block:: console - - [ INFO ] Creating OpenVINO Runtime Core - [ INFO ] Reading the model: /models/alexnet/alexnet.xml - [ INFO ] Loading the model to the plugin - [ INFO ] Starting inference in synchronous mode - [ INFO ] Image path: /images/banana.jpg - [ INFO ] Top 10 results: - [ INFO ] class_id probability - [ INFO ] -------------------- - [ INFO ] 954 0.9703885 - [ INFO ] 666 0.0219518 - [ INFO ] 659 0.0033120 - [ INFO ] 435 0.0008246 - [ INFO ] 809 0.0004433 - [ INFO ] 502 0.0003852 - [ INFO ] 618 0.0002906 - [ INFO ] 910 0.0002848 - [ INFO ] 951 0.0002427 - [ INFO ] 961 0.0002213 - [ INFO ] - [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst deleted file mode 100644 index 1adfd6bb24af48..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README} - -Hello Query Device Python Sample -================================ - - -.. meta:: - :description: Learn how to show metrics and default - configuration values of inference devices using Query - Device (Python) API feature. - - -This sample demonstrates how to show OpenVINO™ Runtime devices and prints their metrics and default configuration values using :doc:`Query Device API feature `. - -.. tab-set:: - - .. 
tab-item:: Requirements - - +-------------------------------------------------------+--------------------------------------------------------------------------+ - | Options | Values | - +=======================================================+==========================================================================+ - | Supported devices | :doc:`All ` | - +-------------------------------------------------------+--------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ ` | - +-------------------------------------------------------+--------------------------------------------------------------------------+ - - .. tab-item:: Python API - - The following Python API is used in the application: - - +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ - | Feature | API | Description | - +=======================================+============================================================================================================================================================================================+========================================+ - | Basic | `openvino.runtime.Core `__ | Common API | - +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ - | Query Device | `openvino.runtime.Core.available_devices `__ , | Get device properties | - | | `openvino.runtime.Core.get_metric `__ , | | - | | `openvino.runtime.Core.get_config `__ | | - +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/python/hello_query_device/hello_query_device.py - :language: python - -How It Works -############ - -The sample queries all available OpenVINO™ Runtime devices and prints their supported metrics and plugin configuration parameters. - -Running -####### - -The sample has no command-line parameters. To see the report, run the following command: - -.. code-block:: console - - python hello_query_device.py - -Sample Output -############# - -The application prints all available devices with their supported metrics and default values for configuration parameters. -For example: - -.. 
code-block:: console - - [ INFO ] Available devices: - [ INFO ] CPU : - [ INFO ] SUPPORTED_METRICS: - [ INFO ] AVAILABLE_DEVICES: - [ INFO ] FULL_DEVICE_NAME: Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz - [ INFO ] OPTIMIZATION_CAPABILITIES: FP32, FP16, INT8, BIN - [ INFO ] RANGE_FOR_ASYNC_INFER_REQUESTS: 1, 1, 1 - [ INFO ] RANGE_FOR_STREAMS: 1, 8 - [ INFO ] IMPORT_EXPORT_SUPPORT: True - [ INFO ] - [ INFO ] SUPPORTED_CONFIG_KEYS (default values): - [ INFO ] CACHE_DIR: - [ INFO ] CPU_BIND_THREAD: NO - [ INFO ] CPU_THREADS_NUM: 0 - [ INFO ] CPU_THROUGHPUT_STREAMS: 1 - [ INFO ] DUMP_EXEC_GRAPH_AS_DOT: - [ INFO ] ENFORCE_BF16: NO - [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO - [ INFO ] PERFORMANCE_HINT: - [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 - [ INFO ] PERF_COUNT: NO - [ INFO ] - [ INFO ] GNA : - [ INFO ] SUPPORTED_METRICS: - [ INFO ] AVAILABLE_DEVICES: GNA_SW - [ INFO ] OPTIMAL_NUMBER_OF_INFER_REQUESTS: 1 - [ INFO ] FULL_DEVICE_NAME: GNA_SW - [ INFO ] GNA_LIBRARY_FULL_VERSION: 3.0.0.1455 - [ INFO ] IMPORT_EXPORT_SUPPORT: True - [ INFO ] - [ INFO ] SUPPORTED_CONFIG_KEYS (default values): - [ INFO ] EXCLUSIVE_ASYNC_REQUESTS: NO - [ INFO ] GNA_COMPACT_MODE: YES - [ INFO ] GNA_COMPILE_TARGET: - [ INFO ] GNA_DEVICE_MODE: GNA_SW_EXACT - [ INFO ] GNA_EXEC_TARGET: - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE: - [ INFO ] GNA_FIRMWARE_MODEL_IMAGE_GENERATION: - [ INFO ] GNA_LIB_N_THREADS: 1 - [ INFO ] GNA_PRECISION: I16 - [ INFO ] GNA_PWL_MAX_ERROR_PERCENT: 1.000000 - [ INFO ] GNA_PWL_UNIFORM_DESIGN: NO - [ INFO ] GNA_SCALE_FACTOR: 1.000000 - [ INFO ] GNA_SCALE_FACTOR_0: 1.000000 - [ INFO ] LOG_LEVEL: LOG_NONE - [ INFO ] PERF_COUNT: NO - [ INFO ] SINGLE_THREAD: YES - -See Also -######## - -- :doc:`Using OpenVINO™ Toolkit Samples ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst deleted file mode 100644 index e17233f675cea4..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst +++ /dev/null @@ -1,134 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_python_sample_hello_reshape_ssd_README} - -Hello Reshape SSD Python Sample -=============================== - - -.. meta:: - :description: Learn how to do inference of object detection - models using shape inference feature and Synchronous - Inference Request (Python) API. - - -This sample demonstrates how to do synchronous inference of object detection models using :doc:`Shape Inference feature `. - -Models with only 1 input and output are supported. - -.. tab-set:: - - .. 
tab-item:: Requirements - - +------------------------------------+---------------------------------------------------------------------------+ - | Options | Values | - +====================================+===========================================================================+ - | Validated Models | :doc:`mobilenet-ssd ` | - +------------------------------------+---------------------------------------------------------------------------+ - | Validated Layout | NCHW | - +------------------------------------+---------------------------------------------------------------------------+ - | Model Format | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx) | - +------------------------------------+---------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +------------------------------------+---------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ ` | - +------------------------------------+---------------------------------------------------------------------------+ - - .. tab-item:: Python API - - The following Python API is used in the application: - - +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ - | Feature | API | Description | - +====================================+================================================================================================================================================================================+======================================+ - | Model Operations | `openvino.runtime.Model.reshape `__ , | Managing of model | - | | `openvino.runtime.Model.input `__ , | | - | | `openvino.runtime.Output.get_any_name `__ , | | - | | `openvino.runtime.PartialShape `__ | | - +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ - - Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/python/hello_reshape_ssd/hello_reshape_ssd.py - :language: python - - -How It Works -############ - -At startup, the sample application reads command-line parameters, prepares input data, loads a specified model and image to the OpenVINO™ Runtime plugin, performs synchronous inference, and processes output data. -As a result, the program creates an output image, logging each step in a standard output stream. - -You can see the explicit description of -each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Running -####### - -.. code-block:: console - - python hello_reshape_ssd.py - -To run the sample, you need to specify a model and image: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. -- You can use images from the media files collection available at `the storage `. - -.. note:: - - - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. 
If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application, or reconvert your model using model conversion API with the ``reverse_input_channels`` argument specified. For more information about the argument, refer to the **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
-
-   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `.
-
-   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
-
-Example
-+++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: console
-
-      python -m pip install openvino-dev[caffe]
-
-2. Download a pre-trained model:
-
-   .. code-block:: console
-
-      omz_downloader --name mobilenet-ssd
-
-3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter:
-
-   .. code-block:: console
-
-      omz_converter --name mobilenet-ssd
-
-4. Perform inference of ``banana.jpg`` using the ``mobilenet-ssd`` model on a ``GPU``, for example:
-
-   .. code-block:: console
-
-      python hello_reshape_ssd.py mobilenet-ssd.xml banana.jpg GPU
-
-Sample Output
-#############
-
-The sample application logs each step in a standard output stream and creates an output image, drawing bounding boxes for inference results with over 50% confidence.
-
-.. code-block:: console
-
-   [ INFO ] Creating OpenVINO Runtime Core
-   [ INFO ] Reading the model: C:/test_data/models/mobilenet-ssd.xml
-   [ INFO ] Reshaping the model to the height and width of the input image
-   [ INFO ] Loading the model to the plugin
-   [ INFO ] Starting inference in synchronous mode
-   [ INFO ] Found: class_id = 52, confidence = 0.98, coords = (21, 98), (276, 210)
-   [ INFO ] Image out.bmp was created!
-   [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
-
-See Also
-########
-
-- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-- :doc:`Using OpenVINO™ Toolkit Samples `
-- :doc:`Model Downloader `
-- :doc:`Convert a Model `
-
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst
deleted file mode 100644
index f586689d891de5..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst
+++ /dev/null
@@ -1,188 +0,0 @@
-.. {#openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README}
-
-Image Classification Async Python Sample
-========================================
-
-
-.. meta::
-   :description: Learn how to do inference of image classification models
-                 using Asynchronous Inference Request (Python) API.
-
-
-This sample demonstrates how to do inference of image classification models using Asynchronous Inference Request API.
-
-Models with only 1 input and output are supported.
-
-.. tab-set::
-
-   .. tab-item:: Requirements
-
-      +----------------------------+-----------------------------------------------------------------------------------+
-      | Options                    | Values                                                                            |
-      +============================+===================================================================================+
-      | Validated Models           | :doc:`alexnet `                                                                   |
-      +----------------------------+-----------------------------------------------------------------------------------+
-      | Model Format               | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx)         |
-      +----------------------------+-----------------------------------------------------------------------------------+
-      | Supported devices          | :doc:`All `                                                                       |
-      +----------------------------+-----------------------------------------------------------------------------------+
-      | Other language realization | :doc:`C++ `                                                                       |
-      +----------------------------+-----------------------------------------------------------------------------------+
-
-   .. tab-item:: Python API
-
-      The following Python API is used in the application:
-
-      +--------------------+--------------------------------------------------------+---------------------------+
-      | Feature            | API                                                    | Description               |
-      +====================+========================================================+===========================+
-      | Asynchronous Infer | `openvino.runtime.AsyncInferQueue `__ ,                | Do asynchronous inference |
-      |                    | `openvino.runtime.AsyncInferQueue.set_callback `__ ,  |                           |
-      |                    | `openvino.runtime.AsyncInferQueue.start_async `__ ,   |                           |
-      |                    | `openvino.runtime.AsyncInferQueue.wait_all `__ ,      |                           |
-      |                    | `openvino.runtime.InferRequest.results `__            |                           |
-      +--------------------+--------------------------------------------------------+---------------------------+
-
-      Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python Sample `.
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/python/classification_sample_async/classification_sample_async.py
-         :language: python
-
-
-How It Works
-############
-
-At startup, the sample application reads command-line parameters, prepares input data, loads a specified model and image(s) to the OpenVINO™ Runtime plugin, performs asynchronous inference, and processes output data, logging each step in a standard output stream.
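-
-The asynchronous flow behind this sample can be reduced to a short sketch. The snippet below is illustrative rather than a copy of the sample: the model path, the device, the request count, and the random inputs are placeholder assumptions.
-
-.. code-block:: python
-
-   import numpy as np
-   from openvino.runtime import AsyncInferQueue, Core
-
-   core = Core()
-   compiled = core.compile_model("alexnet.xml", "CPU")  # placeholder model path and device
-
-   results = {}
-
-   def on_done(request, image_id):
-       # Copy the output: the queue reuses its infer requests between jobs.
-       results[image_id] = request.get_output_tensor().data.copy()
-
-   # The queue creates an optimal number of parallel infer requests for the device.
-   queue = AsyncInferQueue(compiled)
-   queue.set_callback(on_done)
-
-   input_shape = compiled.inputs[0].shape
-   for i in range(4):  # stand-in for iterating over real, preprocessed images
-       image = np.random.rand(*input_shape).astype(np.float32)
-       queue.start_async({0: image}, userdata=i)
-   queue.wait_all()  # blocks until every callback has fired
-
-Copying the output inside the callback matters, because the queue hands the same infer requests to later jobs.
-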
-You can see the explicit description of
-each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Running
-#######
-
-Run the application with the ``-h`` option to see the usage message:
-
-.. code-block:: sh
-
-   python classification_sample_async.py -h
-
-Usage message:
-
-.. code-block:: sh
-
-   usage: classification_sample_async.py [-h] -m MODEL -i INPUT [INPUT ...]
-                                         [-d DEVICE]
-
-   Options:
-     -h, --help            Show this help message and exit.
-     -m MODEL, --model MODEL
-                           Required. Path to an .xml or .onnx file with a trained
-                           model.
-     -i INPUT [INPUT ...], --input INPUT [INPUT ...]
-                           Required. Path to an image file(s).
-     -d DEVICE, --device DEVICE
-                           Optional. Specify the target device to infer on; CPU,
-                           GPU or HETERO: is acceptable. The sample
-                           will look for a suitable plugin for device specified.
-                           Default value is CPU.
-
-To run the sample, you need to specify a model and image:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-- You can use images from the media files collection available `here `__ .
-
-.. note::
-
-   - By default, OpenVINO™ Toolkit Samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using model conversion API with ``reverse_input_channels`` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of :doc:`Embedding Preprocessing Computation `.
-
-   - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `.
-
-   - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
-
-   - Passing a flag that takes only a single value, such as ``-m``, multiple times (for example, ``python classification_sample_async.py -m model.xml -m model2.xml``) results in only the last value being used.
-
-   - The sample supports the NCHW model layout only.
-
-Example
-+++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: sh
-
-      python -m pip install openvino-dev[caffe]
-
-2. Download a pre-trained model:
-
-   .. code-block:: sh
-
-      omz_downloader --name alexnet
-
-
-3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter:
-
-   .. code-block:: sh
-
-      omz_converter --name alexnet
-
-4. Perform inference of ``banana.jpg`` and ``car.bmp`` using the ``alexnet`` model on a ``GPU``, for example:
-
-   .. code-block:: sh
-
-      python classification_sample_async.py -m alexnet.xml -i banana.jpg car.bmp -d GPU
-
-Sample Output
-#############
-
-The sample application logs each step in a standard output stream and outputs top-10 inference results.
-
-..
code-block:: sh - - [ INFO ] Creating OpenVINO Runtime Core - [ INFO ] Reading the model: C:/test_data/models/alexnet.xml - [ INFO ] Loading the model to the plugin - [ INFO ] Starting inference in asynchronous mode - [ INFO ] Image path: /test_data/images/banana.jpg - [ INFO ] Top 10 results: - [ INFO ] class_id probability - [ INFO ] -------------------- - [ INFO ] 954 0.9707602 - [ INFO ] 666 0.0216788 - [ INFO ] 659 0.0032558 - [ INFO ] 435 0.0008082 - [ INFO ] 809 0.0004359 - [ INFO ] 502 0.0003860 - [ INFO ] 618 0.0002867 - [ INFO ] 910 0.0002866 - [ INFO ] 951 0.0002410 - [ INFO ] 961 0.0002193 - [ INFO ] - [ INFO ] Image path: /test_data/images/car.bmp - [ INFO ] Top 10 results: - [ INFO ] class_id probability - [ INFO ] -------------------- - [ INFO ] 656 0.5120340 - [ INFO ] 874 0.1142275 - [ INFO ] 654 0.0697167 - [ INFO ] 436 0.0615163 - [ INFO ] 581 0.0552262 - [ INFO ] 705 0.0304179 - [ INFO ] 675 0.0151660 - [ INFO ] 734 0.0151582 - [ INFO ] 627 0.0148493 - [ INFO ] 757 0.0120964 - [ INFO ] - [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst deleted file mode 100644 index 936e62760a151d..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst +++ /dev/null @@ -1,177 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_python_sample_model_creation_sample_README} - -Model Creation Python Sample -============================ - - -.. meta:: - :description: Learn how to create a model on the fly with a - provided weights file and infer it later using Synchronous - Inference Request (Python) API. - - -This sample demonstrates how to run inference using a :doc:`model ` built on the fly that uses weights from the LeNet classification model, which is known to work well on digit classification tasks. You do not need an XML file, the model is created from the source code on the fly. - -.. tab-set:: - - .. tab-item:: Requirements - - +------------------------------------------------+-----------------------------------------------------------------------------+ - | Options | Values | - +================================================+=============================================================================+ - | Validated Models | LeNet | - +------------------------------------------------+-----------------------------------------------------------------------------+ - | Model Format | Model weights file (\*.bin) | - +------------------------------------------------+-----------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +------------------------------------------------+-----------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ ` | - +------------------------------------------------+-----------------------------------------------------------------------------+ - - .. 
tab-item:: Python API - - The following OpenVINO Python API is used in the application: - - +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ - | Feature | API | Description | - +==========================================+==============================================================================================================================================================+====================================================================================+ - | Model Operations | `openvino.runtime.Model `__ , | Managing of model | - | | `openvino.runtime.set_batch `__ , | | - | | `openvino.runtime.Model.input `__ | | - +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ - | Opset operations | `openvino.runtime.op.Parameter `__ , | Description of a model topology using OpenVINO Python API | - | | `openvino.runtime.op.Constant `__ , | | - | | `openvino.runtime.opset8.convolution `__ , | | - | | `openvino.runtime.opset8.add `__ , | | - | | `openvino.runtime.opset1.max_pool `__ , | | - | | `openvino.runtime.opset8.reshape `__ , | | - | | `openvino.runtime.opset8.matmul `__ , | | - | | `openvino.runtime.opset8.relu `__ , | | - | | `openvino.runtime.opset8.softmax `__ | | - +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ - - Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/python/model_creation_sample/model_creation_sample.py - :language: python - -How It Works -############ - -At startup, the sample application does the following: - -- Reads command line parameters -- :doc:`Build a Model ` and passed weights file -- Loads the model and input data to the OpenVINO™ Runtime plugin -- Performs synchronous inference and processes output data, logging each step in a standard output stream - -You can see the explicit description of each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Running -####### - -To run the sample, you need to specify model weights and device. - -.. code-block:: console - - python model_creation_sample.py - -.. note:: - - - This sample supports models with FP32 weights only. - - - The ``lenet.bin`` weights file was generated by :doc:`model conversion API ` from the public LeNet model with the ``input_shape [64,1,28,28]`` parameter specified. - - - The original model is available in the `Caffe* repository `__ on GitHub\*. - -For example: - -.. code-block:: console - - python model_creation_sample.py lenet.bin GPU - -Sample Output -############# - -The sample application logs each step in a standard output stream and outputs 10 inference results. - -.. 
code-block:: console - - [ INFO ] Creating OpenVINO Runtime Core - [ INFO ] Loading the model using ngraph function with weights from lenet.bin - [ INFO ] Loading the model to the plugin - [ INFO ] Starting inference in synchronous mode - [ INFO ] Top 1 results: - [ INFO ] Image 0 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 0 1.0000000 0 - [ INFO ] - [ INFO ] Image 1 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 1 1.0000000 1 - [ INFO ] - [ INFO ] Image 2 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 2 1.0000000 2 - [ INFO ] - [ INFO ] Image 3 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 3 1.0000000 3 - [ INFO ] - [ INFO ] Image 4 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 4 1.0000000 4 - [ INFO ] - [ INFO ] Image 5 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 5 1.0000000 5 - [ INFO ] - [ INFO ] Image 6 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 6 1.0000000 6 - [ INFO ] - [ INFO ] Image 7 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 7 1.0000000 7 - [ INFO ] - [ INFO ] Image 8 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 8 1.0000000 8 - [ INFO ] - [ INFO ] Image 9 - [ INFO ] - [ INFO ] classid probability label - [ INFO ] ------------------------- - [ INFO ] 9 1.0000000 9 - [ INFO ] - [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool - -See Also -######## - -- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -- :doc:`Using OpenVINO™ Toolkit Samples ` -- :doc:`Model Downloader ` -- :doc:`Convert a Model ` - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.rst deleted file mode 100644 index 4e37c045c0e29d..00000000000000 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.rst +++ /dev/null @@ -1,142 +0,0 @@ -.. {#openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README} - -Sync Benchmark Python Sample -============================ - - -.. meta:: - :description: Learn how to estimate performance of a model using Synchronous Inference Request (Python) API. - - -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike :doc:`demos ` this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. - -.. tab-set:: - - .. 
tab-item:: Requirements
-
-      +--------------------------------+------------------------------------------------------------------------------+
-      | Options                        | Values                                                                       |
-      +================================+==============================================================================+
-      | Validated Models               | :doc:`alexnet `,                                                             |
-      |                                | :doc:`googlenet-v1 `,                                                        |
-      |                                | :doc:`yolo-v3-tf `,                                                          |
-      |                                | :doc:`face-detection-0200 `                                                  |
-      +--------------------------------+------------------------------------------------------------------------------+
-      | Model Format                   | OpenVINO™ toolkit Intermediate Representation                                |
-      |                                | (\*.xml + \*.bin), ONNX (\*.onnx)                                            |
-      +--------------------------------+------------------------------------------------------------------------------+
-      | Supported devices              | :doc:`All `                                                                  |
-      +--------------------------------+------------------------------------------------------------------------------+
-      | Other language realization     | :doc:`C++ `                                                                  |
-      +--------------------------------+------------------------------------------------------------------------------+
-
-   .. tab-item:: Python API
-
-      The following Python API is used in the application:
-
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Feature                        | API                                             | Description                                  |
-      +================================+=================================================+==============================================+
-      | OpenVINO Runtime Version       | [openvino.runtime.get_version]                  | Get OpenVINO API version.                    |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Basic Infer Flow               | [openvino.runtime.Core],                        | Common API to do inference: compile a model, |
-      |                                | [openvino.runtime.Core.compile_model],          | configure input tensors.                     |
-      |                                | [openvino.runtime.InferRequest.get_tensor]      |                                              |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Synchronous Infer              | [openvino.runtime.InferRequest.infer]           | Do synchronous inference.                    |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Model Operations               | [openvino.runtime.CompiledModel.inputs]         | Get inputs of a model.                       |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-      | Tensor Operations              | [openvino.runtime.Tensor.get_shape],            | Get a tensor shape and its data.             |
-      |                                | [openvino.runtime.Tensor.data]                  |                                              |
-      +--------------------------------+-------------------------------------------------+----------------------------------------------+
-
-   .. tab-item:: Sample Code
-
-      .. doxygensnippet:: samples/python/benchmark/sync_benchmark/sync_benchmark.py
-         :language: python
-
-How It Works
-####################
-
-The sample compiles a model for a given device, randomly generates input data, and performs synchronous inference multiple times for a given number of seconds. Then it processes and reports the performance results.
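-
-Stripped of statistics gathering, the measurement loop looks roughly like the sketch below. It is illustrative only: the model path and the 10-second duration are placeholder assumptions, and the sample itself reports richer statistics.
-
-.. code-block:: python
-
-   from time import perf_counter
-
-   import numpy as np
-   from openvino.runtime import Core
-
-   core = Core()
-   compiled = core.compile_model("googlenet-v1.xml", "CPU")  # placeholder model path
-   request = compiled.create_infer_request()
-
-   # Fill every input once with random data; the same tensors are reused each iteration.
-   for model_input in compiled.inputs:
-       tensor = request.get_tensor(model_input)
-       tensor.data[:] = np.random.rand(*tensor.get_shape())
-
-   latencies = []
-   start = perf_counter()
-   while perf_counter() - start < 10:  # run for a fixed number of seconds
-       begin = perf_counter()
-       request.infer()  # synchronous: returns once inference is done
-       latencies.append((perf_counter() - begin) * 1000)
-
-   print(f"Count: {len(latencies)} iterations")
-   print(f"Median latency: {sorted(latencies)[len(latencies) // 2]:.2f} ms")
-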
-You can see the explicit description of
-each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide.
-
-Running
-####################
-
-.. code-block:: sh
-
-   python sync_benchmark.py (default: CPU)
-
-
-To run the sample, you need to specify a model:
-
-- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `.
-
-.. note::
-
-   Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using the :doc:`model conversion API `.
-
-   The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
-
-Example
-++++++++++++++++++++
-
-1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools:
-
-   .. code-block:: sh
-
-      python -m pip install openvino-dev[caffe]
-
-
-2. Download a pre-trained model:
-
-   .. code-block:: sh
-
-      omz_downloader --name googlenet-v1
-
-
-3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter:
-
-   .. code-block:: sh
-
-      omz_converter --name googlenet-v1
-
-
-4. Perform benchmarking using the ``googlenet-v1`` model on a ``CPU``:
-
-   .. code-block:: sh
-
-      python sync_benchmark.py googlenet-v1.xml
-
-
-Sample Output
-####################
-
-The application outputs performance results.
-
-.. code-block:: sh
-
-   [ INFO ] OpenVINO:
-   [ INFO ] Build .................................
-   [ INFO ] Count: 2333 iterations
-   [ INFO ] Duration: 10003.59 ms
-   [ INFO ] Latency:
-   [ INFO ]     Median: 3.90 ms
-   [ INFO ]     Average: 4.29 ms
-   [ INFO ]     Min: 3.30 ms
-   [ INFO ]     Max: 10.11 ms
-   [ INFO ] Throughput: 233.22 FPS
-
-
-See Also
-####################
-
-* :doc:`Integrate the OpenVINO™ Runtime with Your Application `
-* :doc:`Using OpenVINO Samples `
-* :doc:`Model Downloader `
-* :doc:`Convert a Model `
-
diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.rst
deleted file mode 100644
index 4399fac0a3a0e9..00000000000000
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.rst
+++ /dev/null
@@ -1,148 +0,0 @@
-.. {#openvino_inference_engine_ie_bridges_python_sample_throughput_benchmark_README}
-
-Throughput Benchmark Python Sample
-==================================
-
-
-.. meta::
-   :description: Learn how to estimate performance of a model using Asynchronous Inference Request (Python) API in throughput mode.
-
-
-This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike :doc:`demos `, this sample doesn't have other configurable command line arguments. Feel free to modify the sample's source code to try out different options.
-
-The reported results may deviate from what :doc:`benchmark_app ` reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses the default model precision, which is usually float32.
-
-.. tab-set::
-
-   ..
tab-item:: Requirements - - +--------------------------------+------------------------------------------------------------------------------+ - | Options | Values | - +================================+==============================================================================+ - | Validated Models | :doc:`alexnet `, | - | | :doc:`googlenet-v1 `, | - | | :doc:`yolo-v3-tf `, | - | | :doc:`face-detection-0200 ` | - +--------------------------------+------------------------------------------------------------------------------+ - | Model Format | OpenVINO™ toolkit Intermediate Representation | - | | (\*.xml + \*.bin), ONNX (\*.onnx) | - +--------------------------------+------------------------------------------------------------------------------+ - | Supported devices | :doc:`All ` | - +--------------------------------+------------------------------------------------------------------------------+ - | Other language realization | :doc:`C++ ` | - +--------------------------------+------------------------------------------------------------------------------+ - - .. tab-item:: Python API - - The following Python API is used in the application: - - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Feature | API | Description | - +================================+=================================================+==============================================+ - | OpenVINO Runtime Version | [openvino.runtime.get_version] | Get Openvino API version. | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Basic Infer Flow | [openvino.runtime.Core], | Common API to do inference: compile a model, | - | | [openvino.runtime.Core.compile_model] | configure input tensors. | - | | [openvino.runtime.InferRequest.get_tensor] | | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Asynchronous Infer | [openvino.runtime.AsyncInferQueue], | Do asynchronous inference. | - | | [openvino.runtime.AsyncInferQueue.start_async], | | - | | [openvino.runtime.AsyncInferQueue.wait_all], | | - | | [openvino.runtime.InferRequest.results] | | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Model Operations | [openvino.runtime.CompiledModel.inputs] | Get inputs of a model. | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - | Tensor Operations | [openvino.runtime.Tensor.get_shape], | Get a tensor shape and its data. | - | | [openvino.runtime.Tensor.data] | | - +--------------------------------+-------------------------------------------------+----------------------------------------------+ - - .. tab-item:: Sample Code - - .. doxygensnippet:: samples/python/benchmark/throughput_benchmark/throughput_benchmark.py - :language: python - -How It Works -#################### - -The sample compiles a model for a given device, randomly generates input data, performs asynchronous inference multiple times for a given number of seconds. Then processes and reports performance results. - -You can see the explicit description of -each sample step at :doc:`Integration Steps ` section of "Integrate OpenVINO™ Runtime with Your Application" guide. - -Running -#################### - -.. 
code-block:: sh - - python throughput_benchmark.py (default: CPU) - - -To run the sample, you need to specify a model: - -- You can use :doc:`public ` or :doc:`Intel's ` pre-trained models from the Open Model Zoo. The models can be downloaded using the :doc:`Model Downloader `. - -.. note:: - - Before running the sample with a trained model, make sure the model is converted to the intermediate representation (IR) format (\*.xml + \*.bin) using :doc:`model conversion API `. - - The sample accepts models in ONNX format (.onnx) that do not require preprocessing. - - -Example -++++++++++++++++++++ - -1. Install the ``openvino-dev`` Python package to use Open Model Zoo Tools: - - .. code-block:: sh - - python -m pip install openvino-dev[caffe] - - -2. Download a pre-trained model using: - - .. code-block:: sh - - omz_downloader --name googlenet-v1 - - -3. If a model is not in the IR or ONNX format, it must be converted. You can do this using the model converter: - - .. code-block:: sh - - omz_converter --name googlenet-v1 - - -4. Perform benchmarking using the ``googlenet-v1`` model on a ``CPU``: - - .. code-block:: sh - - python throughput_benchmark.py googlenet-v1.xml - - -Sample Output -#################### - -The application outputs performance results. - -.. code-block:: sh - - [ INFO ] OpenVINO: - [ INFO ] Build ................................. - [ INFO ] Count: 2817 iterations - [ INFO ] Duration: 10012.65 ms - [ INFO ] Latency: - [ INFO ] Median: 13.80 ms - [ INFO ] Average: 14.10 ms - [ INFO ] Min: 8.35 ms - [ INFO ] Max: 28.38 ms - [ INFO ] Throughput: 281.34 FPS - - -See Also -#################### - -* :doc:`Integrate the OpenVINO™ Runtime with Your Application ` -* :doc:`Using OpenVINO Samples ` -* :doc:`Model Downloader ` -* :doc:`Convert a Model ` - diff --git a/docs/articles_en/learn_openvino/openvino_samples/sync_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/sync_benchmark.rst new file mode 100644 index 00000000000000..793bc11c5262e4 --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/sync_benchmark.rst @@ -0,0 +1,174 @@ +.. {#openvino_sample_sync_benchmark} + +Sync Benchmark Sample +===================== + + +.. meta:: + :description: Learn how to estimate performance of a model using Synchronous Inference Request API (Python, C++). + + +This sample demonstrates how to estimate performance of a model using Synchronous +Inference Request API. It makes sense to use synchronous inference only in latency +oriented scenarios. Models with static input shapes are supported. Unlike +:doc:`demos ` this sample does not have other configurable command-line +arguments. Feel free to modify sample's source code to try out different options. +Before using the sample, refer to the following requirements: + +- The sample accepts any file format supported by ``core.read_model``. +- The sample has been validated with: :doc:`alexnet `, + :doc:`googlenet-v1 `, :doc:`yolo-v3-tf `, + :doc:`face-detection-0200 ` models. +- To build the sample, use instructions available at :ref:`Build the Sample Applications ` + section in "Get Started with Samples" guide. + +How It Works +#################### + +The sample compiles a model for a given device, randomly generates input data, +performs synchronous inference multiple times for a given number of seconds. +Then, it processes and reports performance results. + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. scrollbox:: + + .. 
doxygensnippet:: samples/python/benchmark/sync_benchmark/sync_benchmark.py + :language: python + + .. tab-item:: C++ + :sync: cpp + + .. scrollbox:: + + .. doxygensnippet:: samples/cpp/benchmark/sync_benchmark/main.cpp + :language: cpp + + +You can see the explicit description of +each sample step at :doc:`Integration Steps ` +section of "Integrate OpenVINO™ Runtime with Your Application" guide. + +Running +#################### + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python sync_benchmark.py (default: CPU) + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + sync_benchmark (default: CPU) + + +To run the sample, you need to specify a model. You can get a model specific for +your inference task from one of model repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub. + +Example +++++++++++++++++++++ + +1. Download a pre-trained model. +2. You can convert it by using: + + .. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: python + + import openvino as ov + + ov_model = ov.convert_model('./models/googlenet-v1') + # or, when model is a Python model object + ov_model = ov.convert_model(googlenet-v1) + + .. tab-item:: CLI + :sync: cli + + .. code-block:: console + + ovc ./models/googlenet-v1 + +3. Perform benchmarking, using the ``googlenet-v1`` model on a ``CPU``: + + .. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python sync_benchmark.py googlenet-v1.xml + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + sync_benchmark googlenet-v1.xml + + +Sample Output +#################### + + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + The application outputs performance results. + + .. code-block:: console + + [ INFO ] OpenVINO: + [ INFO ] Build ................................. + [ INFO ] Count: 2333 iterations + [ INFO ] Duration: 10003.59 ms + [ INFO ] Latency: + [ INFO ] Median: 3.90 ms + [ INFO ] Average: 4.29 ms + [ INFO ] Min: 3.30 ms + [ INFO ] Max: 10.11 ms + [ INFO ] Throughput: 233.22 FPS + + .. tab-item:: C++ + :sync: cpp + + The application outputs performance results. + + .. code-block:: console + + [ INFO ] OpenVINO: + [ INFO ] Build ................................. + [ INFO ] Count: 992 iterations + [ INFO ] Duration: 15009.8 ms + [ INFO ] Latency: + [ INFO ] Median: 14.00 ms + [ INFO ] Average: 15.13 ms + [ INFO ] Min: 9.33 ms + [ INFO ] Max: 53.60 ms + [ INFO ] Throughput: 66.09 FPS + + +Additional Resources +#################### + +- :doc:`Integrate the OpenVINO™ Runtime with Your Application ` +- :doc:`Get Started with Samples ` +- :doc:`Using OpenVINO Samples ` +- :doc:`Convert a Model ` +- `Sync Benchmark Python Sample on Github `__ +- `Sync Benchmark C++ Sample on Github `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/throughput_benchmark.rst b/docs/articles_en/learn_openvino/openvino_samples/throughput_benchmark.rst new file mode 100644 index 00000000000000..ff7d667ca74b1e --- /dev/null +++ b/docs/articles_en/learn_openvino/openvino_samples/throughput_benchmark.rst @@ -0,0 +1,179 @@ +.. {#openvino_sample_throughput_benchmark} + +Throughput Benchmark Sample +=========================== + + +.. meta:: + :description: Learn how to estimate performance of a model using Asynchronous Inference Request API in throughput mode (Python, C++). + + +This sample demonstrates how to estimate performance of a model using Asynchronous +Inference Request API in throughput mode. 
Unlike :doc:`demos ` this sample +does not have other configurable command-line arguments. Feel free to modify sample's +source code to try out different options. + +The reported results may deviate from what :doc:`benchmark_app ` +reports. One example is model input precision for computer vision tasks. benchmark_app +sets ``uint8``, while the sample uses default model precision which is usually ``float32``. + +Before using the sample, refer to the following requirements: + +- The sample accepts any file format supported by ``core.read_model``. +- The sample has been validated with: :doc:`alexnet `, + :doc:`googlenet-v1 `, :doc:`yolo-v3-tf `, + :doc:`face-detection-0200 ` models. +- To build the sample, use instructions available at :ref:`Build the Sample Applications ` + section in "Get Started with Samples" guide. + +How It Works +#################### + +The sample compiles a model for a given device, randomly generates input data, +performs asynchronous inference multiple times for a given number of seconds. +Then, it processes and reports performance results. + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. scrollbox:: + + .. doxygensnippet:: samples/python/benchmark/throughput_benchmark/throughput_benchmark.py + :language: python + + .. tab-item:: C++ + :sync: cpp + + .. scrollbox:: + + .. doxygensnippet:: samples/cpp/benchmark/throughput_benchmark/main.cpp + :language: cpp + + +You can see the explicit description of each sample step at +:doc:`Integration Steps ` +section of "Integrate OpenVINO™ Runtime with Your Application" guide. + +Running +#################### + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python throughput_benchmark.py (default: CPU) + + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + throughput_benchmark (default: CPU) + + +To run the sample, you need to specify a model. You can get a model specific for +your inference task from one of model repositories, such as TensorFlow Zoo, HuggingFace, or TensorFlow Hub. + +Example +++++++++++++++++++++ + +1. Download a pre-trained model. +2. You can convert it by using: + + .. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: python + + import openvino as ov + + ov_model = ov.convert_model('./models/googlenet-v1') + # or, when model is a Python model object + ov_model = ov.convert_model(googlenet-v1) + + .. tab-item:: CLI + :sync: cli + + .. code-block:: console + + ovc ./models/googlenet-v1 + + +3. Perform benchmarking, using the ``googlenet-v1`` model on a ``CPU``: + + .. tab-set:: + + .. tab-item:: Python + :sync: python + + .. code-block:: console + + python throughput_benchmark.py ./models/googlenet-v1.xml + + .. tab-item:: C++ + :sync: cpp + + .. code-block:: console + + throughput_benchmark ./models/googlenet-v1.xml + + +Sample Output +#################### + +.. tab-set:: + + .. tab-item:: Python + :sync: python + + The application outputs performance results. + + .. code-block:: console + + [ INFO ] OpenVINO: + [ INFO ] Build ................................. + [ INFO ] Count: 2817 iterations + [ INFO ] Duration: 10012.65 ms + [ INFO ] Latency: + [ INFO ] Median: 13.80 ms + [ INFO ] Average: 14.10 ms + [ INFO ] Min: 8.35 ms + [ INFO ] Max: 28.38 ms + [ INFO ] Throughput: 281.34 FPS + + .. tab-item:: C++ + :sync: cpp + + The application outputs performance results. + + .. code-block:: console + + [ INFO ] OpenVINO: + [ INFO ] Build ................................. 
+         [ INFO ] Count: 1577 iterations
+         [ INFO ] Duration: 15024.2 ms
+         [ INFO ] Latency:
+         [ INFO ]     Median: 38.02 ms
+         [ INFO ]     Average: 38.08 ms
+         [ INFO ]     Min: 25.23 ms
+         [ INFO ]     Max: 49.16 ms
+         [ INFO ] Throughput: 104.96 FPS
+
+
+Additional Resources
+####################
+
+- :doc:`Integrate the OpenVINO™ Runtime with Your Application `
+- :doc:`Get Started with Samples `
+- :doc:`Using OpenVINO Samples `
+- :doc:`Convert a Model `
+- `Throughput Benchmark Python Sample on Github `__
+- `Throughput Benchmark C++ Sample on Github `__
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst
index c642c42d2f1a88..c3c6b27d2ba9ac 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst
@@ -59,7 +59,7 @@ For more details on plugin-specific feature limitations, see the corresponding p
 Enumerating Available Devices
 #######################################
 
-The OpenVINO Runtime API features dedicated methods of enumerating devices and their capabilities. See the :doc:`Hello Query Device C++ Sample `. This is an example output from the sample (truncated to device names only):
+The OpenVINO Runtime API features dedicated methods of enumerating devices and their capabilities. See the :doc:`Hello Query Device C++ Sample `. This is an example output from the sample (truncated to device names only):
 
 .. code-block:: sh
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst
index 849b331d12bc89..4ad10fbc386c56 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst
@@ -64,7 +64,7 @@ CPU plugin supports the following data types as inference precision of internal
 - ``i8`` (Intel® x86-64)
 - ``u1`` (Intel® x86-64)
 
-:doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices.
+:doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices.
 
 
 Quantized Data Types Specifics
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst
index a431271cdde778..c44e03b29de2b2 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst
@@ -75,7 +75,7 @@ For optimal work with POT quantized models, which include 2D convolutions on GNA
 * Choose a compile target with priority on: cross-platform execution, performance, memory, or power optimization.
 * To check interoperability in your application use: ``ov::intel_gna::execution_target`` and ``ov::intel_gna::compile_target``.
 
-:doc:`Speech C++ Sample ` can be used for experiments (see the ``-exec_target`` and ``-compile_target`` command line options).
+:doc:`Speech C++ Sample ` can be used for experiments (see the ``-exec_target`` and ``-compile_target`` command line options).
 
 
 Software Emulation Mode
@@ -148,7 +148,7 @@ This mode is going to be deprecated soon.
 GNA supports the ``i16`` and ``i8`` quantized data types as inference precision of internal primitives.
 
 GNA users are encouraged to use the :doc:`Post-Training Optimization Tool ` to get a model with quantization hints based on statistics for the provided dataset.
 
-:doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices.
+:doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices.
 
 :doc:`POT API Usage sample for GNA ` demonstrates how a model can be quantized for GNA, using POT API in two modes:
 
@@ -219,7 +219,7 @@ Import model:
 
 To compile a model, use either :ref:`compile Tool ` or
-:doc:`Speech C++ Sample `.
+:doc:`Speech C++ Sample `.
 
 Stateful Models
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
index c47b0126a8c419..ed7889e996471c 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
@@ -35,7 +35,7 @@ Device Naming Convention
 * If the system does not have an integrated GPU, devices are enumerated, starting from 0.
 * For GPUs with multi-tile architecture (multiple sub-devices in OpenCL terms), a specific tile may be addressed as ``GPU.X.Y``, where ``X,Y={0, 1, 2,...}``, ``X`` - id of the GPU device, ``Y`` - id of the tile within device ``X``
 
-For demonstration purposes, see the :doc:`Hello Query Device C++ Sample ` that can print out the list of available devices with associated indices. Below is an example output (truncated to the device names only):
+For demonstration purposes, see the :doc:`Hello Query Device C++ Sample ` that can print out the list of available devices with associated indices. Below is an example output (truncated to the device names only):
 
 .. code-block:: sh
 
@@ -135,7 +135,7 @@ Floating-point precision of a GPU primitive is selected based on operation preci
 The newer generation Intel Iris Xe and Xe MAX GPUs provide accelerated performance for i8/u8 models. Hardware acceleration for ``i8``/``u8`` precision may be unavailable on older generation platforms. In such cases, a model is executed in the floating-point precision taken from IR. Hardware support of ``u8``/``i8`` acceleration can be queried via the ``ov::device::capabilities`` property.
 
-:doc:`Hello Query Device C++ Sample` can be used to print out the supported data types for all detected devices.
+:doc:`Hello Query Device C++ Sample ` can be used to print out the supported data types for all detected devices.
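
As a quick programmatic illustration of that query (a minimal Python sketch, not part of this patch; it assumes the ``OPTIMIZATION_CAPABILITIES`` string key, which backs ``ov::device::capabilities``):

.. code-block:: python

   import openvino as ov

   core = ov.Core()
   # Print each detected device together with the precisions it accelerates,
   # for example FP32, FP16, INT8, or BIN.
   for device in core.available_devices:
       capabilities = core.get_property(device, "OPTIMIZATION_CAPABILITIES")
       print(f"{device}: {capabilities}")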
 Supported Features
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst
index f4c08b5ec3004d..e501afa0beb6fe 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst
@@ -33,7 +33,7 @@ of ``ov::available_devices``, the string name of ``AVAILABLE_DEVICES`` and the t
 
    static constexpr Property, PropertyMutability::RO> available_devices{"AVAILABLE_DEVICES"};
 
-Refer to the :doc:`Hello Query Device C++ Sample ` sources and
+Refer to the :doc:`Hello Query Device C++ Sample ` sources and
 the :doc:`Multi-Device execution ` documentation for examples of setting and getting properties in user applications.
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst
index 006d4c2d11e0b1..94544b42cdb163 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst
@@ -153,7 +153,7 @@ When using the ``reshape`` method, you may take one of the approaches:
 
 
 You can find the usage scenarios of the ``reshape`` method in
-:doc:`Hello Reshape SSD Samples `.
+:doc:`Hello Reshape SSD Samples `.
 
 .. note::
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst
index f31cbff195cf2e..c96fa16e6874e0 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst
@@ -62,7 +62,7 @@ Below are example-codes for the regular and async-based approaches to compare:
 
 The technique can be generalized to any available parallel slack. For example, you can do inference and simultaneously encode the resulting or previous frames or run further inference, like emotion detection on top of the face detection results.
 
-Refer to the `Object Detection C++ Demo `__, `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action.
+Refer to the `Object Detection C++ Demo `__, `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action.
 
 .. note::
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst
index 0b6e6d14e37a28..048f01062f081f 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst
@@ -163,7 +163,7 @@ For example, use ``ov::hint::PerformanceMode::THROUGHPUT`` to prepare a general
 
 Testing Performance of the Hints with the Benchmark_App
 #######################################################
 
-The ``benchmark_app``, that exists in both :doc:`C++ ` and :doc:`Python ` versions, is the best way to evaluate the functionality of the performance hints for a particular device:
+Using the :doc:`benchmark_app sample ` is the best way to evaluate the functionality of the performance hints for a particular device:
 
 * benchmark_app **-hint tput** -d 'device' -m 'path to your model'
 * benchmark_app **-hint latency** -d 'device' -m 'path to your model'
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
index bb736322cd23ff..4f07e650af8223 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst
@@ -309,7 +309,7 @@ The ``ov::hint::performance_mode`` property enables you to specify a performance
 
 The THROUGHPUT and CUMULATIVE_THROUGHPUT hints below only improve performance in an asynchronous inference pipeline. For information on asynchronous inference, see the :doc:`Async API documentation `. The following notebooks provide examples of how to set up an asynchronous pipeline:
 
-* :doc:`Image Classification Async Sample `
+* :doc:`Image Classification Async Sample `
 * `Notebook - Asynchronous Inference with OpenVINO™ `__
 * `Notebook - Automatic Device Selection with OpenVINO `__
 
@@ -492,7 +492,7 @@ For limited device choice:
 
    benchmark_app -d AUTO:CPU,GPU,GNA -m  -i  -niter 1000
 
-For more information, refer to the :doc:`C++ ` or :doc:`Python ` version instructions.
+For more information, refer to the :doc:`Benchmark Tool ` article.
 
 .. note::
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst
index 7bd7bc1b069fec..70751372526d52 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst
@@ -206,7 +206,7 @@ The following are limitations of the current AUTO Batching implementations:
 
 Testing Performance with Benchmark_app
 ######################################
 
-The ``benchmark_app`` sample, that has both :doc:`C++ ` and :doc:`Python ` versions, is the best way to evaluate the performance of Automatic Batching:
+Using the :doc:`benchmark_app sample ` is the best way to evaluate the performance of Automatic Batching:
 
 - The most straightforward way is using the performance hints:
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst
index 12485a719649c5..74ca070830ff09 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst
@@ -123,7 +123,7 @@ Here is an example command to evaluate performance of CPU + GPU:
 
    ./benchmark_app -d MULTI:CPU,GPU -m  -i  -niter 1000
 
-For more information, refer to the :doc:`C++ ` or :doc:`Python ` version instructions.
+For more information, refer to the :doc:`Benchmark Tool ` article.
 
 .. note::
 
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst
index e672f8b7cc996a..a2b7ffe52306d6 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst
@@ -156,7 +156,7 @@ When you are running several inference requests in parallel, a device can proces
 
 Use a weak reference to the infer_request (``ov::InferRequest*``, ``ov::InferRequest&``, ``std::weak_ptr``, etc.) in the callback. It is necessary to avoid cyclic references.
 
-For more details, see the :doc:`Classification Async Sample `.
+For more details, see the :doc:`Classification Async Sample `.
 
 You can use the ``ov::InferRequest::cancel`` method if you want to abort execution of the current inference request:
 
diff --git a/docs/dev/pypi_publish/pypi-openvino-rt.md b/docs/dev/pypi_publish/pypi-openvino-rt.md
index a5d37d2b05c3ee..9b3115eb7bb45e 100644
--- a/docs/dev/pypi_publish/pypi-openvino-rt.md
+++ b/docs/dev/pypi_publish/pypi-openvino-rt.md
@@ -77,7 +77,7 @@ If installation was successful, you will see the list of available devices.
|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [OpenVINO Runtime](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Hetero_execution.html). | [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.3/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. | -| [Benchmark Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | +| [Benchmark Tool](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | ## Troubleshooting diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst index 377f79990ed3d9..1fb04382d1bac1 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst @@ -329,7 +329,7 @@ Timing Measure the time it takes to do inference on thousand images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__ +Tool `__ in OpenVINO. Note that many optimizations are possible to improve the performance. diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index 714e6f6a2c3248..0155fb02753da7 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -509,7 +509,7 @@ Performance Comparison Measure the time it takes to do inference on twenty images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__. +Tool `__. Keep in mind that many optimizations are possible to improve the performance. diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst index 221d1fbf970e14..4b22013af124fe 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst @@ -406,7 +406,7 @@ Measure the time it takes to do inference on fifty images and compare the result. The timing information gives an indication of performance. For a fair comparison, we include the time it takes to process the image. For more accurate benchmarking, use the `OpenVINO benchmark -tool `__. +tool `__. Note that many optimizations are possible to improve the performance. .. code:: ipython3 diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index 2170c3406a1aa1..25b2d7c0098d78 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -532,7 +532,7 @@ Frames Per Second (FPS) for images. Finally, measure the inference performance of OpenVINO ``FP32`` and ``INT8`` models. For this purpose, use `Benchmark -Tool `__ +Tool `__ in OpenVINO. 
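
For example, a typical pair of runs looks like this (an illustrative sketch; the IR file names are placeholders for the models saved earlier in the notebook):

.. code:: ipython3

   # Compare FP32 and INT8 throughput on CPU; -t limits each run to 15 seconds.
   !benchmark_app -m bert_fp32.xml -d CPU -api async -t 15
   !benchmark_app -m bert_int8.xml -d CPU -api async -t 15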
**Note**: The ``benchmark_app`` tool is able to measure the diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 3296e830cfa403..0ba82c888af3d3 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -623,7 +623,7 @@ Compare Performance of the Original and Quantized Models -------------------------------------------------------- `Benchmark -Tool `__ +Tool `__ is used to measure the inference performance of the ``FP16`` and ``INT8`` models. diff --git a/docs/notebooks/108-gpu-device-with-output.rst b/docs/notebooks/108-gpu-device-with-output.rst index 05477911d5d8f8..42dee9afc0e3b9 100644 --- a/docs/notebooks/108-gpu-device-with-output.rst +++ b/docs/notebooks/108-gpu-device-with-output.rst @@ -75,7 +75,7 @@ run to compare GPU performance in different configurations. It also provides the code for a basic end-to-end application that compiles a model on GPU and uses it to run inference. -Introduction +Introduction ------------------------------------------------------ Originally, graphic processing units (GPUs) began as specialized chips, @@ -102,14 +102,14 @@ instructions =2023.1.0" %pip install -q tensorflow - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( @@ -126,13 +126,13 @@ Install required packages -Checking GPUs with Query Device +Checking GPUs with Query Device ------------------------------------------------------------------------- In this section, we will see how to list the available GPUs and check their properties. Some of the key properties will also be defined. -List GPUs with core.available_devices +List GPUs with core.available_devices ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenVINO Runtime provides the ``available_devices`` method for checking @@ -143,7 +143,7 @@ appear. .. code:: ipython3 import openvino as ov - + core = ov.Core() core.available_devices @@ -171,7 +171,7 @@ appear in the list, follow the steps described to configure your GPU drivers to work with OpenVINO. Once we have the GPUs working with OpenVINO, we can proceed with the next sections. -Check Properties with core.get_property +Check Properties with core.get_property ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To get information about the GPUs, we can use device properties. In @@ -185,7 +185,7 @@ To get the value of a property, such as the device name, we can use the .. code:: ipython3 device = "GPU" - + core.get_property(device, "FULL_DEVICE_NAME") @@ -209,7 +209,7 @@ for that property. print(f"{device} SUPPORTED_PROPERTIES:\n") supported_properties = core.get_property(device, "SUPPORTED_PROPERTIES") indent = len(max(supported_properties, key=len)) - + for property_key in supported_properties: if property_key not in ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS', 'SUPPORTED_PROPERTIES'): try: @@ -222,7 +222,7 @@ for that property. .. parsed-literal:: GPU SUPPORTED_PROPERTIES: - + AVAILABLE_DEVICES : ['0'] RANGE_FOR_ASYNC_INFER_REQUESTS: (1, 2, 1) RANGE_FOR_STREAMS : (1, 2) @@ -245,7 +245,7 @@ for that property. 
GPU_QUEUE_PRIORITY : Priority.MEDIUM GPU_QUEUE_THROTTLE : Priority.MEDIUM GPU_ENABLE_LOOP_UNROLLING : True - CACHE_DIR : + CACHE_DIR : PERFORMANCE_HINT : PerformanceMode.LATENCY COMPILATION_NUM_THREADS : 20 NUM_STREAMS : 1 @@ -254,7 +254,7 @@ for that property. DEVICE_ID : 0 -Brief Descriptions of Key Properties +Brief Descriptions of Key Properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each device has several properties as seen in the last command. Some of @@ -282,7 +282,7 @@ To learn more about devices and properties, see the `Query Device Properties `__ page. -Compiling a Model on GPU +Compiling a Model on GPU ------------------------------------------------------------------ Now, we know how to list the GPUs in the system and check their @@ -290,7 +290,7 @@ properties. We can easily use one for compiling and running models with OpenVINO `GPU plugin `__. -Download and Convert a Model +Download and Convert a Model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This tutorial uses the ``ssdlite_mobilenet_v2`` model. The @@ -300,7 +300,7 @@ was trained on `Common Objects in Context categories of object. For details, see the `paper `__. -Download and unpack the Model +Download and unpack the Model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use the ``download_file`` function from the ``notebook_utils`` to @@ -313,23 +313,23 @@ package is already downloaded. import sys import tarfile from pathlib import Path - + sys.path.append("../utils") - + import notebook_utils as utils - + # A directory where the model will be downloaded. base_model_dir = Path("./model").expanduser() - + model_name = "ssdlite_mobilenet_v2" archive_name = Path(f"{model_name}_coco_2018_05_09.tar.gz") - + # Download the archive downloaded_model_path = base_model_dir / archive_name if not downloaded_model_path.exists(): model_url = f"http://download.tensorflow.org/models/object_detection/{archive_name}" utils.download_file(model_url, downloaded_model_path.name, downloaded_model_path.parent) - + # Unpack the model tf_model_path = base_model_dir / archive_name.with_suffix("").stem / "frozen_inference_graph.pb" if not tf_model_path.exists(): @@ -350,14 +350,14 @@ package is already downloaded. to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. - + Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs) - -Convert the Model to OpenVINO IR format + +Convert the Model to OpenVINO IR format ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To convert the model to OpenVINO IR with ``FP16`` precision, use model @@ -368,15 +368,15 @@ directory. For more details about model conversion, see this .. code:: ipython3 from openvino.tools.mo.front import tf as ov_tf_front - + precision = 'FP16' - + # The output path for the conversion. model_path = base_model_dir / 'ir_model' / f'{model_name}_{precision.lower()}.xml' - + trans_config_path = Path(ov_tf_front.__file__).parent / "ssd_v2_support.json" pipeline_config = base_model_dir / archive_name.with_suffix("").stem / "pipeline.config" - + model = None if not model_path.exists(): model = ov.tools.mo.convert_model(input_model=tf_model_path, @@ -402,7 +402,7 @@ directory. 
For more details about model conversion, see this IR model saved to model/ir_model/ssdlite_mobilenet_v2_fp16.xml -Compile with Default Configuration +Compile with Default Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When the model is ready, first we need to read it, using the @@ -424,7 +424,7 @@ Selection `__. -Reduce Compile Time through Model Caching +Reduce Compile Time through Model Caching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Depending on the model used, device-specific optimizations and network @@ -440,17 +440,17 @@ following: import time from pathlib import Path - + # Create cache folder cache_folder = Path("cache") cache_folder.mkdir(exist_ok=True) - + start = time.time() core = Core() - + # Set cache folder core.set_property({'CACHE_DIR': cache_folder}) - + # Compile the model as before model = core.read_model(model=model_path) compiled_model = core.compile_model(model, device) @@ -473,7 +473,7 @@ compile times with caching enabled and disabled as follows: model = core.read_model(model=model_path) compiled_model = core.compile_model(model, device) print(f"Cache enabled - compile time: {time.time() - start}s") - + start = time.time() core = Core() model = core.read_model(model=model_path) @@ -493,7 +493,7 @@ optimizing an application. To read more about this, see the `Model Caching `__ docs. -Throughput and Latency Performance Hints +Throughput and Latency Performance Hints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To simplify device and pipeline configuration, OpenVINO provides @@ -523,7 +523,7 @@ available memory. compiled_model = core.compile_model(model, device, {"PERFORMANCE_HINT": "THROUGHPUT"}) -Using Multiple GPUs with Multi-Device and Cumulative Throughput +Using Multiple GPUs with Multi-Device and Cumulative Throughput ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The latency and throughput hints mentioned above are great and can make @@ -565,7 +565,7 @@ manually specify devices to use. Below is an example showing how to use in OpenVINO as well as the `Asynchronous Inference notebook `__. -Performance Comparison with benchmark_app +Performance Comparison with benchmark_app ----------------------------------------------------------------------------------- Given all the different options available when compiling a model, it may @@ -585,7 +585,7 @@ Note that benchmark_app only requires the model path to run but both the device and hint arguments will be useful to us. For more advanced usages, the tool itself has other options that can be checked by running ``benchmark_app -h`` or reading the -`docs `__. +`docs `__. The following example shows how to benchmark a simple model, using a GPU with a latency focus: @@ -601,12 +601,12 @@ with a latency focus: [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] GPU [ INFO ] Build ................................. 
2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [Step 4/11] Reading model files [ INFO ] Loading model files @@ -635,7 +635,7 @@ with a latency focus: [ INFO ] GPU_QUEUE_PRIORITY: Priority.MEDIUM [ INFO ] GPU_QUEUE_THROTTLE: Priority.MEDIUM [ INFO ] GPU_ENABLE_LOOP_UNROLLING: True - [ INFO ] CACHE_DIR: + [ INFO ] CACHE_DIR: [ INFO ] PERFORMANCE_HINT: PerformanceMode.LATENCY [ INFO ] COMPILATION_NUM_THREADS: 20 [ INFO ] NUM_STREAMS: 1 @@ -644,7 +644,7 @@ with a latency focus: [ INFO ] DEVICE_ID: 0 [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'image_tensor'!. This input will be filled with random values! - [ INFO ] Fill input 'image_tensor' with random values + [ INFO ] Fill input 'image_tensor' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 1 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 6.17 ms @@ -665,7 +665,7 @@ performance may depend on the hardware used. Generally, we should expect GPU to be better than CPU, whereas multiple GPUs should be better than a single GPU as long as there is enough work for each of them. -CPU vs GPU with Latency Hint +CPU vs GPU with Latency Hint ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: ipython3 @@ -680,12 +680,12 @@ CPU vs GPU with Latency Hint [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] CPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [Step 4/11] Reading model files [ INFO ] Loading model files @@ -717,7 +717,7 @@ CPU vs GPU with Latency Hint [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'image_tensor'!. This input will be filled with random values! - [ INFO ] Fill input 'image_tensor' with random values + [ INFO ] Fill input 'image_tensor' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 1 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 4.42 ms @@ -744,12 +744,12 @@ CPU vs GPU with Latency Hint [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] GPU [ INFO ] Build ................................. 
2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [Step 4/11] Reading model files [ INFO ] Loading model files @@ -778,7 +778,7 @@ CPU vs GPU with Latency Hint [ INFO ] GPU_QUEUE_PRIORITY: Priority.MEDIUM [ INFO ] GPU_QUEUE_THROTTLE: Priority.MEDIUM [ INFO ] GPU_ENABLE_LOOP_UNROLLING: True - [ INFO ] CACHE_DIR: + [ INFO ] CACHE_DIR: [ INFO ] PERFORMANCE_HINT: PerformanceMode.LATENCY [ INFO ] COMPILATION_NUM_THREADS: 20 [ INFO ] NUM_STREAMS: 1 @@ -787,7 +787,7 @@ CPU vs GPU with Latency Hint [ INFO ] DEVICE_ID: 0 [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'image_tensor'!. This input will be filled with random values! - [ INFO ] Fill input 'image_tensor' with random values + [ INFO ] Fill input 'image_tensor' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 1 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 8.79 ms @@ -802,7 +802,7 @@ CPU vs GPU with Latency Hint [ INFO ] Throughput: 189.21 FPS -CPU vs GPU with Throughput Hint +CPU vs GPU with Throughput Hint ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: ipython3 @@ -817,12 +817,12 @@ CPU vs GPU with Throughput Hint [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] CPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [Step 4/11] Reading model files [ INFO ] Loading model files @@ -854,7 +854,7 @@ CPU vs GPU with Throughput Hint [ INFO ] PERFORMANCE_HINT_NUM_REQUESTS: 0 [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'image_tensor'!. This input will be filled with random values! - [ INFO ] Fill input 'image_tensor' with random values + [ INFO ] Fill input 'image_tensor' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 5 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 8.15 ms @@ -881,12 +881,12 @@ CPU vs GPU with Throughput Hint [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] GPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [Step 4/11] Reading model files [ INFO ] Loading model files @@ -915,7 +915,7 @@ CPU vs GPU with Throughput Hint [ INFO ] GPU_QUEUE_PRIORITY: Priority.MEDIUM [ INFO ] GPU_QUEUE_THROTTLE: Priority.MEDIUM [ INFO ] GPU_ENABLE_LOOP_UNROLLING: True - [ INFO ] CACHE_DIR: + [ INFO ] CACHE_DIR: [ INFO ] PERFORMANCE_HINT: PerformanceMode.THROUGHPUT [ INFO ] COMPILATION_NUM_THREADS: 20 [ INFO ] NUM_STREAMS: 2 @@ -924,7 +924,7 @@ CPU vs GPU with Throughput Hint [ INFO ] DEVICE_ID: 0 [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'image_tensor'!. 
This input will be filled with random values! - [ INFO ] Fill input 'image_tensor' with random values + [ INFO ] Fill input 'image_tensor' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 4 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 9.17 ms @@ -939,7 +939,7 @@ CPU vs GPU with Throughput Hint [ INFO ] Throughput: 326.34 FPS -Single GPU vs Multiple GPUs +Single GPU vs Multiple GPUs ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: ipython3 @@ -954,12 +954,12 @@ Single GPU vs Multiple GPUs [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] GPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Device GPU.1 does not support performance hint property(-hint). [ ERROR ] Config for device with 1 ID is not registered in GPU plugin @@ -983,14 +983,14 @@ Single GPU vs Multiple GPUs [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] AUTO [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 [ INFO ] GPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Device GPU.1 does not support performance hint property(-hint). [Step 4/11] Reading model files @@ -1030,14 +1030,14 @@ Single GPU vs Multiple GPUs [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] GPU [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 [ INFO ] MULTI [ INFO ] Build ................................. 2022.3.0-9052-9752fafe8eb-releases/2022/3 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Device GPU.1 does not support performance hint property(-hint). [Step 4/11] Reading model files @@ -1065,7 +1065,7 @@ Single GPU vs Multiple GPUs RuntimeError: Config for device with 1 ID is not registered in GPU plugin -Basic Application Using GPUs +Basic Application Using GPUs ---------------------------------------------------------------------- We will now show an end-to-end object detection example using GPUs in @@ -1077,19 +1077,19 @@ found in each frame. The detections are then drawn on their corresponding frame and saved as a video, which is displayed at the end of the application. -Import Necessary Packages +Import Necessary Packages ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 import time from pathlib import Path - + import cv2 import numpy as np from IPython.display import Video from openvino.runtime import AsyncInferQueue, Core, InferRequest - + # Instantiate OpenVINO Runtime core = Core() core.available_devices @@ -1103,7 +1103,7 @@ Import Necessary Packages -Compile the Model +Compile the Model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code:: ipython3 @@ -1112,11 +1112,11 @@ Compile the Model model = core.read_model(model=model_path) device_name = "GPU" compiled_model = core.compile_model(model=model, device_name=device_name, config={"PERFORMANCE_HINT": "THROUGHPUT"}) - + # Get the input and output nodes input_layer = compiled_model.input(0) output_layer = compiled_model.output(0) - + # Get the input size num, height, width, channels = input_layer.shape print('Model input shape:', num, height, width, channels) @@ -1127,7 +1127,7 @@ Compile the Model Model input shape: 1 300 300 3 -Load and Preprocess Video Frames +Load and Preprocess Video Frames ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -1136,7 +1136,7 @@ Load and Preprocess Video Frames video_file = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/Coco%20Walking%20in%20Berkeley.mp4" video = cv2.VideoCapture(video_file) framebuf = [] - + # Go through every frame of video and resize it print('Loading video...') while video.isOpened(): @@ -1145,18 +1145,18 @@ Load and Preprocess Video Frames print('Video loaded!') video.release() break - + # Preprocess frames - convert them to shape expected by model input_frame = cv2.resize(src=frame, dsize=(width, height), interpolation=cv2.INTER_AREA) input_frame = np.expand_dims(input_frame, axis=0) - + # Append frame to framebuffer framebuf.append(input_frame) - - + + print('Frame shape: ', framebuf[0].shape) print('Number of frames: ', len(framebuf)) - + # Show original video file # If the video does not display correctly inside the notebook, please open it with your favorite media player Video(video_file) @@ -1170,7 +1170,7 @@ Load and Preprocess Video Frames Number of frames: 288 -Define Model Output Classes +Define Model Output Classes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -1191,10 +1191,10 @@ Define Model Output Classes "teddy bear", "hair drier", "toothbrush", "hair brush" ] -Set up Asynchronous Pipeline +Set up Asynchronous Pipeline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Callback Definition +Callback Definition ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: ipython3 @@ -1204,14 +1204,14 @@ Callback Definition global frame_number stop_time = time.time() frame_number += 1 - + predictions = next(iter(infer_request.results.values())) results[frame_id] = predictions[:10] # Grab first 10 predictions for this frame - + total_time = stop_time - start_time frame_fps[frame_id] = frame_number / total_time -Create Async Pipeline +Create Async Pipeline ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code:: ipython3 @@ -1220,7 +1220,7 @@ Create Async Pipeline infer_queue = AsyncInferQueue(compiled_model) infer_queue.set_callback(completion_callback) -Perform Inference +Perform Inference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code:: ipython3 @@ -1232,14 +1232,14 @@ Perform Inference start_time = time.time() for i, input_frame in enumerate(framebuf): infer_queue.start_async({0: input_frame}, i) - + infer_queue.wait_all() # Wait until all inference requests in the AsyncInferQueue are completed stop_time = time.time() - + # Calculate total inference time and FPS total_time = stop_time - start_time fps = len(framebuf) / total_time - time_per_frame = 1 / fps + time_per_frame = 1 / fps print(f'Total time to infer all frames: {total_time:.3f}s') print(f'Time per frame: {time_per_frame:.6f}s ({fps:.3f} FPS)') @@ -1250,27 +1250,27 @@ Perform Inference Time per frame: 0.004744s (210.774 FPS) -Process Results +Process Results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 # Set minimum detection threshold min_thresh = .6 - + # Load video video = cv2.VideoCapture(video_file) - + # Get video parameters frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(video.get(cv2.CAP_PROP_FPS)) fourcc = int(video.get(cv2.CAP_PROP_FOURCC)) - + # Create folder and VideoWriter to save output video Path('./output').mkdir(exist_ok=True) output = cv2.VideoWriter('output/output.mp4', fourcc, fps, (frame_width, frame_height)) - + # Draw detection results on every frame of video and save as a new video file while video.isOpened(): current_frame = int(video.get(cv2.CAP_PROP_POS_FRAMES)) @@ -1280,12 +1280,12 @@ Process Results output.release() video.release() break - + # Draw info at the top left such as current fps, the devices and the performance hint being used cv2.putText(frame, f"fps {str(round(frame_fps[current_frame], 2))}", (5, 20), cv2.FONT_ITALIC, 0.6, (0, 0, 0), 1, cv2.LINE_AA) - cv2.putText(frame, f"device {device_name}", (5, 40), cv2.FONT_ITALIC, 0.6, (0, 0, 0), 1, cv2.LINE_AA) + cv2.putText(frame, f"device {device_name}", (5, 40), cv2.FONT_ITALIC, 0.6, (0, 0, 0), 1, cv2.LINE_AA) cv2.putText(frame, f"hint {compiled_model.get_property('PERFORMANCE_HINT').name}", (5, 60), cv2.FONT_ITALIC, 0.6, (0, 0, 0), 1, cv2.LINE_AA) - + # prediction contains [image_id, label, conf, x_min, y_min, x_max, y_max] according to model for prediction in np.squeeze(results[current_frame]): if prediction[2] > min_thresh: @@ -1294,13 +1294,13 @@ Process Results x_max = int(prediction[5] * frame_width) y_max = int(prediction[6] * frame_height) label = classes[int(prediction[1])] - + # Draw a bounding box with its label above it cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 1, cv2.LINE_AA) cv2.putText(frame, label, (x_min, y_min - 10), cv2.FONT_ITALIC, 1, (255, 0, 0), 1, cv2.LINE_AA) - + output.write(frame) - + # Show output video file # If the video does not display correctly inside the notebook, please open it with your favorite media player Video("output/output.mp4", width=800, embed=True) @@ -1322,7 +1322,7 @@ Process Results -Conclusion +Conclusion ---------------------------------------------------- This tutorial demonstrates how easy it is to use one or more GPUs in @@ -1334,19 +1334,11 @@ detected bounding boxes. 
To read more about any of these topics, feel free to visit their corresponding documentation: -- `GPU - Plugin `__ -- `AUTO - Plugin `__ -- `Model - Caching `__ -- `MULTI Device - Mode `__ -- `Query Device - Properties `__ -- `Configurations for GPUs with - OpenVINO `__ -- `Benchmark Python - Tool `__ -- `Asynchronous - Inferencing `__ +- `GPU Plugin `__ +- `AUTO Plugin `__ +- `Model Caching `__ +- `MULTI Device Mode `__ +- `Query Device Properties `__ +- `Configurations for GPUs with OpenVINO `__ +- `Benchmark Python Tool `__ +- `Asynchronous Inferencing `__ diff --git a/docs/notebooks/109-latency-tricks-with-output.rst b/docs/notebooks/109-latency-tricks-with-output.rst index 6d9f242115a46c..8d8cadcd5a1ee6 100644 --- a/docs/notebooks/109-latency-tricks-with-output.rst +++ b/docs/notebooks/109-latency-tricks-with-output.rst @@ -678,6 +678,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above, e.g., shared memory. diff --git a/docs/notebooks/109-throughput-tricks-with-output.rst b/docs/notebooks/109-throughput-tricks-with-output.rst index 43e6c9b9867ca5..b5f57927dffdc5 100644 --- a/docs/notebooks/109-throughput-tricks-with-output.rst +++ b/docs/notebooks/109-throughput-tricks-with-output.rst @@ -725,6 +725,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above. diff --git a/docs/notebooks/110-ct-scan-live-inference-with-output.rst b/docs/notebooks/110-ct-scan-live-inference-with-output.rst index 343da65cc36238..6eaf622c42b13c 100644 --- a/docs/notebooks/110-ct-scan-live-inference-with-output.rst +++ b/docs/notebooks/110-ct-scan-live-inference-with-output.rst @@ -127,7 +127,7 @@ Benchmark Model Performance --------------------------------------------------------------------- To measure the inference performance of the IR model, use `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. Benchmark tool is a command-line application that can be run in the notebook with ``! benchmark_app`` or ``%sx benchmark_app`` commands. diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst index c4a466e2acdd70..277c9359867e10 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst @@ -639,7 +639,7 @@ Compare Performance of the FP32 IR Model and Quantized Models To measure the inference performance of the ``FP32`` and ``INT8`` models, we use `Benchmark -Tool `__ +Tool `__ - OpenVINO’s inference performance measurement tool. Benchmark tool is a command line application, part of OpenVINO development tools, that can be run in the notebook with ``! 
benchmark_app`` or diff --git a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst index 3187ce53bd9e03..14a62af8d8ad95 100644 --- a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst +++ b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst @@ -738,7 +738,7 @@ IV. Compare performance of INT8 model and FP32 model in OpenVINO Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/113-image-classification-quantization-with-output.rst b/docs/notebooks/113-image-classification-quantization-with-output.rst index 9c5291d3e34cbe..e0c5e5e7d04622 100644 --- a/docs/notebooks/113-image-classification-quantization-with-output.rst +++ b/docs/notebooks/113-image-classification-quantization-with-output.rst @@ -382,7 +382,7 @@ Compare Performance of the Original and Quantized Models Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. **NOTE**: For more accurate performance, it is recommended to run diff --git a/docs/notebooks/119-tflite-to-openvino-with-output.rst b/docs/notebooks/119-tflite-to-openvino-with-output.rst index 2367aff15ae492..a80be0f9317523 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output.rst +++ b/docs/notebooks/119-tflite-to-openvino-with-output.rst @@ -235,7 +235,7 @@ Estimate Model Performance -------------------------- `Benchmark -Tool `__ +Tool `__ is used to measure the inference performance of the model on CPU and GPU. diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst index c6ad363f260a27..f876980f8cfd12 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst @@ -568,7 +568,7 @@ Benchmarking performance of converted model Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the model. NOTE: For more accurate performance, it is recommended to run diff --git a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst index cf62c9253ac99e..237bed07aa46b3 100644 --- a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst +++ b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst @@ -514,7 +514,7 @@ Benchmark the converted OpenVINO model using benchmark app The OpenVINO toolkit provides a benchmarking application to gauge the platform specific runtime performance that can be obtained under optimal configuration parameters for a given model. For more details refer to: -https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html +https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html Here, we use the benchmark application to obtain performance estimates under optimal configuration for the knowledge graph model inference. 
We diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst index 4f7fc79b33cb01..059c581f3fd1f3 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst @@ -933,8 +933,8 @@ advance and fill it in as the inference requests are executed. Let’s compare the models and plot the results. - **Note**: To get a more accurate benchmark, use the `Benchmark Python - Tool `__ + Note: To get a more accurate benchmark, use the `Benchmark Python + Tool `__ .. code:: ipython3 diff --git a/docs/notebooks/226-yolov7-optimization-with-output.rst b/docs/notebooks/226-yolov7-optimization-with-output.rst index 77bb7c91c825d5..6f3c1e062dd3c6 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output.rst +++ b/docs/notebooks/226-yolov7-optimization-with-output.rst @@ -996,7 +996,7 @@ Compare Performance of the Original and Quantized Models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst b/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst index a920d4c1087bd1..2c2d7937148a64 100644 --- a/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst +++ b/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst @@ -981,13 +981,8 @@ Compare the Original and Quantized Models ----------------------------------------- - -Compare performance of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Finally, use the OpenVINO -`Benchmark -Tool `__ +Finally, use the OpenVINO `Benchmark +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst index b07d5900184bda..46643cd5f8bd2f 100644 --- a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst @@ -973,13 +973,8 @@ Compare the Original and Quantized Models ----------------------------------------- - -Compare performance of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Finally, use the OpenVINO -`Benchmark -Tool `__ +Finally, use the OpenVINO `Benchmark +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/230-yolov8-object-detection-with-output.rst b/docs/notebooks/230-yolov8-object-detection-with-output.rst index 95a2f00d1fab0e..ae9cb15a34fef5 100644 --- a/docs/notebooks/230-yolov8-object-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-object-detection-with-output.rst @@ -949,7 +949,7 @@ Compare performance object detection models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. 
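
Where running ``benchmark_app`` is not convenient, a rough in-notebook comparison can be sketched directly with the Python API (a hedged sketch: the IR file names are hypothetical, and static ``float32`` inputs are assumed):

.. code:: ipython3

   import time

   import numpy as np
   import openvino as ov

   def average_latency_ms(xml_path, runs=100):
       compiled = ov.Core().compile_model(xml_path, "CPU")
       request = compiled.create_infer_request()
       # Random data shaped to each model input; assumes static input shapes.
       feed = {i.get_any_name(): np.random.rand(*i.shape).astype(np.float32)
               for i in compiled.inputs}
       start = time.perf_counter()
       for _ in range(runs):
           request.infer(feed)
       return (time.perf_counter() - start) / runs * 1000

   # Hypothetical file names for the original and quantized IRs.
   print("FP32:", average_latency_ms("yolov8n_fp32.xml"))
   print("INT8:", average_latency_ms("yolov8n_int8.xml"))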
diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst
index 7c5475f0c56881..e9d123e729a876 100644
--- a/docs/notebooks/237-segment-anything-with-output.rst
+++ b/docs/notebooks/237-segment-anything-with-output.rst
@@ -1526,9 +1526,8 @@ Run ``INT8`` model in automatic mask generation mode
 Compare Performance of the Original and Quantized Models
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Finally, use the OpenVINO
-`Benchmark
-Tool `__
+Finally, use the OpenVINO `Benchmark
+Tool `__
 to measure the inference performance of the ``FP32`` and ``INT8``
 models.
 
diff --git a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst
index 929faec71c4f5c..b15d3817de20a5 100644
--- a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst
+++ b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst
@@ -736,7 +736,7 @@ Compare performance time of the converted and optimized models
 
 To measure the inference performance of OpenVINO FP16 and INT8 models,
 use `Benchmark
-Tool `__.
+Tool `__.
 
    **NOTE**: For more accurate performance, run ``benchmark_app`` in a
    terminal/command prompt after closing other applications. Run
diff --git a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst
index 2c5c02d1d847fc..2062191af8ebf1 100644
--- a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst
+++ b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst
@@ -630,7 +630,7 @@ Compare Inference Speed
 
 Measure inference speed with the `OpenVINO Benchmark
-App `__.
+App `__.
 Benchmark App is a command line tool that measures raw inference
 performance for a specified OpenVINO IR model. Run
@@ -640,7 +640,7 @@ the ``-m`` parameter with asynchronous inference on CPU, for one minute. Use the
 ``-d`` parameter to test performance on a different device, for example
 an Intel integrated Graphics (iGPU), and ``-t`` to set the number of
 seconds to run inference. See the
-`documentation `__
+`documentation `__
 for more information. This tutorial uses a wrapper function from
 `Notebook
diff --git a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst
index b44100bea8b20b..0e855ec485094f 100644
--- a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst
+++ b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst
@@ -718,7 +718,7 @@ Benchmark Model Performance by Computing Inference Time
 
 Finally, measure the inference performance of the ``FP32`` and ``INT8``
 models, using `Benchmark
-Tool `__
+Tool `__
 - inference performance measurement tool in OpenVINO. By default,
 Benchmark Tool runs inference for 60 seconds in asynchronous mode on
 CPU.
It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst index b73b88c965a25f..2a1d3dad8e1b3b 100644 --- a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst +++ b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst @@ -477,7 +477,7 @@ Benchmark Model Performance by Computing Inference Time Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/samples/c/hello_classification/README.md b/samples/c/hello_classification/README.md index f42cd925270ef2..c6a1c7d316b7d8 100644 --- a/samples/c/hello_classification/README.md +++ b/samples/c/hello_classification/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to execute an inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API and input auto-resize feature. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html) ## Requirements @@ -12,8 +12,8 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Validated images | The sample uses OpenCV\* to [read input image](https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56) (\*.bmp, \*.png) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html), | -| | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html), | +| | [Python](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html) | Hello Classification C sample application demonstrates how to use the C API from OpenVINO in applications. diff --git a/samples/c/hello_nv12_input_classification/README.md b/samples/c/hello_nv12_input_classification/README.md index c6ca2ea77de0dc..a51a179917d18a 100644 --- a/samples/c/hello_nv12_input_classification/README.md +++ b/samples/c/hello_nv12_input_classification/README.md @@ -4,7 +4,7 @@ This sample demonstrates how to execute an inference of image classification net Hello NV12 Input Classification C Sample demonstrates how to use the NV12 automatic input pre-processing API in your applications. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_nv12_input_classification.html) ## Requirements @@ -14,7 +14,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Validated images | An uncompressed image in the NV12 color format - \*.yuv | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_hello_nv12_input_classification.html) | The following C++ API is used in the application: @@ -28,6 +28,6 @@ The following C++ API is used in the application: | | ``ov_preprocess_preprocess_steps_convert_color`` | | -Basic Inference Engine API is covered by [Hello Classification C sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html). +Basic Inference Engine API is covered by [Hello Classification C sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/samples/cpp/benchmark/sync_benchmark/README.md index 697d0efc439e05..c0887d49bfaa20 100644 --- a/samples/cpp/benchmark/sync_benchmark/README.md +++ b/samples/cpp/benchmark/sync_benchmark/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
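The sync benchmark README above centers on the Synchronous Inference Request API. A minimal latency-measurement sketch of that API, assuming a single static-shape input; `model.xml` and the random data are placeholders:

```python
# A sketch in the spirit of the sync benchmark sample, not the sample itself.
import time
import numpy as np
from openvino.runtime import Core

core = Core()
compiled = core.compile_model("model.xml", "CPU")
request = compiled.create_infer_request()
data = np.random.rand(*compiled.input(0).shape).astype(np.float32)

latencies_ms = []
for _ in range(100):
    start = time.perf_counter()
    request.infer({0: data})  # synchronous: blocks until the result is ready
    latencies_ms.append((time.perf_counter() - start) * 1000)

latencies_ms.sort()
print(f"median latency: {latencies_ms[len(latencies_ms) // 2]:.2f} ms")
```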
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_sync_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_sync_benchmark.html) ## Requirements @@ -15,7 +15,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_sync_benchmark.html) | The following C++ API is used in the application: diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/samples/cpp/benchmark/throughput_benchmark/README.md index 0331a9d2ce7664..bb70ec4348fe56 100644 --- a/samples/cpp/benchmark/throughput_benchmark/README.md +++ b/samples/cpp/benchmark/throughput_benchmark/README.md @@ -2,9 +2,9 @@ This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. -The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. +The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. 
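The throughput benchmark README above relies on the Asynchronous Inference Request API; a hedged Python sketch using `AsyncInferQueue` (the class these sample docs reference for Python), where `model.xml`, four jobs, and 32 random frames are arbitrary choices:

```python
# A throughput-mode sketch under the assumptions stated above.
import numpy as np
from openvino.runtime import AsyncInferQueue, Core

core = Core()
compiled = core.compile_model("model.xml", "CPU")

results = {}
def on_done(request, frame_id):
    # Copy the output, because the request's tensor is reused for later jobs.
    results[frame_id] = request.get_output_tensor(0).data.copy()

queue = AsyncInferQueue(compiled, jobs=4)
queue.set_callback(on_done)

shape = compiled.input(0).shape
for frame_id in range(32):
    frame = np.random.rand(*shape).astype(np.float32)
    queue.start_async({0: frame}, userdata=frame_id)  # returns immediately
queue.wait_all()  # block until every in-flight request completes
print(f"{len(results)} inferences completed")
```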
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_throughput_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_throughput_benchmark.html) ## Requirements @@ -17,7 +17,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_throughput_benchmark_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_throughput_benchmark.html) | The following C++ API is used in the application: diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md index ac100994de45af..17be6f2ec825e6 100644 --- a/samples/cpp/benchmark_app/README.md +++ b/samples/cpp/benchmark_app/README.md @@ -2,14 +2,14 @@ This page demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. -> **NOTE**: This page describes usage of the C++ implementation of the Benchmark Tool. For the Python implementation, refer to the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. +> **NOTE**: This page describes usage of the C++ implementation of the Benchmark Tool. For the Python implementation, refer to the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) ## Requirements To use the C++ benchmark_app, you must first build it following the [Build the Sample Applications](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Samples_Overview.html) instructions and then set up paths and environment variables by following the [Get Ready for Running the Sample Applications](https://docs.openvino.ai/2023.3/openvino_docs_get_started_get_started_demos.html) instructions. Navigate to the directory where the benchmark_app C++ sample binary was built. -> **NOTE**: If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) is available, and you should follow the usage instructions on that page instead.
+> **NOTE**: If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) is available, and you should follow the usage instructions on that page instead. The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to [convert your models](https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). diff --git a/samples/cpp/classification_sample_async/README.md b/samples/cpp/classification_sample_async/README.md index f0188c0342f39c..021a78f113336e 100644 --- a/samples/cpp/classification_sample_async/README.md +++ b/samples/cpp/classification_sample_async/README.md @@ -6,7 +6,7 @@ Models with only one input and output are supported. In addition to regular images, the sample also supports single-channel ``ubyte`` images as an input for LeNet model. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_classification_sample_async_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_image_classification_async.html) ## Requirements @@ -16,7 +16,7 @@ For more detailed information on how this sample works, check the dedicated [art | | [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_image_classification_async.html) | The following C++ API is used in the application: diff --git a/samples/cpp/hello_classification/README.md b/samples/cpp/hello_classification/README.md index 7ed0affa099610..809d417141874d 100644 --- a/samples/cpp/hello_classification/README.md +++ b/samples/cpp/hello_classification/README.md @@ -4,7 +4,7 @@ This sample demonstrates how to do inference of image classification models usin Models with only one input and output are supported. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html) ## Requirements @@ -14,8 +14,7 @@ For more detailed information on how this sample works, check the dedicated [art | | [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html), | -| | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | +| Other language realization | [Python, C](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html), | The following C++ API is used in the application: diff --git a/samples/cpp/hello_nv12_input_classification/README.md b/samples/cpp/hello_nv12_input_classification/README.md index fa921a6e2c6702..126a074c93dd3e 100644 --- a/samples/cpp/hello_nv12_input_classification/README.md +++ b/samples/cpp/hello_nv12_input_classification/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to execute an inference of image classification models with images in NV12 color format using Synchronous Inference Request API. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_nv12_input_classification.html) ## Requirements @@ -12,7 +12,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Validated images | An uncompressed image in the NV12 color format - \*.yuv | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README.html) | +| Other language realization | [C](https://docs.openvino.ai/2023.3/openvino_sample_hello_nv12_input_classification.html) | The following C++ API is used in the application: @@ -27,5 +27,5 @@ The following C++ API is used in the application: | | ``ov::preprocess::PreProcessSteps::convert_color`` | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html). +Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). 
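The NV12 READMEs above list `ov::preprocess::PreProcessSteps::convert_color`; a hedged Python analogue of that pre-processing flow, assuming a single-input BGR model and a placeholder `model.xml`:

```python
# A sketch of NV12 input pre-processing, not the sample's own code.
from openvino.runtime import Core
from openvino.preprocess import ColorFormat, PrePostProcessor

core = Core()
model = core.read_model("model.xml")

ppp = PrePostProcessor(model)
# Declare that input data arrives as two-plane NV12 (a Y plane and a UV plane)...
ppp.input().tensor().set_color_format(ColorFormat.NV12_TWO_PLANES)
# ...and let the runtime insert the conversion to the BGR data the model expects.
ppp.input().preprocess().convert_color(ColorFormat.BGR)
model = ppp.build()

compiled = core.compile_model(model, "CPU")  # now accepts Y and UV tensors
```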
diff --git a/samples/cpp/hello_query_device/README.md b/samples/cpp/hello_query_device/README.md index 49d344cf40104c..8874e1e8eb7660 100644 --- a/samples/cpp/hello_query_device/README.md +++ b/samples/cpp/hello_query_device/README.md @@ -2,14 +2,14 @@ This sample demonstrates how to query OpenVINO™ Runtime devices and print their metrics and default configuration values, using [Properties API](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_query_api.html). -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_query_device_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_query_device.html) ## Requirements | Options | Values | | ------------------------------| ----------------------------------------------------------------------------------------------------------------------------| | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_hello_query_device.html) | The following C++ API is used in the application: @@ -18,4 +18,4 @@ The following C++ API is used in the application: | Available Devices | ``ov::Core::get_available_devices``, | Get available devices information and configuration for inference | | | ``ov::Core::get_property`` | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html). +Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/cpp/hello_reshape_ssd/README.md b/samples/cpp/hello_reshape_ssd/README.md index 33929478c0b727..138706aae23c68 100644 --- a/samples/cpp/hello_reshape_ssd/README.md +++ b/samples/cpp/hello_reshape_ssd/README.md @@ -3,7 +3,7 @@ This sample demonstrates how to do synchronous inference of object detection models using [input reshape feature](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_ShapeInference.html). Models with only one input and output are supported.
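The hello_query_device READMEs above list `ov::Core::get_available_devices` and `ov::Core::get_property`; before moving on, a minimal Python sketch of the same Query Device API, where `FULL_DEVICE_NAME` is one of the standard read-only properties:

```python
# Enumerate inference devices and print one well-known property for each.
from openvino.runtime import Core

core = Core()
for device in core.available_devices:  # e.g. ['CPU', 'GPU']
    full_name = core.get_property(device, "FULL_DEVICE_NAME")
    print(f"{device}: {full_name}")
```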
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_reshape_ssd_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_reshape_ssd.html) ## Requirements @@ -12,7 +12,7 @@ For more detailed information on how this sample works, check the dedicated [art | Validated Models | [person-detection-retail-0013](https://docs.openvino.ai/nightly/omz_models_model_person_detection_retail_0013.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_reshape_ssd_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_hello_reshape_ssd.html) | The following C++ API is used in the application: @@ -29,4 +29,4 @@ The following C++ API is used in the application: | | ``ov::preprocess::PreProcessSteps::convert_layout`` | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html). \ No newline at end of file +Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/cpp/model_creation_sample/README.md b/samples/cpp/model_creation_sample/README.md index 7003f27044e00b..c97c4f8ecb7fb6 100644 --- a/samples/cpp/model_creation_sample/README.md +++ b/samples/cpp/model_creation_sample/README.md @@ -4,7 +4,7 @@ This sample demonstrates how to execute a synchronous inference using [model](h You do not need an XML file to create a model. The API of ov::Model allows creating a model on the fly from the source code. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_model_creation_sample_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_model_creation.html) ## Requirements @@ -14,7 +14,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | model weights file (\*.bin) | | Validated images | single-channel ``MNIST ubyte`` images | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_model_creation_sample_README.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_sample_model_creation.html) | The following C++ API is used in the application: @@ -43,4 +43,4 @@ The following C++ API is used in the application: | | ``ov::Model``, | | | | ``ov::ParameterVector::vector`` | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html).
\ No newline at end of file +Basic OpenVINO™ Runtime API is covered by [Hello Classification C++ sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/python/benchmark/bert_benchmark/README.md b/samples/python/benchmark/bert_benchmark/README.md index aceec41858764d..c01ade14584b52 100644 --- a/samples/python/benchmark/bert_benchmark/README.md +++ b/samples/python/benchmark/bert_benchmark/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_bert_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_bert_benchmark.html) The sample downloads a model and a tokenizer, exports the model to ONNX, reads the exported model and reshapes it to enforce dynamic input shapes, compiles the resulting model, downloads a dataset and runs benchmarking on the dataset. diff --git a/samples/python/benchmark/sync_benchmark/README.md b/samples/python/benchmark/sync_benchmark/README.md index b6a08f3e096916..58c0d7b60bdb28 100644 --- a/samples/python/benchmark/sync_benchmark/README.md +++ b/samples/python/benchmark/sync_benchmark/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options.
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_sync_benchmark.html) ## Requirements @@ -15,7 +15,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_sync_benchmark_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_sync_benchmark.html) | The following Python API is used in the application: diff --git a/samples/python/benchmark/throughput_benchmark/README.md b/samples/python/benchmark/throughput_benchmark/README.md index 0bf3b350d4e262..77823bda36ae38 100644 --- a/samples/python/benchmark/throughput_benchmark/README.md +++ b/samples/python/benchmark/throughput_benchmark/README.md @@ -2,9 +2,9 @@ This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. -The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses default model precision which is usually float32. +The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses default model precision which is usually float32. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_throughput_benchmark.html) ## Requirements @@ -17,7 +17,7 @@ For more detailed information on how this sample works, check the dedicated [art | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_sync_benchmark_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_throughput_benchmark.html) | The following Python API is used in the application: diff --git a/samples/python/classification_sample_async/README.md b/samples/python/classification_sample_async/README.md index 46ef4c3688ae9a..90925f0705154b 100644 --- a/samples/python/classification_sample_async/README.md +++ b/samples/python/classification_sample_async/README.md @@ -4,7 +4,7 @@ This sample demonstrates how to do inference of image classification models usin Models with only 1 input and output are supported. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_image_classification_async.html) ## Requirements @@ -13,7 +13,7 @@ For more detailed information on how this sample works, check the dedicated [art | Validated Models | [alexnet](https://docs.openvino.ai/2023.3/omz_models_model_alexnet.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_classification_sample_async_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_image_classification_async.html) | The following Python API is used in the application: @@ -25,4 +25,4 @@ The following Python API is used in the application: | | [openvino.runtime.AsyncInferQueue.wait_all](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.AsyncInferQueue.html#openvino.runtime.AsyncInferQueue.wait_all), | | | | [openvino.runtime.InferRequest.results](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.InferRequest.html#openvino.runtime.InferRequest.results) | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification Python Sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html). +Basic OpenVINO™ Runtime API is covered by [Hello Classification Python Sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/python/hello_classification/README.md b/samples/python/hello_classification/README.md index 8a3e79fa07cda9..00354063f26a06 100644 --- a/samples/python/hello_classification/README.md +++ b/samples/python/hello_classification/README.md @@ -4,7 +4,7 @@ This sample demonstrates how to do inference of image classification models usin Models with only 1 input and output are supported. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html) ## Requirements @@ -14,8 +14,7 @@ For more detailed information on how this sample works, check the dedicated [art | | [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html), | -| | [C](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html) | +| Other language realization | [C++, C](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html) | The following Python API is used in the application: diff --git a/samples/python/hello_query_device/README.md b/samples/python/hello_query_device/README.md index 59aef8f8399994..bb81b1d15b8226 100644 --- a/samples/python/hello_query_device/README.md +++ b/samples/python/hello_query_device/README.md @@ -2,14 +2,14 @@ This sample demonstrates how to show OpenVINO™ Runtime devices and print their metrics and default configuration values using [Query Device API feature](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_query_api.html). -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_hello_query_device.html) ## Requirements | Options | Values | | ----------------------------| --------------------------------------------------------------------------------------------------------| | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_query_device_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_hello_query_device.html) | The following Python API is used in the application: diff --git a/samples/python/hello_reshape_ssd/README.md b/samples/python/hello_reshape_ssd/README.md index f6fb47f5b2f554..01f203e1271b99 100644 --- a/samples/python/hello_reshape_ssd/README.md +++ b/samples/python/hello_reshape_ssd/README.md @@ -12,7 +12,7 @@ Models with only 1 input and output are supported.
| Validated Layout | NCHW | | Model Format | OpenVINO™ toolkit Intermediate Representation (.xml + .bin), ONNX (.onnx) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_reshape_ssd_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_hello_reshape_ssd.html) | The following Python API is used in the application: @@ -23,4 +23,4 @@ The following Python API is used in the application: | | [openvino.runtime.Output.get_any_name](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.Output.html#openvino.runtime.Output.get_any_name), | | | | [openvino.runtime.PartialShape](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.PartialShape.html) | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification Python* Sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html). +Basic OpenVINO™ Runtime API is covered by [Hello Classification Python* Sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/samples/python/model_creation_sample/README.md b/samples/python/model_creation_sample/README.md index 3bfd38637e060f..fc92d3a74a3303 100644 --- a/samples/python/model_creation_sample/README.md +++ b/samples/python/model_creation_sample/README.md @@ -2,7 +2,7 @@ This sample demonstrates how to run inference using a [model](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Model_Representation.html) built on the fly that uses weights from the LeNet classification model, which is known to work well on digit classification tasks. You do not need an XML file; the model is created from the source code on the fly. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_model_creation_sample_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_model_creation.html) ## Requirements @@ -11,7 +11,7 @@ For more detailed information on how this sample works, check the dedicated [art | Validated Models | LeNet | | Model Format | Model weights file (\*.bin) | | Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_model_creation_sample_README.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_sample_model_creation.html) | The following OpenVINO Python API is used in the application: @@ -30,4 +30,4 @@ The following OpenVINO Python API is used in the application: | | [openvino.runtime.opset8.relu](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.opset8.relu.html), | | | | [openvino.runtime.opset8.softmax](https://docs.openvino.ai/2023.3/api/ie_python_api/_autosummary/openvino.runtime.opset8.softmax.html) | | -Basic OpenVINO™ Runtime API is covered by [Hello Classification Python* Sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html). \ No newline at end of file +Basic OpenVINO™ Runtime API is covered by [Hello Classification Python* Sample](https://docs.openvino.ai/2023.3/openvino_sample_hello_classification.html). diff --git a/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md b/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md index 02b9e4eb636609..58638c96927906 100644 --- a/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md +++ b/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md @@ -59,7 +59,7 @@ For more details, see the [OpenCL on Linux](https://github.com/bashbaug/OpenCLPa ## 7. If you are using dGPU with XMX, ensure that HW_MATMUL feature is recognized -OpenVINO contains *hello_query_device* sample application: [link](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README.html) +OpenVINO contains *hello_query_device* sample application: [link](https://docs.openvino.ai/2023.3/openvino_sample_hello_query_device.html) With this option, you can check whether the Intel XMX (Xe Matrix Extension) feature is properly recognized or not. This is a hardware feature to accelerate matrix operations and is available on some discrete GPUs. diff --git a/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md index 0fde3e8d7612a7..fd57ae58ae61d4 100644 --- a/tools/benchmark_tool/README.md +++ b/tools/benchmark_tool/README.md @@ -2,9 +2,9 @@ This page demonstrates how to use the Benchmark Python Tool to estimate deep learning inference performance on supported devices. -> **NOTE**: This page describes usage of the Python implementation of the Benchmark Tool. For the C++ implementation, refer to the [Benchmark C++ Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. +> **NOTE**: This page describes usage of the Python implementation of the Benchmark Tool. For the C++ implementation, refer to the [Benchmark C++ Tool](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend.
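The model-creation READMEs above list the `openvino.runtime.opset8` ops used to build a model without an XML file; a hedged sketch of the same idea, with illustrative 64->10 shapes and random weights instead of the LeNet weights the sample actually loads:

```python
# Build a tiny model on the fly from opset8 ops; shapes and weights are
# illustrative only.
import numpy as np
from openvino.runtime import Model, opset8 as ops

data = ops.parameter([1, 64], np.float32, name="data")
weights = ops.constant(np.random.rand(64, 10).astype(np.float32))
fc = ops.matmul(data, weights, transpose_a=False, transpose_b=False)
act = ops.relu(fc)
probs = ops.softmax(act, axis=1)

model = Model([probs], [data], "model_creation_sketch")
```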
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_sample_benchmark_tool.html) ## Requirements From f01030e5df6a83b06b45bc419c917660c137bb4b Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Fri, 12 Jan 2024 17:31:15 +0400 Subject: [PATCH 27/43] Remove GNA specific subgraph test cases (#22046) * Remove shared subgraph_tests/ used in GNA only * Remove unused includes of deleted headers * TODO: COMMENTED CODE * Move `Basic_LSTM_S::GetNetwork` to `callback.hpp` * Return back `basic_lstm.hpp` * Simplify `ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout` --- .../behavior/infer_request/callback.hpp | 32 -- .../behavior/infer_request/io_blob.hpp | 1 - .../behavior/ov_infer_request/callback.hpp | 16 +- .../infer_request_dynamic.hpp | 1 - .../behavior/ov_infer_request/io_tensor.hpp | 1 - .../activation_concats_eltwise.hpp | 15 - .../include/subgraph_tests/activation_fq.hpp | 15 - .../include/subgraph_tests/basic_lstm.hpp | 2 + .../subgraph_tests/broadcast_power.hpp | 15 - .../include/subgraph_tests/cascade_concat.hpp | 19 -- .../include/subgraph_tests/clamp_fq.hpp | 15 - .../include/subgraph_tests/concat_conv.hpp | 15 - .../subgraph_tests/concat_multi_input.hpp | 28 -- .../subgraph_tests/concat_quantization.hpp | 27 -- ...ntization_during_memory_requantization.hpp | 15 - .../connect_split_concat_concat.hpp | 15 - .../subgraph_tests/const_conv_concat.hpp | 19 -- .../const_strided_slice_concat.hpp | 14 - .../subgraph_tests/conv_fq_eltwise.hpp | 15 - .../include/subgraph_tests/conv_fq_relu.hpp | 15 - .../convolution_relu_sequence.hpp | 15 - .../subgraph_tests/copy_before_squeeze.hpp | 15 - .../subgraph_tests/delayed_copy_layer.hpp | 19 -- .../subgraph_tests/eltwise_conv_eltwise.hpp | 33 --- .../eltwise_reshape_activation.hpp | 15 - .../include/subgraph_tests/fc_conv_fc.hpp | 23 -- .../first_connect_input_concat.hpp | 15 - .../subgraph_tests/fq_conv_fq_affine.hpp | 15 - .../subgraph_tests/fq_with_mixed_levels.hpp | 18 -- .../handling_orientation_conv.hpp | 15 - .../include/subgraph_tests/input_conv.hpp | 15 - .../subgraph_tests/input_split_concat.hpp | 15 - .../include/subgraph_tests/matmul_act_add.hpp | 15 - .../memory_eltwise_reshape_concat.hpp | 15 - .../subgraph_tests/memory_fq_concat_prelu.hpp | 18 -- .../subgraph_tests/multi_crops_to_concat.hpp | 13 - .../subgraph_tests/multi_input_scale.hpp | 13 - .../multioutput_eltwise_squeeze_eltwise.hpp | 15 - .../subgraph_tests/multiple_concat.hpp | 15 - .../subgraph_tests/multiple_input_fq.hpp | 18 -- .../negative_memory_layer_offset.hpp | 15 - .../parameter_reshape_result.hpp | 15 - .../permute_concat_concat_permute.hpp | 21 -- .../subgraph_tests/permute_concat_permute.hpp | 21 -- .../subgraph_tests/relu_split_reshape.hpp | 15 - .../scaleshift_conv_scaleshift.hpp | 29 -- .../include/subgraph_tests/softsign.hpp | 15 - .../split_concat_multi_inputs.hpp | 14 - .../include/subgraph_tests/split_conv.hpp | 15 - .../include/subgraph_tests/split_relu.hpp | 15 - .../split_trivial_permute_concat.hpp | 15 - .../include/subgraph_tests/strided_slice.hpp | 14 - .../subgraph_tests/stridedslice_concat.hpp | 15 - .../subgraph_tests/stridedslice_conv.hpp | 15 - .../transpose_conv_transpose_squeeze.hpp | 15 - .../include/subgraph_tests/trivial_concat.hpp | 15 - .../two_fake_quantize_to_fullyconnected.hpp |
15 - .../behavior/ov_infer_request/io_tensor.cpp | 1 - .../subgraph/activation_concats_eltwise.hpp | 31 -- .../subgraph/activation_fq.hpp | 60 ---- .../subgraph/basic_lstm.hpp | 2 + .../subgraph/broadcast_power.hpp | 33 --- .../subgraph/cascade_concat.hpp | 50 ---- .../shared_test_classes/subgraph/clamp_fq.hpp | 50 ---- .../subgraph/concat_conv.hpp | 43 --- .../subgraph/concat_multi_input.hpp | 42 --- .../subgraph/concat_quantization.hpp | 33 --- ...ntization_during_memory_requantization.hpp | 41 --- .../subgraph/connect_split_concat_concat.hpp | 33 --- .../subgraph/const_conv_concat.hpp | 43 --- .../subgraph/const_strided_slice_concat.hpp | 39 --- .../subgraph/conv_fq_eltwise.hpp | 56 ---- .../subgraph/conv_fq_relu.hpp | 56 ---- .../subgraph/convolution_relu_sequence.hpp | 51 ---- .../subgraph/copy_before_squeeze.hpp | 33 --- .../subgraph/delayed_copy_layer.hpp | 54 ---- .../subgraph/eltwise_conv_eltwise.hpp | 63 ---- .../subgraph/eltwise_reshape_activation.hpp | 29 -- .../subgraph/fc_conv_fc.hpp | 63 ---- .../subgraph/first_connect_input_concat.hpp | 34 --- .../subgraph/fq_conv_fq_affine.hpp | 56 ---- .../subgraph/fq_with_mixed_levels.hpp | 36 --- .../subgraph/handling_orientation_conv.hpp | 31 -- .../subgraph/input_conv.hpp | 43 --- .../subgraph/input_split_concat.hpp | 34 --- .../subgraph/matmul_act_add.hpp | 34 --- .../memory_eltwise_reshape_concat.hpp | 41 --- .../subgraph/memory_fq_concat_prelu.hpp | 50 ---- .../subgraph/multi_crops_to_concat.hpp | 35 --- .../subgraph/multi_input_scale.hpp | 30 -- .../multioutput_eltwise_squeeze_eltwise.hpp | 32 -- .../subgraph/multiple_concat.hpp | 27 -- .../subgraph/multiple_input_fq.hpp | 29 -- .../subgraph/negative_memory_layer_offset.hpp | 40 --- .../subgraph/parameter_reshape_result.hpp | 32 -- .../permute_concat_concat_permute.hpp | 69 ----- .../subgraph/permute_concat_permute.hpp | 33 --- .../subgraph/relu_split_reshape.hpp | 34 --- .../subgraph/scaleshift_conv_scaleshift.hpp | 53 ---- .../shared_test_classes/subgraph/softsign.hpp | 39 --- .../subgraph/split_concat_multi_inputs.hpp | 42 --- .../subgraph/split_conv.hpp | 43 --- .../subgraph/split_relu.hpp | 34 --- .../subgraph/split_trivial_permute_concat.hpp | 35 --- .../subgraph/strided_slice.hpp | 47 --- .../subgraph/stridedslice_concat.hpp | 43 --- .../subgraph/stridedslice_conv.hpp | 42 --- .../transpose_conv_transpose_squeeze.hpp | 49 ---- .../subgraph/trivial_concat.hpp | 32 -- .../two_fake_quantize_to_fullyconnected.hpp | 53 ---- .../subgraph/activation_concats_eltwise.cpp | 62 ---- .../src/subgraph/activation_fq.cpp | 84 ------ .../src/subgraph/broadcast_power.cpp | 47 --- .../src/subgraph/cascade_concat.cpp | 140 --------- .../src/subgraph/clamp_fq.cpp | 85 ------ .../src/subgraph/concat_conv.cpp | 95 ------ .../src/subgraph/concat_multi_input.cpp | 134 --------- ...ntization_during_memory_requantization.cpp | 147 ---------- .../src/subgraph/concat_qunatization.cpp | 62 ---- .../subgraph/connect_split_concat_concat.cpp | 46 --- .../src/subgraph/const_conv_concat.cpp | 95 ------ .../subgraph/const_strided_slice_concat.cpp | 110 ------- .../src/subgraph/conv_fq_eltwise.cpp | 124 -------- .../src/subgraph/conv_fq_relu.cpp | 123 -------- .../subgraph/convolution_relu_sequence.cpp | 84 ------ .../src/subgraph/copy_before_squeeze.cpp | 48 --- .../src/subgraph/delayed_copy_layer.cpp | 196 ------------- .../src/subgraph/eltwise_conv_eltwise.cpp | 275 ------------------ .../subgraph/eltwise_reshape_activation.cpp | 54 ---- .../src/subgraph/fc_conv_fc.cpp | 272 ----------------- 
.../subgraph/first_connect_input_concat.cpp | 46 --- .../src/subgraph/fq_conv_fq_affine.cpp | 139 --------- .../src/subgraph/fq_with_mixed_levels.cpp | 77 ----- .../subgraph/handling_orientation_conv.cpp | 58 ---- .../src/subgraph/input_conv.cpp | 108 ------- .../src/subgraph/input_split_concat.cpp | 49 ---- .../src/subgraph/matmul_act_add.cpp | 49 ---- .../memory_eltwise_reshape_concat.cpp | 147 ---------- .../src/subgraph/memory_fq_concat_prelu.cpp | 143 --------- .../src/subgraph/multi_crops_to_concat.cpp | 96 ------ .../src/subgraph/multi_input_scale.cpp | 54 ---- .../multioutput_eltwise_squeeze_eltwise.cpp | 56 ---- .../src/subgraph/multiple_concat.cpp | 58 ---- .../src/subgraph/multiple_input_fq.cpp | 68 ----- .../subgraph/negative_memory_layer_offset.cpp | 107 ------- .../src/subgraph/parameter_reshape_result.cpp | 44 --- .../permute_concat_concat_permute.cpp | 127 -------- .../src/subgraph/permute_concat_permute.cpp | 70 ----- .../src/subgraph/relu_split_reshape.cpp | 52 ---- .../subgraph/scaleshift_conv_scaleshift.cpp | 187 ------------ .../src/subgraph/softsign.cpp | 79 ----- .../subgraph/split_concat_multi_inputs.cpp | 64 ---- .../src/subgraph/split_conv.cpp | 98 ------- .../src/subgraph/split_relu.cpp | 46 --- .../subgraph/split_trivial_permute_concat.cpp | 52 ---- .../src/subgraph/strided_slice.cpp | 69 ----- .../src/subgraph/stridedslice_concat.cpp | 76 ----- .../src/subgraph/stridedslice_conv.cpp | 90 ------ .../transpose_conv_transpose_squeeze.cpp | 88 ------ .../src/subgraph/trivial_concat.cpp | 58 ---- .../two_fake_quantize_to_fullyconnected.cpp | 150 ---------- 161 files changed, 10 insertions(+), 7925 deletions(-) delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/activation_concats_eltwise.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/activation_fq.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/broadcast_power.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/cascade_concat.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/clamp_fq.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/concat_conv.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization_during_memory_requantization.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/connect_split_concat_concat.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/const_conv_concat.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/const_strided_slice_concat.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_relu.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/convolution_relu_sequence.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/copy_before_squeeze.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/delayed_copy_layer.hpp delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_conv_eltwise.hpp delete mode 100644 
src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_reshape_activation.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/fc_conv_fc.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/first_connect_input_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/fq_conv_fq_affine.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/fq_with_mixed_levels.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/handling_orientation_conv.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/input_conv.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/input_split_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/matmul_act_add.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/memory_eltwise_reshape_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/memory_fq_concat_prelu.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/multi_crops_to_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/multi_input_scale.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/multiple_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/multiple_input_fq.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/negative_memory_layer_offset.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/parameter_reshape_result.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_concat_permute.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_permute.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift_conv_scaleshift.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/softsign.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_multi_inputs.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/split_conv.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/split_trivial_permute_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/strided_slice.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_conv.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/transpose_conv_transpose_squeeze.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp
 delete mode 100644 src/tests/functional/plugin/shared/include/subgraph_tests/two_fake_quantize_to_fullyconnected.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_concats_eltwise.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_fq.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/clamp_fq.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_reshape_activation.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_input_scale.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_input_fq.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_permute.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/strided_slice.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp
 delete mode 100644 src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp
diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/callback.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/callback.hpp
index 9e5adc8ecbe782..51e0c4b478d73a 100644
--- a/src/tests/functional/plugin/shared/include/behavior/infer_request/callback.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/infer_request/callback.hpp
@@ -6,7 +6,6 @@
 #include
-#include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "base/behavior_test_utils.hpp"
 namespace BehaviorTestsDefinitions {
@@ -110,37 +109,6 @@ TEST_P(InferRequestCallbackTests, LegacyCastAndSetuserDataGetUserData) {
     ASSERT_EQ(42, userData);
 }
-TEST_P(InferRequestCallbackTests, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) {
-    // Create CNNNetwork from ngraph::Function
-    // return ngrpah::Function
-    // GetNetwork(3000, 380) make inference around 20ms on GNA SW
-    // so increases chances for getting RESULT_NOT_READY
-    function = SubgraphTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38);
-    cnnNet = InferenceEngine::CNNNetwork(function);
-    // Load CNNNetwork to target plugins
-    execNet = ie->LoadNetwork(cnnNet, target_device, configuration);
-    // Create InferRequest
-    InferenceEngine::InferRequest req;
-    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
-    InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK;
-    std::promise callbackTimeStamp;
-    auto callbackTimeStampFuture = callbackTimeStamp.get_future();
-    // add a callback to the request and capture the timestamp
-    req.SetCompletionCallback([&]() {
-        callbackTimeStamp.set_value(std::chrono::system_clock::now());
-    });
-    req.StartAsync();
-    ASSERT_NO_THROW(sts = req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY));
-    // get timestamp taken AFTER return from the Wait(STATUS_ONLY)
-    const auto afterWaitTimeStamp = std::chrono::system_clock::now();
-    // IF the callback timestamp is larger than the afterWaitTimeStamp
-    // then we should observe RESULT_NOT_READY
-    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
-        ASSERT_TRUE(sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
-    }
-    ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
-}
-
 TEST_P(InferRequestCallbackTests, ImplDoseNotCopyCallback) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp
index ed664576d410c7..e06d4c9b55bb7b 100644
--- a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp
@@ -8,7 +8,6 @@
 #include
 #include "base/behavior_test_utils.hpp"
-#include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
 namespace BehaviorTestsDefinitions {
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp
index 12cf1addc49f39..7252feb04fb810 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/callback.hpp
@@ -5,8 +5,8 @@
 #pragma once
 #include
+
 #include "base/ov_behavior_test_utils.hpp"
-#include "shared_test_classes/subgraph/basic_lstm.hpp"
 namespace ov {
 namespace test {
@@ -17,7 +17,7 @@ TEST_P(OVInferRequestCallbackTests, canCallAsyncWithCompletionCallback) {
     ov::InferRequest req;
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     bool is_called = false;
-    OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) {
+    OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
         // HSD_1805940120: Wait on starting callback return HDDL_ERROR_INVAL_TASK_HANDLE
         ASSERT_EQ(exception_ptr, nullptr);
         is_called = true;
@@ -31,7 +31,7 @@ TEST_P(OVInferRequestCallbackTests, syncInferDoesNotCallCompletionCallback) {
     ov::InferRequest req;
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     bool is_called = false;
-    req.set_callback([&] (std::exception_ptr exception_ptr) {
+    req.set_callback([&](std::exception_ptr exception_ptr) {
         ASSERT_EQ(nullptr, exception_ptr);
         is_called = true;
     });
@@ -50,7 +50,7 @@ TEST_P(OVInferRequestCallbackTests, canStartSeveralAsyncInsideCompletionCallback
     ov::InferRequest req;
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
-    OV_ASSERT_NO_THROW(req.set_callback([&] (std::exception_ptr exception_ptr) {
+    OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
         if (exception_ptr) {
             data.promise.set_exception(exception_ptr);
         } else {
@@ -74,7 +74,7 @@ TEST_P(OVInferRequestCallbackTests, canStartSeveralAsyncInsideCompletionCallback
 TEST_P(OVInferRequestCallbackTests, returnGeneralErrorIfCallbackThrowException) {
     ov::InferRequest req;
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
-    OV_ASSERT_NO_THROW(req.set_callback([] (std::exception_ptr) {
+    OV_ASSERT_NO_THROW(req.set_callback([](std::exception_ptr) {
         OPENVINO_THROW("Throw");
     }));
     OV_ASSERT_NO_THROW(req.start_async());
@@ -82,10 +82,6 @@
 }
 TEST_P(OVInferRequestCallbackTests, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) {
-    // GetNetwork(3000, 380) make inference around 20ms on GNA SW
-    // so increases chances for getting RESULT_NOT_READY
-    OV_ASSERT_NO_THROW(execNet = core->compile_model(
-        SubgraphTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38), target_device, configuration));
     ov::InferRequest req;
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     std::promise callbackTimeStamp;
@@ -116,7 +112,7 @@ TEST_P(OVInferRequestCallbackTests, ImplDoesNotCopyCallback) {
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     {
         auto somePtr = std::make_shared(42);
-        OV_ASSERT_NO_THROW(req.set_callback([somePtr] (std::exception_ptr exception_ptr) {
+        OV_ASSERT_NO_THROW(req.set_callback([somePtr](std::exception_ptr exception_ptr) {
             ASSERT_EQ(nullptr, exception_ptr);
             ASSERT_EQ(1, somePtr.use_count());
         }));
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
index ed67236666d380..28918d8f376733 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp
@@ -21,7 +21,6 @@
 #include "functional_test_utils/plugin_cache.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
-#include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 // TODO [mandrono]: move current test case inside CPU plug-in and return the original tests
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp
index d13d53366bd40b..27281cf468a16b 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp
@@ -7,7 +7,6 @@
 #include
 #include
-#include "shared_test_classes/subgraph/basic_lstm.hpp"
 #include "base/ov_behavior_test_utils.hpp"
 namespace ov {
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/activation_concats_eltwise.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/activation_concats_eltwise.hpp
deleted file mode 100644
index 3385f3067d1b75..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/activation_concats_eltwise.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/activation_concats_eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ActivationConcatsEltwise, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/activation_fq.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/activation_fq.hpp
deleted file mode 100644
index 7543ca014ffe56..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/activation_fq.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/activation_fq.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ActivationFakeQuantizeSubgraphTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp
index f75cb259fced23..55dcd349bf7201 100644
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp
+++ b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+// TODO (vurusovs): delete file after removing dependency in other components
+
 #pragma once
 #include
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/broadcast_power.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/broadcast_power.hpp
deleted file mode 100644
index 256abfc1b6dd4a..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/broadcast_power.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/broadcast_power.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(BroadcastPowerTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/cascade_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/cascade_concat.hpp
deleted file mode 100644
index 2b94ae5e7f1ab2..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/cascade_concat.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/cascade_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(CascadeConcat, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(CascadeConcatWithMultiConnReshape, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/clamp_fq.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/clamp_fq.hpp
deleted file mode 100644
index 6561a55a5ba669..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/clamp_fq.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/clamp_fq.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ClampFakeQuantizeSubgraphTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/concat_conv.hpp
deleted file mode 100644
index 3522ee83fc451d..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_conv.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/concat_conv.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConcatConvTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp
deleted file mode 100644
index 2eda05df58325a..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/concat_multi_input.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConcatMultiInput, CompareWithRefStridedSlice) {
-    GenerateStridedSliceModel();
-    Run();
-};
-
-TEST_P(ConcatMultiInput, CompareWithRefConstOnly) {
-    GenerateConstOnlyModel();
-    Run();
-};
-
-TEST_P(ConcatMultiInput, CompareWithRefMemory) {
-    GenerateMemoryModel();
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp
deleted file mode 100644
index a370c067ae631c..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/concat_quantization.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConcatQuantization, CompareWithRefImpl) {
-    InferenceEngine::Core* core = PluginCache::get().ie(targetDevice).get();
-    if (!configuration.empty()) {
-        core->SetConfig(configuration, targetDevice);
-    }
-
-    try {
-        InferenceEngine::CNNNetwork cnnNetwork = InferenceEngine::CNNNetwork{ function };
-        executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
-    }
-    catch (InferenceEngine::Exception & ex) {
-        FAIL() << ex.what();
-    }
-};
-
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization_during_memory_requantization.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization_during_memory_requantization.hpp
deleted file mode 100644
index 1c01dfcd42e4c7..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization_during_memory_requantization.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConcatQuantDuringMemoryRequantTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/connect_split_concat_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/connect_split_concat_concat.hpp
deleted file mode 100644
index f3d9d9f9ef0acc..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/connect_split_concat_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/connect_split_concat_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SplitConcatConcatTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/const_conv_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/const_conv_concat.hpp
deleted file mode 100644
index 21407bef4daaa3..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/const_conv_concat.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/const_conv_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConstConvConcatTest, CompareWithRefImpl) {
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    // Create another copy of function for validation since some data will be changed by GNA plugin
-    SetUp();
-    Validate();
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/const_strided_slice_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/const_strided_slice_concat.hpp
deleted file mode 100644
index ba36681ab95585..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/const_strided_slice_concat.hpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/const_strided_slice_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConstStridedSliceConcatTest, CompareWithRefImpl) {
-    Run();
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp
deleted file mode 100644
index eb4de225ac1e41..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/conv_fq_eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConvFqEltwiseTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_relu.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_relu.hpp
deleted file mode 100644
index 90cc1548418233..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_relu.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/conv_fq_relu.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConvFqReluTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/convolution_relu_sequence.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/convolution_relu_sequence.hpp
deleted file mode 100644
index 509d6fa4347d8b..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/convolution_relu_sequence.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/convolution_relu_sequence.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConvolutionReluSequenceTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/copy_before_squeeze.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/copy_before_squeeze.hpp
deleted file mode 100644
index 8b51a65282b3f6..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/copy_before_squeeze.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/copy_before_squeeze.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(CopyBeforeSqueezeTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/delayed_copy_layer.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/delayed_copy_layer.hpp
deleted file mode 100644
index 829bdfd8118c92..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/delayed_copy_layer.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/delayed_copy_layer.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(DelayedCopyTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(DelayedCopyAfterReshapeWithMultipleConnTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_conv_eltwise.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_conv_eltwise.hpp
deleted file mode 100644
index c6b3957719fa0b..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_conv_eltwise.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/eltwise_conv_eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(EltwiseAfterConvTest, CompareWithRefImpl) {
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    // Create another copy of function for validation since some data will be changed by GNA plugin
-    SetUp();
-    Validate();
-};
-
-TEST_P(EltwiseBeforeConvTest, CompareWithRefImpl) {
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    // Create another copy of function for validation since some data will be changed by GNA plugin
-    SetUp();
-    Validate();
-};
-
-TEST_P(EltwiseWithTwoConvsAsInputsTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_reshape_activation.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_reshape_activation.hpp
deleted file mode 100644
index 62fb484aecc115..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/eltwise_reshape_activation.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/eltwise_reshape_activation.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(EltwiseReshapeActivation, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/fc_conv_fc.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/fc_conv_fc.hpp
deleted file mode 100644
index 131188a8bb18fc..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/fc_conv_fc.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/fc_conv_fc.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(FcAfterConvTest, CompareWithRefImpl) {
-    Run();
-};
-
-TEST_P(FcBeforeConvTest, CompareWithRefImpl) {
-    Run();
-};
-
-TEST_P(FcBetweenConvsTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/first_connect_input_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/first_connect_input_concat.hpp
deleted file mode 100644
index f42ef2ad8a2c04..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/first_connect_input_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ConcatFirstInputTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/fq_conv_fq_affine.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/fq_conv_fq_affine.hpp
deleted file mode 100644
index a12f547f06779b..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/fq_conv_fq_affine.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/fq_conv_fq_affine.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(FqConvFqAffineTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/fq_with_mixed_levels.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/fq_with_mixed_levels.hpp
deleted file mode 100644
index 47fe2bb8a0de5e..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/fq_with_mixed_levels.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef TEST_FQ_WITH_MIXED_LEVELS_HPP
-#define TEST_FQ_WITH_MIXED_LEVELS_HPP
-
-#include "shared_test_classes/subgraph/fq_with_mixed_levels.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(FqWithMixedLevelsTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
-
-#endif // TEST_FQ_WITH_MIXED_LEVELS_HPP
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/handling_orientation_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/handling_orientation_conv.hpp
deleted file mode 100644
index d97b14d02c1031..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/handling_orientation_conv.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/handling_orientation_conv.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(HandlingOrientationClass, CompareWithRefs){
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/input_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/input_conv.hpp
deleted file mode 100644
index a540ea0fcc9cca..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/input_conv.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/input_conv.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(InputConvTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/input_split_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/input_split_concat.hpp
deleted file mode 100644
index 2673ce16a91729..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/input_split_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/input_split_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(InputSplitConcatTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_act_add.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_act_add.hpp
deleted file mode 100644
index 1a122f638d4986..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/matmul_act_add.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/matmul_act_add.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MatMulActAddTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/memory_eltwise_reshape_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/memory_eltwise_reshape_concat.hpp
deleted file mode 100644
index a47e38909958fd..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/memory_eltwise_reshape_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MemoryEltwiseReshapeConcatTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/memory_fq_concat_prelu.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/memory_fq_concat_prelu.hpp
deleted file mode 100644
index c4710febf66bc4..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/memory_fq_concat_prelu.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef PLUGIN_SHARED_MEMORY_FQ_CONCAT_PRELU_HPP
-#define PLUGIN_SHARED_MEMORY_FQ_CONCAT_PRELU_HPP
-
-#include "shared_test_classes/subgraph/memory_fq_concat_prelu.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MemoryFqConcatPrelu, CompareWithRefs){
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
-
-#endif // PLUGIN_SHARED_MEMORY_FQ_CONCAT_PRELU_HPP
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multi_crops_to_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multi_crops_to_concat.hpp
deleted file mode 100644
index 70abb43f52b330..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/multi_crops_to_concat.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/multi_crops_to_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MultiCropsToConcatTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multi_input_scale.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multi_input_scale.hpp
deleted file mode 100644
index e0bb0a5a6674bf..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/multi_input_scale.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/multi_input_scale.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MultipleInputScaleTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp
deleted file mode 100644
index 976f1f7b9a6472..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/multioutput_eltwise_squeeze_eltwise.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MultioutputEltwiseReshapeEltwise, CompareWithRefs){
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_concat.hpp
deleted file mode 100644
index c3555c0a9d88a0..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/multiple_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MultipleConcatTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_input_fq.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_input_fq.hpp
deleted file mode 100644
index c9bcb5a4c6a08f..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/multiple_input_fq.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef MULTIPLE_INPUT_HPP
-#define MULTIPLE_INPUT_HPP
-
-#include "shared_test_classes/subgraph/multiple_input_fq.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(MultipleInputTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
-
-#endif // MULTIPLE_INPUT_HPP
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/negative_memory_layer_offset.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/negative_memory_layer_offset.hpp
deleted file mode 100644
index 900421270bbc57..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/negative_memory_layer_offset.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/negative_memory_layer_offset.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(NegativeMemoryOffsetTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_reshape_result.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_reshape_result.hpp
deleted file mode 100644
index cf3e1675eb656b..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/parameter_reshape_result.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/parameter_reshape_result.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ParamReshapeResult, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_concat_permute.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_concat_permute.hpp
deleted file mode 100644
index 692dcc56e575cc..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_concat_permute.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/permute_concat_concat_permute.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-using PermuteConcatConcatPermuteNeg = PermuteConcatConcatPermute;
-
-TEST_P(PermuteConcatConcatPermute, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(PermuteConcatConcatPermuteNeg, CompareWithRefs) {
-    ExpectLoadNetworkToThrow("type: Concat, and concatenation axis(");
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_permute.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_permute.hpp
deleted file mode 100644
index ebbbdbff8a6217..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/permute_concat_permute.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/permute_concat_permute.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-using PermuteConcatPermuteNeg = PermuteConcatPermute;
-
-TEST_P(PermuteConcatPermute, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(PermuteConcatPermuteNeg, CompareWithRefs) {
-    ExpectLoadNetworkToThrow("type: Concat, and concatenation axis(");
-}
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp
deleted file mode 100644
index 2a7aaed7d7d27e..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/relu_split_reshape.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/relu_split_reshape.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ReluSplitReshape, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift_conv_scaleshift.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift_conv_scaleshift.hpp
deleted file mode 100644
index c368363e7492f2..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/scaleshift_conv_scaleshift.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(ScaleShiftAfterConvTest, CompareWithRefImpl) {
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    // Create another copy of function for validation since some data will be changed by GNA plugin
-    SetUp();
-    Validate();
-};
-
-TEST_P(ScaleShiftBeforeConvTest, CompareWithRefImpl) {
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    // Create another copy of function for validation since some data will be changed by GNA plugin
-    SetUp();
-    Validate();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/softsign.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/softsign.hpp
deleted file mode 100644
index 5322be06b6e26f..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/softsign.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/softsign.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SoftsignTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_multi_inputs.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_multi_inputs.hpp
deleted file mode 100644
index 87501580a84e46..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_concat_multi_inputs.hpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/split_concat_multi_inputs.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SplitConcatMultiInputsTest, CompareWithRefs) {
-    Run();
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv.hpp
deleted file mode 100644
index 8f99213ced30e8..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_conv.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/split_conv.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SplitConvTest, CompareWithRefImpl) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp
deleted file mode 100644
index e3b81fc0fbe7a9..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/split_relu.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SplitRelu, CompareWithRefs){
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/split_trivial_permute_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/split_trivial_permute_concat.hpp
deleted file mode 100644
index 57b2788040de85..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/split_trivial_permute_concat.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/subgraph/split_trivial_permute_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-TEST_P(SplitTrivialPermuteConcatTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/strided_slice.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/strided_slice.hpp
deleted file mode 100644
index 9d070b34041e38..00000000000000
--- a/src/tests/functional/plugin/shared/include/subgraph_tests/strided_slice.hpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/strided_slice.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(StridedSliceTest, CompareWithRefs){ - Run(); -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_concat.hpp deleted file mode 100644 index bfe564fa1619b9..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_concat.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/stridedslice_concat.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(SliceConcatTest, CompareWithRefImpl) { - Run(); -}; - -} // namespace SubgraphTestsDefinitions \ No newline at end of file diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_conv.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_conv.hpp deleted file mode 100644 index 7b1e4341a46d60..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/stridedslice_conv.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/stridedslice_conv.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(SliceConvTest, CompareWithRefImpl) { - Run(); -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/transpose_conv_transpose_squeeze.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/transpose_conv_transpose_squeeze.hpp deleted file mode 100644 index 3c51b8677c98c4..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/transpose_conv_transpose_squeeze.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(TransposeConvTest, CompareWithRefImpl) { - Run(); -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp deleted file mode 100644 index ba10e88d96c471..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/subgraph/trivial_concat.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(TrivialConcatLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/two_fake_quantize_to_fullyconnected.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/two_fake_quantize_to_fullyconnected.hpp deleted file mode 100644 index 530f8edbb6e3fe..00000000000000 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/two_fake_quantize_to_fullyconnected.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
"shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp" - -namespace SubgraphTestsDefinitions { - -TEST_P(FakeQuantizeSubgraphTest, CompareWithRefs) { - Run(); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp index a6e24feebd50a7..4f55c443118208 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -5,7 +5,6 @@ #include #include -#include "shared_test_classes/subgraph/basic_lstm.hpp" #include "behavior/ov_infer_request/io_tensor.hpp" #include #include "openvino/op/parameter.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_concats_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_concats_eltwise.hpp deleted file mode 100644 index 59ff5dd2efdc49..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_concats_eltwise.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace SubgraphTestsDefinitions { - -using ActivationConcatsEltwiseParamsTuple = typename std::tuple< - size_t, // input size - size_t, // concat const size - InferenceEngine::Precision, // precision - std::string, // device name - std::map // configuration ->; - - -class ActivationConcatsEltwise : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_fq.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_fq.hpp deleted file mode 100644 index 317aa5ee565fd1..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/activation_fq.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace SubgraphTestsDefinitions { -static std::map activationNames = { - {ngraph::helpers::ActivationTypes::Sigmoid, "Sigmoid"}, - {ngraph::helpers::ActivationTypes::Tanh, "Tanh"}, - {ngraph::helpers::ActivationTypes::Relu, "Relu"}, - {ngraph::helpers::ActivationTypes::Exp, "Exp"}, - {ngraph::helpers::ActivationTypes::Log, "Log"}, - {ngraph::helpers::ActivationTypes::Sign, "Sign"}, - {ngraph::helpers::ActivationTypes::Abs, "Abs"}, -}; - -typedef std::tuple< - std::vector, // levels - std::vector>, // const inputs shape - std::vector // input generator data: low, high, resolution -> fqSpecificParams; - -typedef std::tuple< - fqSpecificParams, - ngraph::helpers::ActivationTypes, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - 
InferenceEngine::SizeVector, // Input shapes - LayerTestsUtils::TargetDevice, // Device name - std::pair> // Additional backend configuration and alis name to it -> fqSubgraphTestParamsSet; - -class ActivationFakeQuantizeSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - -protected: - float inputDataMin = 0.0; - float inputDataMax = 10.0; - float inputDataResolution = 1.0; - int32_t seed = 1; -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp index 6d58656c0752ea..a71ef595e3bfaa 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/basic_lstm.hpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +// TODO (vurusovs): delete file after removing dependency in other components + #pragma once #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp deleted file mode 100644 index e4f0530e2012dd..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/broadcast_power.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector>, // Input shapes - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::map //Configuration -> BroadCastPowerTuple; - -class BroadcastPowerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp deleted file mode 100644 index 00534c53e72442..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/cascade_concat.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector>, //input shapes 1 - std::vector>, //input shapes 2 - std::vector>, //input shapes 3 - InferenceEngine::Precision, //Network precision - bool, //Multioutput -> True, Single out ->false - std::string, //Device name - std::map//config - > CascadeConcatTuple; - -class CascadeConcat - : public 
-    : public testing::WithParamInterface<CascadeConcatTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<CascadeConcatTuple> &obj);
-protected:
-    void SetUp() override;
-};
-
-typedef std::tuple<
-    std::vector<size_t>,                // input shapes
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // config
-    > CascadeConcatWithMultiConnReshapeTuple;
-
-class CascadeConcatWithMultiConnReshape
-    : public testing::WithParamInterface<CascadeConcatWithMultiConnReshapeTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<CascadeConcatWithMultiConnReshapeTuple> &obj);
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/clamp_fq.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/clamp_fq.hpp
deleted file mode 100644
index d2c954bde03a53..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/clamp_fq.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include "../base/layer_test_utils.hpp"
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<
-    std::vector<size_t>,               // levels
-    std::vector<std::vector<size_t>>,  // const inputs shape
-    std::vector<float>,                // clamp min max
-    std::vector<float>                 // input generator data: low, high, resolution
-> fqSpecificParams;
-
-typedef std::tuple<
-    fqSpecificParams,
-    InferenceEngine::Precision,        // Net precision
-    InferenceEngine::Precision,        // Input precision
-    InferenceEngine::Precision,        // Output precision
-    InferenceEngine::Layout,           // Input layout
-    InferenceEngine::Layout,           // Output layout
-    InferenceEngine::SizeVector,       // Input shapes
-    LayerTestsUtils::TargetDevice,     // Device name
-    std::pair<std::string, std::map<std::string, std::string>>  // Additional backend configuration and alias name for it
-> fqSubgraphTestParamsSet;
-
-class ClampFakeQuantizeSubgraphTest : public testing::WithParamInterface<fqSubgraphTestParamsSet>,
-                                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<fqSubgraphTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    float inputDataMin = 0.0;
-    float inputDataMax = 10.0;
-    float inputDataResolution = 1.0;
-    int32_t seed = 1;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp
deleted file mode 100644
index 37d0410ff5b374..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_conv.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Input Channels
-    size_t                               // Output Channels
-> ConcatConvParams;
-
-class ConcatConvTest : public testing::WithParamInterface<ConcatConvParams>,
-                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConcatConvParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp
deleted file mode 100644
index a63203d7a6465b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_multi_input.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<std::vector<size_t>>,   // Input shapes
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Config
-> concatMultiParams;
-
-class ConcatMultiInput : public testing::WithParamInterface<concatMultiParams>,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-private:
-    std::vector<size_t> paramSize;
-    ngraph::element::Type ngPrc;
-    std::vector<std::vector<size_t>> inputShapes;
-
-public:
-    void GenerateStridedSliceModel();
-    void GenerateConstOnlyModel();
-    void GenerateMemoryModel();
-    static std::string getTestCaseName(const testing::TestParamInfo<concatMultiParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp
deleted file mode 100644
index 33f141b6e63dc2..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Configuration
-> concatQuantizationParams;
-
-class ConcatQuantization : public testing::WithParamInterface<concatQuantizationParams>,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<concatQuantizationParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp
deleted file mode 100644
index 15bc230de6a304..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    size_t,                             // Input size
-    size_t,                             // Hidden size
-    std::map<std::string, std::string>  // Configuration
-> ConcatQuantDuringMemoryRequantTuple;
-
-class ConcatQuantDuringMemoryRequantTest
-    : public testing::WithParamInterface<ConcatQuantDuringMemoryRequantTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-private:
-    void switchToNgraphFriendlyModel();
-    std::vector<float> memory_1_init;
-    std::vector<float> memory_2_init;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConcatQuantDuringMemoryRequantTuple>& obj);
-protected:
-    void SetUp() override;
-    void Run() override;
-    void LoadNetwork() override;
-    void Infer() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp
deleted file mode 100644
index 682c401f397f1e..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/connect_split_concat_concat.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Configuration
-> SplitConcatConcatParams;
-
-class SplitConcatConcatTest:
-    public testing::WithParamInterface<SplitConcatConcatParams>,
-    virtual public LayerTestsUtils::LayerTestsCommon{
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<SplitConcatConcatParams> &obj);
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp
deleted file mode 100644
index faf192cb546ab7..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_conv_concat.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Input Channels
-    size_t                               // Output Channels
-> ConstConvConcatParams;
-
-class ConstConvConcatTest : public testing::WithParamInterface<ConstConvConcatParams>,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConstConvConcatParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp
deleted file mode 100644
index f48915e31f21f0..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/const_strided_slice_concat.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    uint32_t,                            // Input chunk size
-    uint32_t,                            // Input chunk number
-    uint32_t,                            // Const chunk size
-    uint32_t                             // Const chunk number
-> ConstStridedSliceConcatParams;
-
-class ConstStridedSliceConcatTest : public testing::WithParamInterface<ConstStridedSliceConcatParams>,
-                                    virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConstStridedSliceConcatParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp
deleted file mode 100644
index b9435dbced40c5..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    size_t,              // levels
-    std::vector<float>,  // input generator data: low, high, resolution
-    float                // convolution weights' FQ min and max value
-> FqSpecificParams;
-
-typedef std::tuple<
-    std::vector<size_t>,  // Kernel Shape
-    std::vector<size_t>,  // Strides
-    size_t,               // Input channels
-    size_t                // Output channels
-> ConvParams;
-
-typedef std::tuple<
-    FqSpecificParams,
-    ConvParams,
-    InferenceEngine::Precision,         // Net precision
-    InferenceEngine::SizeVector,        // Input shapes
-    LayerTestsUtils::TargetDevice,      // Device name
-    std::map<std::string, std::string>  // Additional backend configuration and alias name for it
-> ConvFqEltwiseTestParamsSet;
-
-class ConvFqEltwiseTest : public testing::WithParamInterface<ConvFqEltwiseTestParamsSet>,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConvFqEltwiseTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    float inputDataMin = 0.0;
-    float inputDataMax = 10.0;
-    float inputDataResolution = 1.0;
-    int32_t seed = 1;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp
deleted file mode 100644
index 6d87a5c58441e8..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_relu.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    size_t,              // levels
-    std::vector<float>,  // input generator data: low, high, resolution
-    float                // convolution weights' FQ min and max value
-> FqSpecificParams;
-
-typedef std::tuple<
-    std::vector<size_t>,  // Kernel Shape
-    std::vector<size_t>,  // Strides
-    size_t,               // Input channels
-    size_t                // Output channels
-> ConvParams;
-
-typedef std::tuple<
-    FqSpecificParams,
-    ConvParams,
-    InferenceEngine::Precision,         // Net precision
-    InferenceEngine::SizeVector,        // Input shapes
-    LayerTestsUtils::TargetDevice,      // Device name
-    std::map<std::string, std::string>  // Additional backend configuration and alias name for it
-> ConvFqReluTestParamsSet;
-
-class ConvFqReluTest : public testing::WithParamInterface<ConvFqReluTestParamsSet>,
-                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ConvFqReluTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    float inputDataMin = 0.0;
-    float inputDataMax = 10.0;
-    float inputDataResolution = 1.0;
-    int32_t seed = 1;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp
deleted file mode 100644
index 3fb665128a7750..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/convolution_relu_sequence.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef struct {
-    InferenceEngine::SizeVector kernelSize;
-    InferenceEngine::SizeVector strides;
-    std::vector<ptrdiff_t> padBegin;
-    std::vector<ptrdiff_t> padEnd;
-    size_t numOutChannels;
-    InferenceEngine::SizeVector poolingWindow;
-    InferenceEngine::SizeVector poolingStride;
-} convReluSpecificParams;
-
-typedef struct {
-    InferenceEngine::SizeVector inputShape;
-    std::vector<convReluSpecificParams> sequenceDesc;
-} convReluSpecificParamsAll;
-
-typedef std::tuple<
-    convReluSpecificParamsAll,          // CNN2D sequence desc
-    InferenceEngine::Precision,         // Net precision
-    InferenceEngine::Precision,         // Input precision
-    InferenceEngine::Precision,         // Output precision
-    LayerTestsUtils::TargetDevice,      // Device name
-    std::map<std::string, std::string>  // Configuration
-> convReluSequenceTestParamsSet;
-
-class ConvolutionReluSequenceTest : public testing::WithParamInterface<convReluSequenceTestParamsSet>,
-                                    virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<convReluSequenceTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp
deleted file mode 100644
index fa84e2a1e05d4d..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/copy_before_squeeze.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    std::vector<size_t>,                // Input shape
-    std::map<std::string, std::string>  // Configuration
-> CopyBeforeSqueezeTuple;
-
-class CopyBeforeSqueezeTest
-    : public testing::WithParamInterface<CopyBeforeSqueezeTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<CopyBeforeSqueezeTuple>& obj);
-protected:
-    void SetUp() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp
deleted file mode 100644
index 927fed5b9502be..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/delayed_copy_layer.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network precision
-    std::string,                         // Device name
-    std::map<std::string, std::string>,  // Configuration
-    size_t                               // Memory layer size
-> DelayedCopyTuple;
-
-class DelayedCopyTestBase
-    : public testing::WithParamInterface<DelayedCopyTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-private:
-    void InitMemory();
-    virtual void switchToNgraphFriendlyModel() = 0;
-protected:
-    void Run() override;
-    void LoadNetwork() override;
-    void Infer() override;
-    std::vector<float> memory_init;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<DelayedCopyTuple> &obj);
-};
-
-class DelayedCopyTest : public DelayedCopyTestBase {
-private:
-    void switchToNgraphFriendlyModel() override;
-protected:
-    void SetUp() override;
-};
-
-class DelayedCopyAfterReshapeWithMultipleConnTest : public DelayedCopyTestBase {
-private:
-    void switchToNgraphFriendlyModel() override;
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp
deleted file mode 100644
index 17e997deb7a4d9..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_conv_eltwise.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Input Channels
-    size_t                               // Output Channels
-> EltwiseConvEltwiseParams;
-
-class EltwiseAfterConvTest : public testing::WithParamInterface<EltwiseConvEltwiseParams>,
-                             virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(testing::TestParamInfo<EltwiseConvEltwiseParams> obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-class EltwiseBeforeConvTest : public testing::WithParamInterface<EltwiseConvEltwiseParams>,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(testing::TestParamInfo<EltwiseConvEltwiseParams> obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-class EltwiseWithTwoConvsAsInputsTest : public testing::WithParamInterface<EltwiseConvEltwiseParams>,
-                                        virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<EltwiseConvEltwiseParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_reshape_activation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_reshape_activation.hpp
deleted file mode 100644
index 7152b158f1987b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/eltwise_reshape_activation.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-using EltwiseReshapeActivationParams = typename std::tuple<
-    std::vector<std::vector<size_t>>,   // input shape and shape after reshape
-    InferenceEngine::Precision,         // precision
-    std::string,                        // device name
-    std::map<std::string, std::string>  // configuration
->;
-
-class EltwiseReshapeActivation : public testing::WithParamInterface<EltwiseReshapeActivationParams>,
-                                 virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<EltwiseReshapeActivationParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp
deleted file mode 100644
index 221d7bf1cd9b2b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fc_conv_fc.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
"ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector, // Input Shapes - std::vector, // Kernel Shape - size_t // Stride -> convParams; - -typedef std::tuple< - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::map, // Configuration - convParams, // Convolution Params - size_t, // Input Channels - size_t // Output Channels -> FcConvFcParams; - -class FcAfterConvTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; - -protected: - void SetUp() override; -}; - -class FcBeforeConvTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; - -protected: - void SetUp() override; -}; - -class FcBetweenConvsTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; - -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp deleted file mode 100644 index 3cceae5dce2f0f..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/first_connect_input_concat.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include -#include - - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector>, // Input shapes - InferenceEngine::Precision, // Network Precision - std::string, // Target Device - std::map // Config -> concatFirstInputParams; - -class ConcatFirstInputTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp deleted file mode 100644 index fee1b8f20ba5e6..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_conv_fq_affine.hpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector, // levels - std::vector // input generator data: low, high, resolution -> FqSpecificParams; - -typedef std::tuple< - std::vector, // Kernel 
-    std::vector<size_t>,  // Kernel Shape
-    std::vector<size_t>,  // Strides
-    size_t,               // Input channels
-    size_t                // Output channels
-> ConvParams;
-
-typedef std::tuple<
-    FqSpecificParams,
-    ConvParams,
-    bool,                               // Permute after convolution
-    InferenceEngine::Precision,         // Net precision
-    InferenceEngine::SizeVector,        // Input shapes
-    LayerTestsUtils::TargetDevice,      // Device name
-    std::map<std::string, std::string>  // Additional backend configuration and alias name for it
-> FqConvFqAffineTestParamsSet;
-
-class FqConvFqAffineTest : public testing::WithParamInterface<FqConvFqAffineTestParamsSet>,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<FqConvFqAffineTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    float inputDataMin = 0.0;
-    float inputDataMax = 10.0;
-    float inputDataResolution = 1.0;
-    int32_t seed = 1;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp
deleted file mode 100644
index 51fa5a38a173df..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/fq_with_mixed_levels.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef FQ_WITH_MIXED_LEVELS_HPP
-#define FQ_WITH_MIXED_LEVELS_HPP
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Configuration
-> FqWithMixedLevelsParams;
-
-class FqWithMixedLevelsTest : public testing::WithParamInterface<FqWithMixedLevelsParams>,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<FqWithMixedLevelsParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
-
-#endif // FQ_WITH_MIXED_LEVELS_HPP
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp
deleted file mode 100644
index a319d54085cf49..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/handling_orientation_conv.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // Configuration
-> HandlingOrientationParams;
-
-class HandlingOrientationClass : public testing::WithParamInterface<HandlingOrientationParams>,
-                                 virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<HandlingOrientationParams> &obj);
-
-protected:
-    void SetUp() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp
deleted file mode 100644
index 6ba2d763a59c83..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_conv.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Output Channels
-    bool                                 // Whether to add a Reshape to 2D at the end of the model
-> inputConvParams;
-
-class InputConvTest : public testing::WithParamInterface<inputConvParams>,
-                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<inputConvParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp
deleted file mode 100644
index 22815ed51621cb..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/input_split_concat.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    std::vector<size_t>                  // Input Shapes
-> InputSplitConcatParams;
-
-class InputSplitConcatTest : public testing::WithParamInterface<InputSplitConcatParams>,
-                             virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<InputSplitConcatParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp
deleted file mode 100644
index 36bf41e439d141..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/matmul_act_add.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::size_t,                        // Input Size
-    InferenceEngine::Precision,         // Network Precision
-    std::string,                        // Target Device
-    std::map<std::string, std::string>  // Configuration
-> MatMulActAddParams;
-
-class MatMulActAddTest:
-    public testing::WithParamInterface<MatMulActAddParams>,
-    public LayerTestsUtils::LayerTestsCommon{
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<MatMulActAddParams> &obj);
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp
deleted file mode 100644
index 1f9e620c5dc724..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "common_test_utils/test_common.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<
-    std::string,                        // Target device name
-    InferenceEngine::Precision,         // Network precision
-    size_t,                             // Multiples of concat size to be used as input size
-    size_t,                             // Concat size
-    std::map<std::string, std::string>  // Configuration
-> memoryEltwiseReshapeConcatParams;
-
-class MemoryEltwiseReshapeConcatTest : virtual public LayerTestsUtils::LayerTestsCommon,
-                                       public testing::WithParamInterface<memoryEltwiseReshapeConcatParams> {
-private:
-    void initTestModel();
-    // memory layers have to be replaced since ngraph does not support them
-    void initNgraphFriendlyModel();
-
-    // since we are switching models, we need to generate and save these values in SetUp
-    size_t inputSize;
-    size_t concatSize;
-    ngraph::element::Type ngPrc;
-    std::vector<float> memory_init;
-    std::vector<float> concat_vals;
-protected:
-    void SetUp() override;
-    void Run() override;
-    void LoadNetwork() override;
-    void Infer() override;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<memoryEltwiseReshapeConcatParams> &obj);
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp
deleted file mode 100644
index 5bfe7222337ad2..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/memory_fq_concat_prelu.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef SHARED_TEST_CLASSES_MEMORY_FQ_CONCAT_PRELU_H
-#define SHARED_TEST_CLASSES_MEMORY_FQ_CONCAT_PRELU_H
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<std::vector<size_t>>,    // input shapes
-    InferenceEngine::Precision,          // Network precision
-    std::string,                         // Device name
-    std::map<std::string, std::string>,  // Configuration
-    std::tuple<
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>>,           // StridedSlice
-    std::tuple<
-        std::size_t,
-        std::vector<size_t>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>>              // FakeQuantize
-> MemoryFqConcatPreluTuple;
-
-class MemoryFqConcatPrelu : public testing::WithParamInterface<MemoryFqConcatPreluTuple>,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<MemoryFqConcatPreluTuple> &obj);
-    void Run() override;
-
-protected:
-    void SetUp() override;
-}; // class MemoryFqConcatPrelu
-
-} // namespace SubgraphTestsDefinitions
-
-#endif // SHARED_TEST_CLASSES_MEMORY_FQ_CONCAT_PRELU_H
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp
deleted file mode 100644
index c3544435e8b0c6..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_crops_to_concat.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,                // Network Precision
-    std::string,                               // Target Device
-    std::vector<size_t>,                       // Input Shapes
-    std::vector<std::pair<int64_t, int64_t>>,  // Offset pairs (begin, end)
-    std::map<std::string, std::string>         // Configuration
-> MultiCropsToConcatParams;
-
-
-class MultiCropsToConcatTest : public testing::WithParamInterface<MultiCropsToConcatParams>,
-                               public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<MultiCropsToConcatParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_input_scale.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_input_scale.hpp
deleted file mode 100644
index 4dbe2511e3525a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multi_input_scale.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "common_test_utils/test_common.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<
-    std::string,                        // Target device name
-    InferenceEngine::Precision,         // Network precision
-    size_t,                             // Input size
-    std::map<std::string, std::string>  // Configuration
-> multipleInputScaleParams;
-
-class MultipleInputScaleTest : public LayerTestsUtils::LayerTestsCommon,
-                               public testing::WithParamInterface<multipleInputScaleParams> {
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-    float inputDataMin = -0.2f;
-    float range = 0.4f;
-    float inputDataResolution = 0.01f;
-    int32_t seed = 1;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<multipleInputScaleParams> &obj);
-};
-} // namespace SubgraphTestsDefinitions
-
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp
deleted file mode 100644
index f01bbace577c59..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
"ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector>, //input shapes - InferenceEngine::Precision, //Network precision - std::string, //Device name - std::map //Configuration -> MultioutputEltwiseReshapeEltwiseTuple; - -class MultioutputEltwiseReshapeEltwise - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); -protected: - void SetUp() override; -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_concat.hpp deleted file mode 100644 index 68fcdd5346020f..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_concat.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "common_test_utils/test_common.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include - -namespace SubgraphTestsDefinitions { -typedef std::tuple< - std::string, // Target device name - InferenceEngine::Precision, // Network precision - size_t, // Input size - size_t, // Const size - std::map // Configuration -> multipleConcatParams; - -class MultipleConcatTest : virtual public LayerTestsUtils::LayerTestsCommon, - public testing::WithParamInterface { -protected: - void SetUp() override; -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_input_fq.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_input_fq.hpp deleted file mode 100644 index 18daa2c6ec3738..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/multiple_input_fq.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef SUBGRAPH_MULTIPLE_INPUT_HPP -#define SUBGRAPH_MULTIPLE_INPUT_HPP - -#include "common_test_utils/test_common.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include - -namespace SubgraphTestsDefinitions { -typedef std::tuple< - std::string, // Target device name - InferenceEngine::Precision, // Network precision - size_t, // Input size - std::map // Configuration -> multipleInputParams; - -class MultipleInputTest : virtual public LayerTestsUtils::LayerTestsCommon, - public testing::WithParamInterface { -protected: - void SetUp() override; -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); -}; -} // namespace SubgraphTestsDefinitions - -#endif // SUBGRAPH_MULTIPLE_INPUT_HPP diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp deleted file mode 100644 index c39a206ceb5cd4..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/negative_memory_layer_offset.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    size_t,                             // Input size
-    size_t,                             // Hidden size
-    std::map<std::string, std::string>  // Configuration
-> NegativeMemoryLayerOffsetTuple;
-
-class NegativeMemoryOffsetTest
-    : public testing::WithParamInterface<NegativeMemoryLayerOffsetTuple>,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-private:
-    void switchToNgraphFriendlyModel();
-    std::vector<float> memory_init;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<NegativeMemoryLayerOffsetTuple>& obj);
-protected:
-    void SetUp() override;
-    void Run() override;
-    void LoadNetwork() override;
-    void Infer() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp
deleted file mode 100644
index d4c310dfd376dc..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/parameter_reshape_result.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,                // Input shape
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // Configuration
-> ParamReshapeResultTuple;
-
-class ParamReshapeResult:
-    public testing::WithParamInterface<ParamReshapeResultTuple>,
-    virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ParamReshapeResultTuple> &obj);
-protected:
-    void SetUp() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
deleted file mode 100644
index 4bcecd86963e6b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<std::vector<size_t>,         // input shapes and permute shapes
-                   InferenceEngine::Precision,  // Network precision
-                   std::string                  // Device name
-                   >
-    PermuteConcatConcatPermuteTuple;
-
-class PermuteConcatConcatPermute : public testing::WithParamInterface<PermuteConcatConcatPermuteTuple>,
-                                   virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<PermuteConcatConcatPermuteTuple>& obj);
-
-protected:
-    void SetUp() override;
-    void Validate() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& inputInfo) const override;
-
-    static std::shared_ptr CreateConst(const std::vector<size_t>& input_shape,
-                                       const ::ngraph::element::Type& precision,
-                                       bool use_1_as_first_dimension);
-    template <typename T>
-    static void CompareValues(const T& expectedValue, const T& value, std::size_t index, float threshold);
-    template <typename T>
-    static void CompareBuffers(const T* expectedData, const T* data, std::size_t size, float threshold);
-
-    int32_t range_{};
-    int32_t start_{0};
-    int32_t step_{1};
-};
-
-template <typename T>
-inline void PermuteConcatConcatPermute::CompareValues(const T& expectedValue,
-                                                      const T& value,
-                                                      std::size_t index,
-                                                      float threshold) {
-    auto result = std::abs(expectedValue - value);
-    if (expectedValue == 0.0f && value != 0.0f) {
-        IE_THROW() << "Relative comparison of values expected exact 0.0f and actual: " << std::to_string(value)
-                   << " at index " << index << " failed";
-    } else if (result > threshold) {
-        IE_THROW() << "Relative comparison of values expected: " << std::to_string(expectedValue)
-                   << " and actual: " << std::to_string(value) << " at index " << index << " with threshold "
-                   << threshold << " failed";
-    }
-}
-
-template <typename T>
-inline void PermuteConcatConcatPermute::CompareBuffers(const T* expectedData,
-                                                       const T* data,
-                                                       std::size_t size,
-                                                       float threshold) {
-    for (std::size_t i = 0; i < size; ++i) {
-        CompareValues(expectedData[i], data[i], i, threshold);
-    }
-}
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_permute.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_permute.hpp
deleted file mode 100644
index 52bcf314830fa9..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_permute.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace SubgraphTestsDefinitions {
-typedef std::tuple<std::vector<std::vector<size_t>>,  // input shapes and permute shapes
-                   InferenceEngine::Precision,        // Network precision
-                   std::string                        // Device name
-                   >
-    PermuteConcatPermuteTuple;
-
-class PermuteConcatPermute : public testing::WithParamInterface<PermuteConcatPermuteTuple>,
-                             virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<PermuteConcatPermuteTuple>& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& inputInfo) const override;
-
-    int32_t range_{};
-    int32_t start_{1};
-    int32_t step_{1};
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp
deleted file mode 100644
index 95c0b2f26104da..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/relu_split_reshape.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,                // Input shape
-    size_t,                             // Split axis
-    size_t,                             // Split number
-    InferenceEngine::Precision,         // Network precision
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // Configuration
-> ReluSplitReshapeTuple;
-
-class ReluSplitReshape:
-    public testing::WithParamInterface<ReluSplitReshapeTuple>,
-    virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ReluSplitReshapeTuple> &obj);
-protected:
-    void SetUp() override;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp
deleted file mode 100644
index 773555036b93c1..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Input Channels
-    size_t                               // Output Channels
-> ScaleShiftConvScaleShiftParams;
-
-class ScaleShiftAfterConvTest : public testing::WithParamInterface<ScaleShiftConvScaleShiftParams>,
-                                virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftConvScaleShiftParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-class ScaleShiftBeforeConvTest : public testing::WithParamInterface<ScaleShiftConvScaleShiftParams>,
-                                 virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftConvScaleShiftParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp
deleted file mode 100644
index 792dd95f5259c8..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/softsign.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    std::vector<size_t>                  // Input Shapes
-> softsignParams;
-
-class SoftsignTest : public testing::WithParamInterface<softsignParams>,
-                     virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<softsignParams>& obj);
-
-    void Run() override;
-
-protected:
-    void SetUp() override;
-
-private:
-    std::shared_ptr<ngraph::Function> GenerateNgraphFriendlySoftSign();
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp
deleted file mode 100644
index 2a2162301b8473..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_concat_multi_inputs.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    std::vector<size_t>,                 // Input Shapes
-    size_t,                              // Num of Split outputs (concat inputs)
-    bool                                 // with FC or not
-> SplitConcatMultiInputsParams;
-
-
-class SplitConcatMultiInputsTest : public testing::WithParamInterface<SplitConcatMultiInputsParams>,
-                                   public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(testing::TestParamInfo<SplitConcatMultiInputsParams> obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-    float inputDataMin = 0.0;
-    float inputDataMax = 0.2;
-    float inputDataResolution = 1;
-    int32_t seed = 1;
-};
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp
deleted file mode 100644
index 2c3fd302a19c6c..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t,                              // Input Channels
-    size_t                               // Output Channels
-> SplitConvParams;
-
-class SplitConvTest : public testing::WithParamInterface<SplitConvParams>,
-                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<SplitConvParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp
deleted file mode 100644
index 9104775214863e..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_relu.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
"common_test_utils/test_constants.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector>, //input shapes - std::vector, //index connected layer - InferenceEngine::Precision, //Network precision - std::string, //Device name - std::map //Configuration -> SplitReluTuple; - - -class SplitRelu: - public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); -protected: - void SetUp() override; -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp deleted file mode 100644 index 38ee0a40cd5095..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_trivial_permute_concat.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - InferenceEngine::Precision, //Network precision - std::string, //Device name - std::vector, //Input sizes - size_t, //Split axis - size_t, //Concat axis - std::map //Configuration -> SplitTrivialPermuteConcatTuple; - -class SplitTrivialPermuteConcatTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); -protected: - void SetUp() override; -}; -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/strided_slice.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/strided_slice.hpp deleted file mode 100644 index 1207cfef9a08ac..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/strided_slice.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace SubgraphTestsDefinitions { - -struct StridedSliceSpecificParams { - InferenceEngine::SizeVector inputShape; - std::vector begin; - std::vector end; - std::vector strides; - std::vector beginMask; - std::vector endMask; - std::vector newAxisMask; - std::vector shrinkAxisMask; - std::vector ellipsisAxisMask; -}; - -using StridedSliceParams = std::tuple< - StridedSliceSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string, // Device name - std::map // Additional network configuration ->; - -class StridedSliceTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; -} // namespace SubgraphTestsDefinitions diff --git 
deleted file mode 100644
index ecdf2869bde360..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_concat.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <vector>
-#include <string>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,   // Input shape
-    std::vector<int64_t>,  // Begin
-    std::vector<int64_t>,  // End
-    std::vector<int64_t>,  // Strides
-    std::vector<int64_t>,  // Begin mask
-    std::vector<int64_t>   // End mask
-> StridedSliceParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    StridedSliceParams                   // StridedSlice parameters
-> SliceConcatParams;
-
-class SliceConcatTest : public testing::WithParamInterface<SliceConcatParams>,
-                        virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<SliceConcatParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp
deleted file mode 100644
index 1d890194221fa6..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stridedslice_conv.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <vector>
-#include <string>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,  // Input Shapes
-    std::vector<size_t>,  // Kernel Shape
-    size_t                // Stride
-> convParams;
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Configuration
-    convParams,                          // Convolution Params
-    size_t                               // Output Channels
-> SliceConvParams;
-
-class SliceConvTest : public testing::WithParamInterface<SliceConvParams>,
-                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<SliceConvParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp
deleted file mode 100644
index bf3a16f3b35fca..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <vector>
-#include <string>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector, // Kernel Shape - std::vector, // Strides - size_t, // Input channels - size_t // Output channels -> ConvParams; - -typedef std::tuple< - ConvParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::SizeVector, // Input shapes - LayerTestsUtils::TargetDevice, // Device name - std::map // Additional backend configuration and alis name to it -> TransposeConvTestParams; - -class TransposeConvTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - -protected: - float inputDataMin = 0.0; - float inputDataMax = 0.2; - float inputDataResolution = 1; - int32_t seed = 1; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp deleted file mode 100644 index 5a6097cb626147..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/trivial_concat.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { -using trivialConcatParamsTuple = typename std::tuple< - std::vector, // Inputs shape - InferenceEngine::Precision, // Network precision - std::string, // Device name - std::map // Configuration ->; - -class TrivialConcatLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); -protected: - void SetUp() override; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp deleted file mode 100644 index 94d70b23103e42..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace SubgraphTestsDefinitions { - -typedef std::tuple< - std::vector, // levels - std::vector>, // const inputs shape - std::vector, // fake quantize inputLow, inputHigh, outputLow, outputHigh or empty for random - std::vector // input generator data: low, high, resolution -> fqSpecificParams; -typedef std::tuple< - fqSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input 
layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shapes - LayerTestsUtils::TargetDevice, // Device name - std::pair>, // Additional backend configuration and alis name to it - bool -> fqSubgraphTestParamsSet; - -class FakeQuantizeSubgraphTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - -protected: - float inputDataMin = 0.0; - float inputDataMax = 10.0; - float inputDataResolution = 1.0; - int32_t seed = 1; -}; - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp deleted file mode 100644 index fe0f99f0b42ee6..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" -#include "shared_test_classes/subgraph/activation_concats_eltwise.hpp" -#include "common_test_utils/node_builders/activation.hpp" -#include "common_test_utils/node_builders/eltwise.hpp" - -namespace SubgraphTestsDefinitions { - -using namespace ov::test::utils; -using namespace InferenceEngine; - -std::string ActivationConcatsEltwise::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - size_t inputSize; - size_t concatSize; - std::string targetDevice; - std::map configuration; - std::tie(inputSize, concatSize, netPrecision, targetDevice, configuration) = obj.param; - - std::ostringstream result; - result << "IS=" << inputSize << "_"; - result << "CS=" << concatSize << "_"; - result << "PRC=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void ActivationConcatsEltwise::SetUp() { - InferenceEngine::Precision netPrecision; - size_t inputSize; - size_t concatSize; - std::map config; - std::tie(inputSize, concatSize, netPrecision, targetDevice, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - - auto relu = ov::test::utils::make_activation(input[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); - - auto concat_vals_1 = ov::test::utils::generate_float_numbers(concatSize, 14, 14); - auto concat_vals_2 = ov::test::utils::generate_float_numbers(concatSize, 14, 14); - auto concat_const_1 = ov::test::utils::deprecated::make_constant(ngPrc, {1, concatSize}, concat_vals_1); - auto concat_const_2 = ov::test::utils::deprecated::make_constant(ngPrc, {1, concatSize}, concat_vals_2); - - auto concat_1 = std::make_shared(ov::NodeVector{concat_const_1, relu}, 1); - auto concat_2 = std::make_shared(ov::NodeVector{concat_const_2, relu}, 1); - - auto eltw = ov::test::utils::make_eltwise(concat_1, concat_2, ngraph::helpers::EltwiseTypes::ADD); - - auto reshape_pattern = 
-    auto final_reshape = std::make_shared<ov::op::v1::Reshape>(eltw, reshape_pattern, false);
-    function = std::make_shared<ngraph::Function>(final_reshape, input, "ActivationConcatsEltwise");
-}
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
deleted file mode 100644
index 4c22889b864307..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <vector>
-#include "shared_test_classes/subgraph/activation_fq.hpp"
-#include "common_test_utils/node_builders/activation.hpp"
-#include "common_test_utils/node_builders/fake_quantize.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-    std::string ActivationFakeQuantizeSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) {
-        fqSpecificParams fqParams;
-        ngraph::helpers::ActivationTypes activationType;
-        InferenceEngine::Precision netPrecision;
-        InferenceEngine::Precision inPrc, outPrc;
-        InferenceEngine::Layout inLayout, outLayout;
-        InferenceEngine::SizeVector inputShapes;
-        std::string targetDevice;
-        std::pair<std::string, std::map<std::string, std::string>> config;
-        std::tie(fqParams, activationType, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice, config) = obj.param;
-        std::vector<size_t> levels;
-        std::vector<std::vector<size_t>> constShape;
-        std::vector<float> inputParams;
-        std::tie(levels, constShape, inputParams) = fqParams;
-
-        std::ostringstream result;
-        result << "InputShape=" << ov::test::utils::vec2str(inputShapes) << "_";
-        result << "CS=" << ov::test::utils::vec2str(constShape) << "_";
-        result << "LEVELS=" << ov::test::utils::vec2str(levels) << "_";
-        result << "netPRC=" << netPrecision.name() << "_";
-        result << "inPRC=" << inPrc.name() << "_";
-        result << "outPRC=" << outPrc.name() << "_";
-        result << "inL=" << inLayout << "_";
-        result << "outL=" << outLayout << "_";
-        result << "trgDev=" << targetDevice;
-        if (!config.first.empty()) {
-            result << "_targetConfig=" << config.first;
-        }
-        if (inputParams.size() == 3) {
-            result << "_inputArg=" << inputParams[0] << "_" << inputParams[1] << "_" << inputParams[2];
-        }
-        result << "_activation=" << activationNames[activationType];
-        return result.str();
-    }
-
-    void ActivationFakeQuantizeSubgraphTest::SetUp() {
-        fqSpecificParams fqParams;
-        ngraph::helpers::ActivationTypes activationType;
-        std::vector<size_t> inputShape;
-        std::pair<std::string, std::map<std::string, std::string>> config;
-        InferenceEngine::Precision netPrecision;
-        std::tie(fqParams, activationType, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice, config) = this->GetParam();
-        configuration.insert(config.second.begin(), config.second.end());
-
-        std::vector<size_t> levels;
-        std::vector<std::vector<size_t>> constShape;
-        std::vector<float> inputArg;
-        std::tie(levels, constShape, inputArg) = fqParams;
-        if (inputArg.size() == 3) {
-            inputDataMin = inputArg[0];
-            inputDataMax = inputArg[1];
-            inputDataResolution = inputArg[2];
-        }
-
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-        auto act = ov::test::utils::make_activation(params[0], ngPrc, activationType);
-
-        auto FQNode = ov::test::utils::make_fake_quantize(act, ngraph::element::f32, levels[0], constShape[0],
-                                                          { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax });
-
-        auto FQ = std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(FQNode);
-
-        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(FQ)};
-        function = std::make_shared<ngraph::Function>(results, params, "ActivationFakeQuantizeSubgraph");
-    }
-
-InferenceEngine::Blob::Ptr ActivationFakeQuantizeSubgraphTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution,
-                                            seed);
-}
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp b/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
deleted file mode 100644
index 7dabfcf9600211..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/broadcast_power.hpp"
-
-#include "common_test_utils/node_builders/eltwise.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-
-namespace SubgraphTestsDefinitions {
-std::string BroadcastPowerTest::getTestCaseName(const testing::TestParamInfo& obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::vector<std::vector<size_t>> inputs_shapes;
-    std::tie(inputs_shapes, netPrecision, targetDevice, configuration) = obj.param;
-
-    std::ostringstream result;
-    result << "inputShape=" << ov::test::utils::vec2str(inputs_shapes) << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice << "_";
-    for (auto const& configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void BroadcastPowerTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::vector<std::vector<size_t>> inputs_shapes;
-    std::tie(inputs_shapes, netPrecision, targetDevice, configuration) = this->GetParam();
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputs_shapes[0]))};
-    auto reshape_pattern = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()},
-                                                                  inputs_shapes[1]);
-    auto reshape = std::make_shared<ov::op::v1::Reshape>(params[0], reshape_pattern, false);
-
-    auto const_mult2 = ov::test::utils::deprecated::make_constant(ngPrc, {}, {-1.0f});
-    auto sum = ov::test::utils::make_eltwise(reshape, const_mult2, ngraph::helpers::EltwiseTypes::MULTIPLY);
-
-    auto reshape_pattern_2 = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()},
-                                                                    inputs_shapes[0]);
-    auto reshape_2 = std::make_shared<ov::op::v1::Reshape>(sum, reshape_pattern_2, false);
-    function = std::make_shared<ngraph::Function>(reshape_2, params, "BroadcastPowerPass");
-}
-} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
deleted file mode 100644
index 7e18879b9f1183..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/cascade_concat.hpp"
-
-#include "common_test_utils/node_builders/constant.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string CascadeConcat::getTestCaseName(const testing::TestParamInfo &obj) { -
std::vector> input1, input2, input3; - InferenceEngine::Precision netPrecision; - std::string targetName; - bool multioutput; - std::map additional_config; - std::tie(input1, input2, input3, netPrecision, multioutput, targetName, additional_config) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(input1[0]) << "_"; - results << ov::test::utils::vec2str(input2[0]) << "_"; - results << ov::test::utils::vec2str(input3[0]) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "Multioutput=" << multioutput << "_"; - results << "targetDevice=" << targetName << "_"; - return results.str(); -} - -void CascadeConcat::SetUp() { - std::vector> input1, input2, input3; - InferenceEngine::Precision netPrecision; - std::map additional_config; - bool multioutput; - std::tie(input1, input2, input3, netPrecision, multioutput, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape(input1[0])), - std::make_shared(ngPrc, ov::Shape(input2[0])), - std::make_shared(ngPrc, ov::Shape(input3[0]))}; - - auto relu1 = std::make_shared(input[0]); - auto relu2 = std::make_shared(input[1]); - auto relu3 = std::make_shared(input[2]); - auto concat = std::make_shared(ov::OutputVector{relu1->output(0), - relu2->output(0)}, - 1); - - auto reshape_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); - auto reshape = std::make_shared(concat, reshape_constant); - auto reshape2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); - auto reshape2 = std::make_shared(reshape, reshape2_constant); - - auto concat2 = std::make_shared(ov::OutputVector{reshape2->output(0), - relu3->output(0)}, - 1); - ngraph::ResultVector results; - if (multioutput) { - auto const_mult = ov::test::utils::deprecated::make_constant(ngPrc, ngraph::Shape{1, input1[0][1]+input2[0][1]}, - std::vector{1.01f}); - auto mult = std::make_shared(concat, const_mult); - results = ngraph::ResultVector{std::make_shared(concat2), - std::make_shared(mult)}; - } else { - results = ngraph::ResultVector{std::make_shared(concat2)}; - } - function = std::make_shared(results, input, "concat_reshape_reshape_concat_mul"); -} - -std::string CascadeConcatWithMultiConnReshape::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::string targetName; - std::map additional_config; - std::tie(inputShape, netPrecision, targetName, additional_config) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - for (auto const& configItem : additional_config) { - results << "_configItem=" << configItem.first << "_" << configItem.second; - } - return results.str(); -} - -/** - * Tests a case when 2 concats have Squeeze between them and Concat2 is the second connection of Squeeze output - * Input Const1 - * | | - * Relu | - * | | - * Concat1 - * | - * Squeeze Const2 - * | | | - * Relu1 Concat2 - * | | - * Unsqueeze1 Relu2 - * | - * Unsqueeze2 - */ -void CascadeConcatWithMultiConnReshape::SetUp() { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShape, netPrecision, 
targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - auto inputShapeSqueezed = inputShape; - inputShapeSqueezed.insert(std::begin(inputShapeSqueezed), 1); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShapeSqueezed))}; - auto relu = std::make_shared(input[0]); - auto const1 = ov::test::utils::deprecated::make_constant(ngPrc, inputShapeSqueezed, std::vector{}, true); - auto concat1 = std::make_shared(ov::NodeVector{relu, const1}, inputShapeSqueezed.size() - 1); - - auto squeeze_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); - auto squeeze = std::make_shared(concat1, squeeze_constant); - - auto relu1 = std::make_shared(squeeze); - - auto unsqueeze1_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); - auto unsqueeze1 = std::make_shared(relu1, unsqueeze1_constant); - - auto const2 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, std::vector{}, true); - auto concat2 = std::make_shared(ov::NodeVector{squeeze, const2}, 1); - // Change concat name to make it the second connection in the map of squeeze output connections - concat2->set_friendly_name("XConcat"); - - auto relu2 = std::make_shared(concat2); - - auto unsqueeze2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); - auto unsqueeze2 = std::make_shared(relu2, unsqueeze2_constant); - - ngraph::ResultVector results = {std::make_shared(unsqueeze1), - std::make_shared(unsqueeze2)}; - - function = std::make_shared(results, input, "CascadeConcatWithMultiConnReshapeTest"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp deleted file mode 100644 index ed5fcc3f939575..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "shared_test_classes/subgraph/clamp_fq.hpp" -#include "common_test_utils/node_builders/fake_quantize.hpp" - -namespace SubgraphTestsDefinitions { - - std::string ClampFakeQuantizeSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - fqSpecificParams fqParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::pair> config; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice, config) = obj.param; - std::vector levels; - std::vector> constShape; - std::vector inputParams; - std::vector clampMinMax; - std::tie(levels, constShape, clampMinMax, inputParams) = fqParams; - - std::ostringstream result; - result << "InputShape=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "CS=" << ov::test::utils::vec2str(constShape) << "_"; - result << "LEVELS=" << ov::test::utils::vec2str(levels) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - if (!config.first.empty()) { - result << "_targetConfig=" << 
config.first; - } - if (inputParams.size() == 3) { - result << "_inputArg=" << inputParams[0] << "_" << inputParams[1] << "_" << inputParams[2]; - } - if (clampMinMax.size() == 2) { - result << "_clampMaxMin=" << clampMinMax[0] << "_" << clampMinMax[1]; - } - return result.str(); - } - void ClampFakeQuantizeSubgraphTest::SetUp() { - fqSpecificParams fqParams; - std::vector inputShape; - std::pair> config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice, config) = this->GetParam(); - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector levels; - std::vector> constShape; - std::vector clamp_min_max; - std::vector inputArg; - std::tie(levels, constShape, clamp_min_max, inputArg) = fqParams; - if (inputArg.size() == 3) { - inputDataMin = inputArg[0]; - inputDataMax = inputArg[1]; - inputDataResolution = inputArg[2]; - } - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto clamp = std::make_shared(params[0], clamp_min_max[0], clamp_min_max[1]); - - auto FQNode = ov::test::utils::make_fake_quantize(clamp, ngraph::element::f32, levels[0], constShape[0], - { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - - - auto FQ = std::dynamic_pointer_cast(FQNode); - auto sigmoid = std::make_shared(FQ); - - ngraph::ResultVector results{std::make_shared(sigmoid)}; - function = std::make_shared(results, params, "fakeQuantizeSubgraph"); - configuration = config.second; - } - -InferenceEngine::Blob::Ptr ClampFakeQuantizeSubgraphTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp deleted file mode 100644 index d3d2a02905a98b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/concat_conv.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConcatConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : 
configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr ConcatConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void ConcatConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto relu1 = std::make_shared(params[0]); - - auto const_values = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); - auto constant = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, const_values); - auto concat = std::make_shared(ov::NodeVector{constant, relu1}, 1); - - std::vector convInputShape = {1, inputChannels, 1, 2 * inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(concat, reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? 
stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "ConcatConvTest"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp deleted file mode 100644 index 985583db1607aa..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/concat_multi_input.hpp" -#include "common_test_utils/node_builders/activation.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConcatMultiInput::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map additional_config; - std::tie(inputShapes, netPrecision, targetDevice, additional_config) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : additional_config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - - return result.str(); -} - -void ConcatMultiInput::SetUp() { - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShapes, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - - ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - paramSize = { 1, 0 }; - for (const auto& val : inputShapes) { - paramSize[1] += val[1]; - } -} - -void ConcatMultiInput::GenerateStridedSliceModel() { - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(paramSize))}; - auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 }); - - std::vector newAxis = { 0, 0 }; - std::vector begin_mask = { 0, 0 }; - std::vector end_mask = { 0, 0 }; - std::vector> ssArray; - ngraph::OutputVector concatInput; - - auto relu = std::make_shared(params[0]); - std::vector startOffset = { 0, 0 }; - for (size_t i = 0; i < inputShapes.size(); ++i) { - std::vector shape = { static_cast(inputShapes[i][0]), - static_cast(inputShapes[i][1]) }; - std::vector endoffset = { static_cast(inputShapes[i][0]) + startOffset[0], - static_cast(inputShapes[i][1]) + startOffset[1]}; - auto begin = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset); - auto end = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset); - auto ss = std::make_shared(relu, begin, end, stride, begin_mask, end_mask, newAxis); - ssArray.push_back(ss); - concatInput.push_back(ssArray[i]); - - startOffset[1] += shape[1]; - } - - auto concat = std::make_shared(concatInput, 1); - - ngraph::ResultVector results{ std::make_shared(concat) }; - 
function = std::make_shared(results, params, "ConcatMultiInput"); -} - -void ConcatMultiInput::GenerateConstOnlyModel() { - ngraph::OutputVector concatInputs; - - const int seed = 0; - std::mt19937 gen(seed); - - auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable { - std::vector res; - - std::uniform_real_distribution dist(min, max); - for (std::size_t i = 0; i < vec_len; i++) - res.emplace_back(static_cast(dist(gen))); - - return res; - }; - ov::ParameterVector input_vector; - for (size_t i = 0; i < inputShapes.size(); ++i) { - size_t total_size = 1; - for (auto dim : inputShapes[i]) { - total_size *= dim; - } - if (i == 0) { - input_vector = ov::ParameterVector{std::make_shared(ngPrc, ov::Shape{1, total_size})}; - auto relu = ov::test::utils::make_activation(input_vector[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); - concatInputs.push_back(relu); - } else { - auto min_max = (i % 2 == 0) ? 2 : 30; - auto const_values = generateFloatNumbers(total_size, -min_max, min_max); - auto const_node = ov::test::utils::deprecated::make_constant(ngPrc, {1, total_size}, const_values); - concatInputs.push_back(const_node); - } - } - - auto concat = std::make_shared(concatInputs, 1); - - ngraph::ResultVector results{ std::make_shared(concat) }; - function = std::make_shared(results, input_vector, "ConcatConstOnly"); -} - -void ConcatMultiInput::GenerateMemoryModel() { - int axis = 1; - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - - auto variable = std::make_shared(ngraph::VariableInfo{ov::Shape(inputShapes[0]), - ngraph::element::dynamic, "concat_input_memory"}); - auto mem_i = std::make_shared(ngPrc, inputShapes[0]); - auto mem_r = std::make_shared(mem_i, variable); - - ngraph::OutputVector concat_input; - concat_input.push_back(mem_r); - concat_input.push_back(input.at(0)); - auto concat = std::make_shared(concat_input, axis); - - auto mem_w = std::make_shared(input.at(0), variable); - - auto res = std::make_shared(concat); - function = std::make_shared(ngraph::ResultVector{res}, ngraph::SinkVector{mem_w}, input, "ConcatMemory"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp deleted file mode 100644 index a462cb5871b5d2..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/concat_quantization_during_memory_requantization.hpp" -#include "common_test_utils/node_builders/eltwise.hpp" - -namespace SubgraphTestsDefinitions { - std::string ConcatQuantDuringMemoryRequantTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetName; - size_t inputSize; - size_t hiddenSize; - std::map config; - std::tie(netPrecision, targetName, inputSize, hiddenSize, config) = obj.param; - std::ostringstream results; - - results << "netPRC=" << netPrecision.name() << "_"; - results << "IS=" << inputSize << "_"; - results << "HS=" << hiddenSize << "_"; - results << "targetDevice=" << targetName; - for (auto const& configItem : config) { - results << "_configItem=" << configItem.second; - } - return results.str(); - } - - void 
ConcatQuantDuringMemoryRequantTest::SetUp() { - InferenceEngine::Precision netPrecision; - size_t inputSize; - size_t hiddenSize; - std::map config; - std::tie(netPrecision, targetDevice, inputSize, hiddenSize, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - memory_1_init = ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f); - memory_2_init = ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f); - - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - - auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); - auto mem_1_read = std::make_shared(mem_1_const, "memory_1"); - - auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_read, input[0] }, 1); - // Revert concat names to set the needed order of scale factors calculation - concat_1->set_friendly_name("concat2"); - auto split_axis_op_1 = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split_1 = std::make_shared(ov::element::u64, ov::Shape{2}, std::vector{inputSize, hiddenSize}); - auto split_1 = std::make_shared(concat_1, split_axis_op_1, num_split_1); - - auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, - ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f)); - auto mul = ov::test::utils::make_eltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto mem_1_write = std::make_shared(mul, "memory_1"); - - auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); - auto mem_2_read = std::make_shared(mem_2_const, "memory_2"); - - auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_read, mul }, 1); - // Revert concat names to set the needed order of scale factors calculation - concat_2->set_friendly_name("concat1"); - auto split_axis_op_2 = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto split_2 = std::make_shared(concat_2, split_axis_op_2, 2); - - auto mem_2_write = std::make_shared(split_2->output(0), "memory_2"); - auto sigm = std::make_shared(split_2->output(1)); - - mem_1_write->add_control_dependency(mem_1_read); - sigm->add_control_dependency(mem_1_write); - mem_2_write->add_control_dependency(mem_2_read); - sigm->add_control_dependency(mem_2_write); - - function = std::make_shared(sigm, input, "concat_quant_during_memory_requant_memory"); - } - - void ConcatQuantDuringMemoryRequantTest::switchToNgraphFriendlyModel() { - InferenceEngine::Precision netPrecision; - size_t inputSize; - size_t hiddenSize; - std::map config; - std::tie(netPrecision, targetDevice, inputSize, hiddenSize, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - memory_1_init = ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f); - memory_2_init = ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f); - - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})}; - - auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init); - auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_const, input[0] }, 1); - - auto split_axis_op_1 = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split_1 = std::make_shared(ov::element::u64, ov::Shape{2}, std::vector{inputSize, hiddenSize}); - auto 
split_1 = std::make_shared(concat_1, split_axis_op_1, num_split_1); - - auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, - ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f)); - auto mul = ov::test::utils::make_eltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY); - - auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init); - auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_const, mul }, 1); - auto split_axis_op_2 = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split_2 = std::make_shared(concat_2, split_axis_op_2, 2); - - auto sigm = std::make_shared(split_2->output(1)); - - function = std::make_shared(sigm, input, "concat_quant_during_memory_requant_nomemory"); - } - - void ConcatQuantDuringMemoryRequantTest::LoadNetwork() { - LayerTestsUtils::LayerTestsCommon::LoadNetwork(); - inferRequest = executableNetwork.CreateInferRequest(); - } - - void ConcatQuantDuringMemoryRequantTest::Infer() { - ConfigureInferRequest(); - inferRequest.Infer(); - } - - void ConcatQuantDuringMemoryRequantTest::Run() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - LoadNetwork(); - - auto states = inferRequest.QueryState(); - for (auto& state : states) { - auto name = state.GetName(); - if (name == "memory_1") { - auto blob = FuncTestUtils::createAndFillBlobWithFloatArray(state.GetState()->getTensorDesc(), - memory_1_init.data(), memory_1_init.size()); - state.SetState(blob); - } else if (name == "memory_2") { - auto blob = FuncTestUtils::createAndFillBlobWithFloatArray(state.GetState()->getTensorDesc(), - memory_2_init.data(), memory_2_init.size()); - state.SetState(blob); - } else { - GTEST_FAIL() << "unknown memory state"; - } - } - GenerateInputs(); - Infer(); - switchToNgraphFriendlyModel(); - Validate(); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp deleted file mode 100644 index bbc06133f44bc9..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/concat_quantization.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConcatQuantization::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes, newInputShapes; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - - return result.str(); -} - -void ConcatQuantization::SetUp() { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 160})}; - - std::vector outFormShapes1 = { 1, 5, 32 }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, 
ngraph::Shape{ 3 }, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); - - auto tanh = std::make_shared(reshape1); - - std::vector outFormShapes2 = { 1, 160 }; - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); - auto reshape2 = std::make_shared(tanh, pattern2, false); - auto scale = ov::test::utils::deprecated::make_constant(ngPrc, outFormShapes2, {}, true); - //For ov::op::v0::ScaleShift: Cannot cast ngraph node ScaleShift to CNNLayer! - auto scale_shift = std::make_shared(reshape2, scale); - - std::vector outFormShapes3 = { 5, 32 }; - auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); - auto reshape3 = std::make_shared(scale_shift, pattern3, false); - - auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3); - auto reshape4 = std::make_shared(tanh, pattern4, false); - - auto concat = std::make_shared(ngraph::OutputVector{ reshape3, reshape4 }, 0); - concat->set_friendly_name("concat"); - - ngraph::ResultVector results{std::make_shared(concat)}; - function = std::make_shared(results, params, "ConcatQuantization"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp deleted file mode 100644 index 3906d8572652eb..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/connect_split_concat_concat.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { -std::string SplitConcatConcatTest::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration) = obj.param; - - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - for (auto const &configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void SplitConcatConcatTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 256})}; - auto relu_start = std::make_shared(params[0]); - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(relu_start, split_axis_op, 2); - - auto const_concat = ov::test::utils::deprecated::make_constant(ngPrc, {1, 96}, std::vector{0}); - auto const_concat_2 = ov::test::utils::deprecated::make_constant(ngPrc, {1, 96}, std::vector{0}); - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), const_concat}, 1); - auto concat_2 = std::make_shared(ngraph::OutputVector{concat, const_concat_2}, - 1); - auto relu = std::make_shared(concat_2); - ngraph::ResultVector resultVector{ - std::make_shared(relu) - }; - function = std::make_shared(resultVector, params, "Multiple_connection_split_concat"); -} -} // namespace 
SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp deleted file mode 100644 index c7c3c02ada2705..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/const_conv_concat.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConstConvConcatTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr ConstConvConcatTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -0.2f, 0.2f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void ConstConvConcatTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector convInputShape = {inputShape[0], inputChannels, 1, inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - 0.0f, 0.1f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? 
stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - - auto const_values = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -0.2f, 0.2f); - auto constant = ov::test::utils::deprecated::make_constant(ngPrc, {1, outputChannels, 1, widthAfterConv}, const_values); - auto concat = std::make_shared(ov::NodeVector{constant, conv}, 3); - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, - std::vector{1, 2 * outputChannels * widthAfterConv }); - auto reshape2 = std::make_shared(concat, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "ConstConvConcatTest"); - functionRefs = ngraph::clone_function(*function); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp deleted file mode 100644 index 97601230dd5af3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/const_strided_slice_concat.cpp +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/const_strided_slice_concat.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConstStridedSliceConcatTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - uint32_t inputSliceSize; - uint32_t constSliceSize; - uint32_t inputSlices; - uint32_t constSlices; - std::tie(netPrecision, targetDevice, configuration, inputSliceSize, inputSlices, constSliceSize, constSlices) = obj.param; - - std::ostringstream result; - result << "ISS=" << inputSliceSize << "_"; - result << "ISN=" << inputSlices << "_"; - result << "CSS=" << constSliceSize << "_"; - result << "CSN=" << constSlices << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr ConstStridedSliceConcatTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -0.5f, 0.5f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -namespace { -template -void appendSlices(A&& destVector, B&& src, const int64_t chunkSize, const int64_t totalSize, C precission) { - for (int64_t start = 0; start < totalSize; start += chunkSize) { - ov::Shape constShape = {2}; - auto beginNode = std::make_shared(ov::element::i64, constShape, std::vector{ 0, start }); - auto endNode = std::make_shared(ov::element::i64, constShape, std::vector{ 0, start + chunkSize }); - auto strideNode = std::make_shared(ov::element::i64, constShape, std::vector{ 1, 1 }); - auto ssNode = std::make_shared(src, - beginNode, - endNode, - strideNode, - std::vector{ 1, 0 }, - 
std::vector{ 1, 0 }, - std::vector{}, - std::vector{}, - std::vector{}); - destVector.push_back(ssNode); - } -} -} // namespace - -// Topology: -// -// Constant Parameter -// | | | | -// +---+ +---+ +---+ +---+ -// | | | | -// SS_1c ... SS_Nc SS_1p ... SS_Np -// | | | | -// | +----+ +----+ | -// | | | | -// +-------------+ | | +-------------+ -// \ | | / -// Concat -// -// Legend: -// SS == Strided Slice -void ConstStridedSliceConcatTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - uint32_t inputSliceSize; - uint32_t constSliceSize; - uint32_t inputSlices; - uint32_t constSlices; - std::tie(netPrecision, targetDevice, tempConfig, inputSliceSize, inputSlices, constSliceSize, constSlices) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - const size_t totalInputSize = static_cast(inputSlices) * inputSliceSize; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, totalInputSize})}; - - const auto totalConstantSize = constSlices * constSliceSize; - auto constantValues = ov::test::utils::generate_float_numbers(totalConstantSize, -0.2f, 0.2f); - auto constant = ov::test::utils::deprecated::make_constant(ngPrc, { 1, totalConstantSize }, constantValues); - - std::vector> allToConcat; - appendSlices(allToConcat, params[0], inputSliceSize, totalInputSize, ngPrc); - appendSlices(allToConcat, constant, constSliceSize, totalConstantSize, ngPrc); - auto concat = std::make_shared(allToConcat, 1); - - function = std::make_shared(concat, params, "ConstStridedSliceConcatTest"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp deleted file mode 100644 index a8c3b4b085fca8..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/conv_fq_eltwise.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConvFqEltwiseTest::getTestCaseName(const testing::TestParamInfo& obj) { - FqSpecificParams fqParams; - ConvParams convParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::map config; - std::tie(fqParams, convParams, netPrecision, inputShapes, targetDevice, config) = obj.param; - - size_t levels; - std::vector inputArg; - float convFQValue; - std::tie(levels, inputArg, convFQValue) = fqParams; - - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "LEVELS=" << levels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - if (inputArg.size() == 3) { - result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2]; - } - result << "_convFQ=" << convFQValue; - result << "_KERNEL=" << ov::test::utils::vec2str(kernelShape) << "_"; 
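// ---------------------------------------------------------------------------
// [Editor's note, not part of the deleted file] In the ConstStridedSliceConcat
// test above, the appendSlices helper lost its template header when this patch
// was flattened (presumably template <typename A, typename B, typename C>),
// along with the template arguments of its make_shared calls. A minimal,
// hedged re-sketch of the same slicing idea with explicit API 2.0 types; the
// function name, opset versions, and i64 index type are the editor's
// assumptions, not the deleted code:
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/strided_slice.hpp"

// Cuts `src` into chunk_size-wide pieces along axis 1 and collects them,
// mirroring the SS_1..SS_N branches feeding the Concat in the topology sketch.
static void append_slices_sketch(ov::NodeVector& dest,
                                 const ov::Output<ov::Node>& src,
                                 int64_t chunk_size,
                                 int64_t total_size) {
    for (int64_t start = 0; start < total_size; start += chunk_size) {
        auto begin  = ov::op::v0::Constant::create(ov::element::i64, {2}, {int64_t{0}, start});
        auto end    = ov::op::v0::Constant::create(ov::element::i64, {2}, {int64_t{0}, start + chunk_size});
        auto stride = ov::op::v0::Constant::create(ov::element::i64, {2}, {int64_t{1}, int64_t{1}});
        // begin_mask/end_mask {1, 0}: take dimension 0 whole, slice dimension 1.
        dest.push_back(std::make_shared<ov::op::v1::StridedSlice>(src, begin, end, stride,
                                                                  std::vector<int64_t>{1, 0},
                                                                  std::vector<int64_t>{1, 0}));
    }
}
// ---------------------------------------------------------------------------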
- result << "STRIDES=" << ov::test::utils::vec2str(strides) << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels; - return result.str(); -} - -void ConvFqEltwiseTest::SetUp() { - FqSpecificParams fqParams; - ConvParams convParams; - std::vector inputShape; - std::map config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(fqParams, convParams, netPrecision, inputShape, targetDevice, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - - size_t levels; - std::vector inputArg; - float convFQValue; - std::tie(levels, inputArg, convFQValue) = fqParams; - if (inputArg.size() == 3) { - inputDataMin = inputArg[0]; - inputDataMax = inputArg[1]; - inputDataResolution = inputArg[2]; - } - - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - const int seed = 0; - std::mt19937 gen(seed); - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - float weightVal = 0.2; - auto filterWeightsNode = ov::test::utils::deprecated::make_constant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, - { weightVal }); - auto convLowNode = - ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue}); - auto convHighNode = - ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, - convLowNode, convHighNode, convLowNode, convHighNode, levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, - std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ov::op::PadType::VALID); - auto biasesWeightsNode = ov::test::utils::deprecated::make_constant(ngPrc, {}, std::vector{ 0.0f }); - auto add_1 = std::make_shared(conv, biasesWeightsNode); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1; - auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv * heightAfterConv }; - - auto lowNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{ 1 }, - std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f}); - auto highNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{ 1 }, - std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f}); - auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); - - auto constNode = ov::test::utils::deprecated::make_constant(ngPrc, {}, std::vector{ 0.5f }); - auto add_2 = std::make_shared(fq, constNode); - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(add_2, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "convFqEltwise"); -} - -InferenceEngine::Blob::Ptr ConvFqEltwiseTest::GenerateInput(const 
InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp deleted file mode 100644 index 0fa966d7f633ba..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/conv_fq_relu.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConvFqReluTest::getTestCaseName(const testing::TestParamInfo& obj) { - FqSpecificParams fqParams; - ConvParams convParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::map config; - std::tie(fqParams, convParams, netPrecision, inputShapes, targetDevice, config) = obj.param; - - size_t levels; - std::vector inputArg; - float convFQValue; - std::tie(levels, inputArg, convFQValue) = fqParams; - - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "LEVELS=" << levels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - if (inputArg.size() == 3) { - result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2]; - } - result << "_convFQ=" << convFQValue; - result << "_KERNEL=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "STRIDES=" << ov::test::utils::vec2str(strides) << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels; - return result.str(); -} - -void ConvFqReluTest::SetUp() { - FqSpecificParams fqParams; - ConvParams convParams; - std::vector inputShape; - std::map config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(fqParams, convParams, netPrecision, inputShape, targetDevice, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - - size_t levels; - std::vector inputArg; - float convFQValue; - std::tie(levels, inputArg, convFQValue) = fqParams; - if (inputArg.size() == 3) { - inputDataMin = inputArg[0]; - inputDataMax = inputArg[1]; - inputDataResolution = inputArg[2]; - } - - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - const int seed = 0; - std::mt19937 gen(seed); - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - float weightVal = 0.2; - auto filterWeightsNode = 
ov::test::utils::deprecated::make_constant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, - { weightVal }); - auto convLowNode = - ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue}); - auto convHighNode = - ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, - convLowNode, convHighNode, convLowNode, convHighNode, levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 }, - std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ov::op::PadType::VALID); - auto biasesWeightsNode = ov::test::utils::deprecated::make_constant(ngPrc, {}, std::vector{ 0.0f }); - auto add_1 = std::make_shared(conv, biasesWeightsNode); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1; - auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv * heightAfterConv }; - - auto lowNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{ 1 }, - std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f}); - auto highNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector{ 1 }, - std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f}); - auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels); - - auto relu = std::make_shared(fq); - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(relu, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "convFqEltwise"); -} - -InferenceEngine::Blob::Ptr ConvFqReluTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp b/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp deleted file mode 100644 index 1ae92846d7618f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/convolution_relu_sequence.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConvolutionReluSequenceTest::getTestCaseName(const testing::TestParamInfo& obj) { - convReluSpecificParamsAll convParamsAll; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - std::string targetDevice; - std::map config; - std::tie(convParamsAll, netPrecision, inPrc, outPrc, targetDevice, config) = - obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(convParamsAll.inputShape) << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice << "_"; - - for (auto&& single : convParamsAll.sequenceDesc) { - result << "K" << 
ov::test::utils::vec2str(single.kernelSize) << "_"; - result << "S" << ov::test::utils::vec2str(single.strides) << "_"; - result << "PB" << ov::test::utils::vec2str(single.padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(single.padEnd) << "_"; - result << "O=" << single.numOutChannels << "_"; - result << "PW" << ov::test::utils::vec2str(single.poolingWindow) << "_"; - result << "PS" << ov::test::utils::vec2str(single.poolingStride) << "_"; - } - - for (auto&& single : config) { - result << single.first << "=" << single.second; - } - return result.str(); -} - -void ConvolutionReluSequenceTest::SetUp() { - threshold = 0.0031; - const InferenceEngine::SizeVector dilation = { 1, 1 }; - convReluSpecificParamsAll convParamsAll; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::map config; - std::tie(convParamsAll, netPrecision, inPrc, outPrc, targetDevice, config) = - this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(convParamsAll.inputShape))}; - std::shared_ptr lastOutputs = params.front(); - auto inputChannels = convParamsAll.inputShape[1]; - - for (auto&& single : convParamsAll.sequenceDesc) { - const auto addBiases = true; - const auto filtersRange = 0.1f; - const auto biasesRange = 0.05f; - std::vector filter_weights; - std::vector biases; - - std::shared_ptr conv = - std::dynamic_pointer_cast( - ov::test::utils::make_convolution( - lastOutputs, - ngPrc, single.kernelSize, single.strides, single.padBegin, single.padEnd, - dilation, ov::op::PadType::EXPLICIT, single.numOutChannels, addBiases, filter_weights, biases)); - lastOutputs = std::make_shared(conv); - if (single.poolingWindow.size() == 2 && - (single.poolingWindow[0] != 1 || - single.poolingWindow[1] != 1)) { - lastOutputs = std::make_shared(lastOutputs, single.poolingStride, - ngraph::Shape{ 0, 0 }, - ngraph::Shape{ 0, 0 }, - single.poolingWindow); - } - inputChannels = single.numOutChannels; - } - - ngraph::ResultVector results{std::make_shared(lastOutputs)}; - function = std::make_shared(results, params, "convolution_relu_sequence"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp deleted file mode 100644 index 1362295cd1dbd2..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/copy_before_squeeze.hpp" - -namespace SubgraphTestsDefinitions { - std::string CopyBeforeSqueezeTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetName; - std::vector inputShape; - std::tie(netPrecision, targetName, inputShape, std::ignore) = obj.param; - std::ostringstream results; - - results << "netPRC=" << netPrecision.name() << "_"; - results << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - results << "targetDevice=" << targetName; - return results.str(); - } - - void CopyBeforeSqueezeTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::vector inputShape; - std::map config; - std::tie(netPrecision, targetDevice, inputShape, config) = this->GetParam(); - configuration.insert(config.begin(), 
config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto reshape_0_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{3}, - std::vector{1, inputShape[1] / 64, 64}); - auto reshape_0 = std::make_shared(input[0], reshape_0_pattern, false); - auto relu = std::make_shared(reshape_0); - - auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto reshape_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{2}, - std::vector{1, inputShape[1]}); - auto squeeze_1 = std::make_shared(relu, constant_squeeze); - auto reshape_1 = std::make_shared(squeeze_1, reshape_pattern, false); - auto squeeze_2 = std::make_shared(relu, constant_squeeze); - auto reshape_2 = std::make_shared(squeeze_2, reshape_pattern, false); - - auto concat = std::make_shared(ngraph::OutputVector{reshape_1, reshape_2}, 1); - function = std::make_shared(concat, input, "copy_before_squeeze"); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp b/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp deleted file mode 100644 index 9ee7a222f6760b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/delayed_copy_layer.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - void DelayedCopyTestBase::InitMemory() { - auto states = inferRequest.QueryState(); - for (auto& state : states) { - auto name = state.GetName(); - if (name.find("id") != std::string::npos) { - auto blob = FuncTestUtils::createAndFillBlobWithFloatArray(state.GetState()->getTensorDesc(), - memory_init.data(), memory_init.size()); - state.SetState(blob); - } else { - GTEST_FAIL() << "unknown memory state"; - } - } - } - - void DelayedCopyTestBase::LoadNetwork() { - LayerTestsUtils::LayerTestsCommon::LoadNetwork(); - inferRequest = executableNetwork.CreateInferRequest(); - } - - void DelayedCopyTestBase::Infer() { - ConfigureInferRequest(); - inferRequest.Infer(); - } - - - void DelayedCopyTestBase::Run() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - LoadNetwork(); - InitMemory(); - GenerateInputs(); - Infer(); - switchToNgraphFriendlyModel(); - Validate(); - } - - std::string DelayedCopyTestBase::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::Precision netPrecision; - std::string targetName; - std::map additional_config; - size_t memory_size; - std::tie(netPrecision, targetName, additional_config, memory_size) = obj.param; - std::ostringstream results; - - results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - results << "memorySize=" << memory_size; - for (auto const& configItem : additional_config) { - results << "_configItem=" << configItem.first << "_" << configItem.second; - } - return results.str(); - } - - void DelayedCopyTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map additional_config; - size_t memory_size; - std::tie(netPrecision, targetDevice, additional_config, memory_size) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - - ASSERT_EQ(memory_size % 2, 0); - - 
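// ---------------------------------------------------------------------------
// [Editor's note, not part of the deleted file] DelayedCopyTestBase::InitMemory
// above drives the legacy InferenceEngine state API (QueryState / GetState /
// SetState on Blob::Ptr), which this patch removes. A hedged sketch of the
// same initialization on the API 2.0 runtime; the f32 element type and the
// {1, N} state shape follow the test above, while the function name is the
// editor's:
#include <algorithm>
#include <vector>
#include "openvino/runtime/infer_request.hpp"

static void init_memory_sketch(ov::InferRequest& request, const std::vector<float>& init) {
    for (auto&& state : request.query_state()) {
        if (state.get_name().find("id") == std::string::npos)
            continue;  // the test only owns states named "...id..."
        ov::Tensor value(ov::element::f32, ov::Shape{1, init.size()});
        std::copy(init.begin(), init.end(), value.data<float>());
        state.set_state(value);  // API 2.0 replacement for SetState(Blob::Ptr)
    }
}
// ---------------------------------------------------------------------------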
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, 3 * memory_size})}; - - memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f); - - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); - - auto mem_r = std::make_shared(mem_c, "id"); - - auto concat = std::make_shared(ngraph::OutputVector{mem_r, input[0]}, 1); - - auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split = std::make_shared(ov::element::u64, ov::Shape{2}, std::vector{3 * memory_size, memory_size}); - auto split = std::make_shared(concat, split_axis_op, num_split); - - auto mem_w = std::make_shared(split->output(1), "id"); - - auto split_axis_op_variadic = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split_variadic = std::make_shared(ov::element::u64, ov::Shape{2}, - std::vector{memory_size / 2, 3 * memory_size + memory_size / 2}); - auto VariadicSplit = std::make_shared(concat, split_axis_op_variadic, num_split_variadic); - - auto relu2 = std::make_shared(VariadicSplit->output(1)); - - mem_w->add_control_dependency(mem_r); - relu2->add_control_dependency(mem_w); - - function = std::make_shared(relu2, input, "delayed_copy_layer_memory"); - } - - void DelayedCopyTest::switchToNgraphFriendlyModel() { - InferenceEngine::Precision netPrecision; - std::map config; - std::map additional_config; - size_t memory_size; - std::tie(netPrecision, targetDevice, additional_config, memory_size) = this->GetParam(); - - ASSERT_EQ(memory_size % 2, 0); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, 3 * memory_size})}; - - auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init); - auto concat = std::make_shared(ngraph::OutputVector{mem_c, input[0]}, 1); - - auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split = std::make_shared(ov::element::u64, ov::Shape{2}, std::vector{3 * memory_size, memory_size}); - auto split = std::make_shared(concat, split_axis_op, num_split); - - auto split_axis_op_variadic = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); - auto num_split_variadic = std::make_shared(ov::element::u64, ov::Shape{2}, - std::vector{memory_size / 2, 3 * memory_size + memory_size / 2}); - auto VariadicSplit = std::make_shared(concat, split_axis_op_variadic, num_split_variadic); - - auto relu2 = std::make_shared(VariadicSplit->output(1)); - - function = std::make_shared(relu2, input, "delayed_copy_layer_nonmemory"); - } - - void DelayedCopyAfterReshapeWithMultipleConnTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map additional_config; - size_t memory_size; - std::tie(netPrecision, targetDevice, additional_config, memory_size) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - - ASSERT_EQ(memory_size % 8, 0); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, memory_size / 2})}; - - memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f); - - auto mem_c = ov::test::utils::deprecated::make_constant(ngPrc, ngraph::Shape{8, memory_size / 8}, memory_init); - auto mem_r = std::make_shared(mem_c, "id"); - auto reshape_pattern1 = 
ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape1 = std::make_shared(mem_r, reshape_pattern1, false); - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(reshape1, split_axis_op, 2); - - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); - auto reshape_pattern2 = ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8}); - auto reshape2 = std::make_shared(concat, reshape_pattern2, false); - - auto mem_w = std::make_shared(reshape2, "id"); - - auto relu = std::make_shared(reshape2); - auto reshape_pattern3 = ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape3 = std::make_shared(relu, reshape_pattern3, false); - - mem_w->add_control_dependency(mem_r); - reshape3->add_control_dependency(mem_w); - - function = std::make_shared(reshape3, input, "delayed_copy_layer_reshape_memory"); - } - - void DelayedCopyAfterReshapeWithMultipleConnTest::switchToNgraphFriendlyModel() { - InferenceEngine::Precision netPrecision; - std::map config; - std::map additional_config; - size_t memory_size; - std::tie(netPrecision, targetDevice, additional_config, memory_size) = this->GetParam(); - - ASSERT_EQ(memory_size % 8, 0); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, memory_size / 2})}; - - auto mem_c = ov::test::utils::deprecated::make_constant(ngPrc, ngraph::Shape{1, memory_size}, memory_init); - auto reshape_pattern1 = ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape1 = std::make_shared(mem_c, reshape_pattern1, false); - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(reshape1, split_axis_op, 2); - - auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1); - auto reshape_pattern2 = ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8}); - auto reshape2 = std::make_shared(concat, reshape_pattern2, false); - - auto relu = std::make_shared(reshape2); - auto reshape_pattern3 = ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size}); - auto reshape3 = std::make_shared(relu, reshape_pattern3, false); - - function = std::make_shared(reshape3, input, "delayed_copy_layer_reshape_nonmemory"); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp deleted file mode 100644 index 7705ff6f497836..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_conv_eltwise.cpp +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/eltwise_conv_eltwise.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string EltwiseAfterConvTest::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision 
netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr EltwiseAfterConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void EltwiseAfterConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? 
stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - auto scale = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); - auto shift = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); - auto mul = std::make_shared(reshape2, mul_const); - auto add_const = std::make_shared(ngPrc, outFormShapes, shift); - auto add = std::make_shared(mul, add_const); - - function = std::make_shared(mul, params, "EltwiseAfterConvTest"); -} - -std::string EltwiseBeforeConvTest::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr EltwiseBeforeConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void EltwiseBeforeConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto scale = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); - auto shift = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, inputShape, scale); - auto mul = std::make_shared(params[0], mul_const); - auto add_const = std::make_shared(ngPrc, inputShape, shift); - auto add = std::make_shared(mul, add_const); - - std::vector convInputShape = {1, inputChannels, 1, 
inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(mul, reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "EltwiseBeforeConvTest"); -} - -std::string EltwiseWithTwoConvsAsInputsTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr EltwiseWithTwoConvsAsInputsTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void EltwiseWithTwoConvsAsInputsTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], 
reshapePattern1, false); - - auto filterWeights1 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto stride_h = kernelShape[0] > 1 ? stride : 1; - auto conv1 = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {stride_h, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights1); - - auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv1, reshapePattern2, false); - - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape3 = std::make_shared(params[1], reshapePattern3, false); - - auto filterWeights2 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv2 = ov::test::utils::make_convolution(reshape3, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {stride_h, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights2); - - auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape4 = std::make_shared(conv2, reshapePattern4, false); - - auto add = std::make_shared(reshape2, reshape4); - function = std::make_shared(add, params, "EltwiseWithTwoConvsAsInputsTest"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp deleted file mode 100644 index b450b7db18baf0..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "ov_models/builders.hpp" -#include "shared_test_classes/subgraph/eltwise_reshape_activation.hpp" -#include "common_test_utils/node_builders/activation.hpp" -#include "common_test_utils/node_builders/eltwise.hpp" - -namespace SubgraphTestsDefinitions { - -using namespace ov::test::utils; -using namespace InferenceEngine; - -std::string EltwiseReshapeActivation::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::vector> shapes; - std::string targetDevice; - std::map configuration; - std::tie(shapes, netPrecision, targetDevice, configuration) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(shapes[0]) << "_"; - result << "AS=" << ov::test::utils::vec2str(shapes[1]) << "_"; - result << "PRC=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void EltwiseReshapeActivation::SetUp() { - InferenceEngine::Precision netPrecision; - std::vector> shapes; - std::map config; - std::tie(shapes, netPrecision, targetDevice, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector 
input{std::make_shared(ngPrc, ov::Shape(shapes[0])), - std::make_shared(ngPrc, ov::Shape(shapes[0]))}; - auto eltw = ov::test::utils::make_eltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); - - auto reshape_pattern1 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[1].size()}, shapes[1]); - auto reshape1 = std::make_shared(eltw, reshape_pattern1, false); - - auto relu = ov::test::utils::make_activation(reshape1, ngPrc, ngraph::helpers::ActivationTypes::Relu); - - auto reshape_pattern2 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[0].size()}, shapes[0]); - auto reshape2 = std::make_shared(relu, reshape_pattern2, false); - - function = std::make_shared(reshape2, input, "EltwiseReshapeActivation"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp deleted file mode 100644 index e4a6fa32b3dda3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/fc_conv_fc.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string FcAfterConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr FcAfterConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void FcAfterConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, 
ov::Shape(inputShape))}; - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.1f, 0.1f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - auto relu1 = std::make_shared(reshape2); - - std::vector fc3_weights = ov::test::utils::generate_float_numbers(outFormShapes[1] * outFormShapes[1], -0.1f, 0.1f); - auto fc3 = ngraph::builder::makeFullyConnected(relu1, ngPrc, outFormShapes[1], false, {}, fc3_weights); - - auto fc4_weights = ov::test::utils::generate_float_numbers(outFormShapes[1] * outFormShapes[1], -0.1f, 0.1f); - auto fc4 = ngraph::builder::makeFullyConnected(fc3, ngPrc, outFormShapes[1], false, {}, fc4_weights); - - function = std::make_shared(fc4, params, "FcAfterConvTest"); -} - -std::string FcBeforeConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr FcBeforeConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -0.1f, 0.1f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void FcBeforeConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, 
kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto fc1_weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.1f, 0.1f); - auto fc1 = ngraph::builder::makeFullyConnected(params[0], ngPrc, inputShape[1], false, {}, fc1_weights); - - auto fc2_weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.05f, 0.05f); - auto fc2 = ngraph::builder::makeFullyConnected(fc1, ngPrc, inputShape[1], false, {}, fc2_weights); - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(fc2, reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.1f, 0.1f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - function = std::make_shared(reshape2, params, "FcBeforeConvTest"); -} - -std::string FcBetweenConvsTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr FcBetweenConvsTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -0.2f, 0.2f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void FcBetweenConvsTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - 
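// ---------------------------------------------------------------------------
// [Editor's note, not part of the deleted file] The Fc*ConvTest classes in
// this file build their dense layers through ngraph::builder::makeFullyConnected
// from ov_models/builders.hpp, one of the legacy helpers this patch retires.
// A minimal sketch of the bias-free variant used above with plain opset
// classes; the helper name and the [out, in] weight layout paired with
// transpose_b=true are the editor's assumptions:
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/matmul.hpp"

static std::shared_ptr<ov::Node> make_fc_sketch(const ov::Output<ov::Node>& in,
                                                const ov::element::Type& prc,
                                                size_t out_size,
                                                const std::vector<float>& weights) {
    const size_t in_size = in.get_shape().back();
    auto w = ov::op::v0::Constant::create(prc, ov::Shape{out_size, in_size}, weights);
    // transpose_b=true multiplies by the transposed [out, in] weight matrix.
    return std::make_shared<ov::op::v0::MatMul>(in, w, false, true);
}
// ---------------------------------------------------------------------------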
configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector conv1InputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv1InputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto filter1Weights = ov::test::utils::generate_float_numbers(outputChannels * conv1InputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv1 = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter1Weights); - - auto widthAfterConv1 = (conv1InputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes1 = {1, outputChannels * widthAfterConv1 }; - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes1); - auto reshape2 = std::make_shared(conv1, reshapePattern2, false); - auto relu = std::make_shared(reshape2); - - auto fc_weights = ov::test::utils::generate_float_numbers(outFormShapes1[1] * outFormShapes1[1], -0.2f, 0.2f); - auto fc = ngraph::builder::makeFullyConnected(relu, ngPrc, outFormShapes1[1], false, {}, fc_weights); - - std::vector conv2InputShape = {1, outputChannels, 1, widthAfterConv1}; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv2InputShape); - auto reshape3 = std::make_shared(fc, reshapePattern3, false); - - auto filter2Weights = ov::test::utils::generate_float_numbers(outputChannels * conv2InputShape[1], - -0.2f, 0.2f); - auto conv2 = ov::test::utils::make_convolution(reshape3, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 }, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter2Weights); - std::vector outFormShapes2 = {1, outputChannels * conv2InputShape[3]}; - - auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2); - auto reshape4 = std::make_shared(conv2, reshapePattern4, false); - - function = std::make_shared(reshape4, params, "FcBetweenConvsTest"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp deleted file mode 100644 index 7745b2f82ca64b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/first_connect_input_concat.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ConcatFirstInputTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map additional_config; - std::tie(inputShapes, netPrecision, targetDevice, additional_config) = obj.param; - - std::ostringstream result; - result << "IS=" << 
ov::test::utils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : additional_config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void ConcatFirstInputTest::SetUp() { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShapes, netPrecision, targetDevice, configuration) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params; - for (auto&& shape : inputShapes) { - params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); - } - auto const_second_param = ov::test::utils::deprecated::make_constant(ngPrc, {1, 8}, std::vector{-1.0f}); - auto concat = std::make_shared(ngraph::OutputVector{params[0], const_second_param}, 1); - auto relu = std::make_shared(concat); - - ngraph::ResultVector results{std::make_shared(relu)}; - - function = std::make_shared(results, params, "ConcatMultiInput"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp deleted file mode 100644 index cce8072546c57e..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/fq_conv_fq_affine.hpp" - -#include "common_test_utils/node_builders/constant.hpp" -#include "common_test_utils/node_builders/fake_quantize.hpp" - -namespace SubgraphTestsDefinitions { - -std::string FqConvFqAffineTest::getTestCaseName(const testing::TestParamInfo& obj) { - FqSpecificParams fqParams; - ConvParams convParams; - bool permute; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::map config; - std::tie(fqParams, convParams, permute, netPrecision, inputShapes, targetDevice, config) = obj.param; - - std::vector levels; - std::vector inputArg; - std::tie(levels, inputArg) = fqParams; - - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "LEVELS=" << ov::test::utils::vec2str(levels) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - if (inputArg.size() == 3) { - result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2]; - } - result << "_KERNEL=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "STRIDES=" << ov::test::utils::vec2str(strides) << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "permute=" << permute << "\n"; - return result.str(); -} - -void FqConvFqAffineTest::SetUp() { - FqSpecificParams fqParams; - ConvParams convParams; - bool permute; - std::vector inputShape; - std::map config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(fqParams, convParams, permute, netPrecision, inputShape, 
-    configuration.insert(config.begin(), config.end());
-
-    std::vector<size_t> levels;
-    std::vector<float> inputArg;
-    std::tie(levels, inputArg) = fqParams;
-    if (inputArg.size() == 3) {
-        inputDataMin = inputArg[0];
-        inputDataMax = inputArg[1];
-        inputDataResolution = inputArg[2];
-    }
-
-    std::vector<size_t> kernelShape;
-    std::vector<size_t> strides;
-    size_t inputChannels;
-    size_t outputChannels;
-    std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams;
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    const int seed = 0;
-    std::mt19937 gen(seed);
-
-    auto inputFQNode = ov::test::utils::make_fake_quantize(params[0], ngraph::element::f32, levels[0], std::vector<size_t>{},
-        { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax });
-    auto inputFQ = std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(inputFQNode);
-
-    std::vector<size_t> convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared<ov::op::v1::Reshape>(inputFQ, reshapePattern1, false);
-
-    auto filterWeightsNode = ov::test::utils::deprecated::make_constant<float>(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]},
-                                                                               { 1.0f });
-
-    auto convLowNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{inputDataMin});
-    auto convHighNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{inputDataMax});
-    auto convWeightsFQNode = std::make_shared<ov::op::v0::FakeQuantize>(filterWeightsNode,
-                                                                        convLowNode, convHighNode, convLowNode, convHighNode, levels[1]);
-    auto convWeightsFQ = std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(convWeightsFQNode);
-
-    auto conv = std::make_shared<ov::op::v1::Convolution>(reshape1, convWeightsFQ, strides, std::vector<ptrdiff_t>{ 0, 0 },
-                                                          std::vector<ptrdiff_t>{ 0, 0 }, std::vector<size_t>{ 1, 1 },
-                                                          ov::op::PadType::VALID);
-    auto biasesWeightsNode = ov::test::utils::deprecated::make_constant(ngPrc, {}, std::vector<float>{ 0.0f });
-    auto add = std::make_shared<ov::op::v1::Add>(conv, biasesWeightsNode);
-
-    auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1;
-    auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1;
-    std::vector<size_t> outFormShapes = {1,  outputChannels * widthAfterConv * heightAfterConv };
-
-    ngraph::Output<ngraph::Node> nodeBeforeReshape;
-    if (permute) {
-        auto permuteOrder = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64,
-                                                                   ngraph::Shape{4},
-                                                                   ngraph::Shape{{0, 3, 2, 1}});
-        auto transpose = std::make_shared<ov::op::v1::Transpose>(add, permuteOrder);
-        nodeBeforeReshape = transpose;
-    } else {
-        nodeBeforeReshape = add;
-    }
-
-    auto reshapePattern2 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared<ov::op::v1::Reshape>(nodeBeforeReshape, reshapePattern2, false);
-
-    auto matMulWeightsNode = ov::test::utils::deprecated::make_constant<float>(ngPrc, {outFormShapes[1], outFormShapes[1]}, { 1.0f });
-    auto matMulLowNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{inputDataMin});
-    auto matMulHighNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{inputDataMax});
-    auto matMulWeightsFQNode = std::make_shared<ov::op::v0::FakeQuantize>(matMulWeightsNode,
-                                                                          matMulLowNode, matMulHighNode, matMulLowNode, matMulHighNode, levels[1]);
-    auto matMulWeightsFQ = std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(matMulWeightsFQNode);
-
-    auto matmul = std::make_shared<ov::op::v0::MatMul>(reshape2, matMulWeightsFQ, false, true);
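-
-    // Topology under test: FQ -> Reshape(NCHW) -> Convolution(FQ'd weights) ->
-    // Add(bias) -> optional Transpose -> Reshape(2D) -> MatMul(FQ'd weights).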
-    function = std::make_shared<ngraph::Function>(matmul, params, "fqConvfqAffine");
-}
-
-InferenceEngine::Blob::Ptr FqConvFqAffineTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution,
-                                            seed);
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
deleted file mode 100644
index 177e252e4682b7..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/fq_with_mixed_levels.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/node_builders/eltwise.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-#include "common_test_utils/node_builders/fake_quantize.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string FqWithMixedLevelsTest::getTestCaseName(const testing::TestParamInfo<FqWithMixedLevelsParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::tie(netPrecision, targetDevice, configuration) = obj.param;
-
-    std::ostringstream result;
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void FqWithMixedLevelsTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> tempConfig;
-    std::tie(netPrecision, targetDevice, tempConfig) = this->GetParam();
-    configuration.insert(tempConfig.begin(), tempConfig.end());
-
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto unit = [=](const std::shared_ptr<ngraph::Node>& input,
-                    const std::vector<std::vector<size_t>>& shapes,
-                    float weights_min, float weights_max,
-                    size_t level1, const std::vector<std::vector<float>>& data1,
-                    size_t level2, const std::vector<std::vector<float>>& data2,
-                    size_t level3, const std::vector<std::vector<float>>& data3) {
-        auto sigmoid = std::make_shared<ov::op::v0::Sigmoid>(input);
-        auto fake1 = ov::test::utils::make_fake_quantize(sigmoid, ngPrc, level1, { 1 }, data1[0], data1[1], data1[2], data1[3]);
-        std::vector<float> weights = ov::test::utils::generate_float_numbers(shapes[1][0] * shapes[1][1], weights_min, weights_max);
-        auto constant = std::make_shared<ov::op::v0::Constant>(ngPrc, ngraph::Shape{shapes[1][0], shapes[1][1]}, weights);
-        auto fake2 = ov::test::utils::make_fake_quantize(constant, ngPrc, level2, { 1 }, data2[0], data2[1], data2[2], data2[3]);
-        auto matmul = std::make_shared<ov::op::v0::MatMul>(fake1, fake2, false, true);
-        auto bias = ov::test::utils::deprecated::make_constant(ngPrc, std::vector<size_t>{shapes[0][0], shapes[1][0]}, std::vector<float>{ 1.0 });
-        auto add = ov::test::utils::make_eltwise(matmul, bias, ngraph::helpers::EltwiseTypes::ADD);
-        return ov::test::utils::make_fake_quantize(add, ngPrc, level3, { 1 }, data3[0], data3[1], data3[2], data3[3]);
-    };
-
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{ 1, 8 })};
-    auto input = ov::test::utils::make_fake_quantize(params[0], ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { -10. }, { 10. }, { -10. }, { 10. });
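-    // The three units below differ only in their weight ranges; each unit mixes
-    // the quantization level counts of its activation and weight FQ layers.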
-    input = unit(input,
-        {{1, 8}, {8, 8}},
-        -20., 20.,
-        std::numeric_limits<uint16_t>::max(), {{ -1.0 }, { 1.0 }, { -1.0 }, { 1.0 }},
-        std::numeric_limits<int16_t>::max(), {{ -2.5 }, { 2.5 }, { -2.5 }, { 2.5 }},
-        std::numeric_limits<uint32_t>::max(), {{ -5. } , { 5. }, { -5. }, { 5. }});
-    input = unit(input,
-        {{ 1, 8 }, { 8, 8 }},
-        -13., 13.,
-        std::numeric_limits<uint16_t>::max(), {{ -1.0 }, { 1.0 }, { -1.0 }, { 1.0 }},
-        std::numeric_limits<int16_t>::max(), {{ -2.5 }, { 2.5 }, { -2.5 }, { 2.5 }},
-        std::numeric_limits<uint32_t>::max(), {{ -5. } , { 5. }, { -5. }, { 5. }});
-    input = unit(input,
-        {{1, 8}, {8, 8}},
-        -20., 20.,
-        std::numeric_limits<uint16_t>::max(), {{ -1.0 }, { 1.0 }, { -1.0 }, { 1.0 }},
-        std::numeric_limits<int16_t>::max(), {{ -2.5 }, { 2.5 }, { -2.5 }, { 2.5 }},
-        std::numeric_limits<uint32_t>::max(), {{ -5. } , { 5. }, { -5. }, { 5. }});
-    auto result = std::make_shared<ov::op::v0::Result>(input);
-    function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, params, "FqWithMixedLevelsTest");
-}
-
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
deleted file mode 100644
index 493921febf43bf..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/handling_orientation_conv.hpp"
-#include "common_test_utils/node_builders/convolution.hpp"
-
-namespace SubgraphTestsDefinitions {
-    std::string HandlingOrientationClass::getTestCaseName(const testing::TestParamInfo<HandlingOrientationParams> &obj) {
-        InferenceEngine::Precision netPrecision;
-        std::string targetName;
-        std::map<std::string, std::string> configuration;
-        std::tie(netPrecision, targetName, configuration) = obj.param;
-        std::ostringstream results;
-
-        results << "netPRC=" << netPrecision.name() << "_";
-        results << "targetDevice=" << targetName << "_";
-        return results.str();
-    }
-
-    void HandlingOrientationClass::SetUp() {
-        InferenceEngine::Precision netPrecision;
-        std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 336}),
-                                   std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, 336})};
-        std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
-        std::vector<size_t> outFormShapes2 = { 1, 336, 1, 1 };
-        auto pattern1 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
-        auto reshape1 = std::make_shared<ov::op::v1::Reshape>(params[0], pattern1, false);
-
-        auto pattern2 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes2);
-        auto reshape2 = std::make_shared<ov::op::v1::Reshape>(params[1], pattern2, false);
-
-        auto permute1 = std::make_shared<ov::op::v1::Transpose>(reshape1,
-                        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
-
-        auto conv1 = ov::test::utils::make_convolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
-                                                       ov::op::PadType::VALID, 12);
-
-        auto permute2 = std::make_shared<ov::op::v1::Transpose>(conv1,
-                        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
-
-        auto conv2 = ov::test::utils::make_convolution(reshape2, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
-                                                       ov::op::PadType::VALID, 336);
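-
-        // conv1 consumes a transposed (NCHW-ordered) view of its reshaped input, while
-        // conv2 consumes its reshape directly; both branches are flattened back to 2D
-        // below so the permutation handling can be verified on plain outputs.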
-        std::vector<size_t> outFormShapes3 = { 1, 1932 };
-        std::vector<size_t> outFormShapes4 = { 1, 336 };
-        auto pattern3 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
-        auto pattern4 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes4);
-        auto reshape3 = std::make_shared<ov::op::v1::Reshape>(permute2, pattern3, false);
-        auto reshape4 = std::make_shared<ov::op::v1::Reshape>(conv2, pattern4, false);
-        ngraph::ResultVector results{ std::make_shared<ov::op::v0::Result>(reshape3),
-                                      std::make_shared<ov::op::v0::Result>(reshape4)};
-        function = std::make_shared<ngraph::Function>(results, params, "RemovePermutationPass");
-    }
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
deleted file mode 100644
index bd97c43551bba7..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/input_conv.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/node_builders/convolution.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string InputConvTest::getTestCaseName(const testing::TestParamInfo<inputConvParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    size_t outputChannels;
-    convParams convolutionParams;
-    std::vector<size_t> inputShape;
-    std::vector<size_t> kernelShape;
-    size_t stride;
-    bool addReshape;
-    std::tie(netPrecision, targetDevice, configuration, convolutionParams, outputChannels, addReshape) = obj.param;
-    std::tie(inputShape, kernelShape, stride) = convolutionParams;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_";
-    result << "S=" << stride << "_";
-    result << "OC=" << outputChannels << "_";
-    result << "addReshape=" << addReshape << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-InferenceEngine::Blob::Ptr InputConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const {
-    InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
-    blob->allocate();
-    auto precision = info.getPrecision();
-
-    auto* rawBlobDataPtr = blob->buffer().as<float*>();
-    for (size_t i = 0; i < blob->size(); i++) {
-        float value = i % 16;
-        if (typeid(precision) == typeid(typename InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type)) {
-            rawBlobDataPtr[i] = ngraph::float16(value).to_bits();
-        } else {
-            rawBlobDataPtr[i] = value;
-        }
-    }
-    return blob;
-}
-
-void InputConvTest::SetUp() {
-    auto generateWeights = [](std::size_t out_channels, std::size_t kernel_size) {
-        std::vector<float> res;
-        for (std::size_t i = 0; i < out_channels; ++i) {
-            for (std::size_t j = 0; j < kernel_size; ++j) {
-                j == 0 ? res.emplace_back(0.2f) : res.emplace_back(0.0f);
-            }
-        }
-
-        return res;
-    };
-
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> tempConfig;
-    convParams convolutionParams;
-    size_t outputChannels;
-    bool addReshape;
-    std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, outputChannels, addReshape) = this->GetParam();
-    configuration.insert(tempConfig.begin(), tempConfig.end());
-
-    std::vector<size_t> inputShape;
-    std::vector<size_t> kernelShape;
-    size_t stride;
-    std::tie(inputShape, kernelShape, stride) = convolutionParams;
-
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    auto conv0 = ov::test::utils::make_convolution(params[0],
-                                                   ngPrc,
-                                                   {kernelShape[0], kernelShape[1]},
-                                                   {kernelShape[0] > 1 ? stride : 1, stride},
-                                                   {0, 0},
-                                                   {0, 0},
-                                                   {1, 1},
-                                                   ov::op::PadType::VALID,
-                                                   outputChannels,
-                                                   true,
-                                                   generateWeights(outputChannels, kernelShape[1]));
-
-    if (addReshape) {
-        size_t numOutputWidth = (((inputShape[1] * inputShape[2] * inputShape[3] - kernelShape[1] * kernelShape[0]) / (inputShape[1] * stride)) + 1);
-        std::vector<size_t> outFormShapes0 = { 1, outputChannels * numOutputWidth };
-        auto pattern0 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes0);
-        auto reshape0 = std::make_shared<ov::op::v1::Reshape>(conv0, pattern0, false);
-
-        ngraph::ResultVector results{ std::make_shared<ov::op::v0::Result>(reshape0) };
-        function = std::make_shared<ngraph::Function>(results, params, "InputConvTest");
-    } else {
-        ngraph::ResultVector results{ std::make_shared<ov::op::v0::Result>(conv0) };
-        function = std::make_shared<ngraph::Function>(results, params, "InputConvTest");
-    }
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
deleted file mode 100644
index f2c4c9dbbc56a7..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/input_split_concat.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string InputSplitConcatTest::getTestCaseName(const testing::TestParamInfo<InputSplitConcatParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::vector<size_t> inputShape;
-    std::tie(netPrecision, targetDevice, configuration, inputShape) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void InputSplitConcatTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> tempConfig;
-    std::vector<size_t> inputShape;
-    std::tie(netPrecision, targetDevice, tempConfig, inputShape) = this->GetParam();
-    configuration.insert(tempConfig.begin(), tempConfig.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{1});
-    auto split = std::make_shared<ov::op::v1::Split>(params[0], split_axis_op, 2);
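-
-    // Branch 0 of the split feeds a plain Relu; branch 1 is first concatenated
-    // with a random constant, so both outputs depend on the same input tensor.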
-    auto relu1 = std::make_shared<ov::op::v0::Relu>(split->output(0));
-
-    auto const_vals = ov::test::utils::generate_float_numbers(inputShape[1], -5.0f, 5.0f);
-    auto constant = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, const_vals);
-    auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{constant, split->output(1)}, 1);
-    auto relu2 = std::make_shared<ov::op::v0::Relu>(concat);
-
-    ngraph::ResultVector results{ std::make_shared<ov::op::v0::Result>(relu1), std::make_shared<ov::op::v0::Result>(relu2) };
-    function = std::make_shared<ngraph::Function>(results, params, "InputSplitConcatTest");
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
deleted file mode 100644
index f8566dccc8c0f0..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/matmul_act_add.hpp"
-
-#include "common_test_utils/node_builders/constant.hpp"
-
-namespace SubgraphTestsDefinitions {
-std::string MatMulActAddTest::getTestCaseName(const testing::TestParamInfo<MatMulActAddParams> &obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    size_t inputSize;
-    std::map<std::string, std::string> configuration;
-    std::tie(inputSize, netPrecision, targetDevice, configuration) = obj.param;
-
-    std::ostringstream result;
-    result << "inputSize=" << inputSize << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice << "_";
-    for (auto const &configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void MatMulActAddTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    size_t inputSize;
-    std::map<std::string, std::string> config;
-    std::tie(inputSize, netPrecision, targetDevice, config) = this->GetParam();
-    configuration.insert(config.begin(), config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    std::vector<size_t> outFormShapes = {1, 2 * inputSize};
-
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{ 1, inputSize })};
-
-    auto mul_const = ov::test::utils::deprecated::make_constant(ngPrc, { outFormShapes[1], inputSize },
-                                                                ov::test::utils::generate_float_numbers(outFormShapes[1] * inputSize, -0.5f, 0.5f), false);
-
-    auto matmul = std::make_shared<ov::op::v0::MatMul>(params[0], mul_const, false, true);
-
-    auto tanh = std::make_shared<ov::op::v0::Tanh>(matmul);
-    auto eltw = std::make_shared<ov::op::v1::Add>(matmul, tanh);
-    auto res = std::make_shared<ov::op::v0::Result>(eltw);
-    function = std::make_shared<ngraph::Function>(res, params, "MatMul_Act_Add");
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
deleted file mode 100644
index f1fe64352cb828..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <random>
-
-#include "ov_models/builders.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-#include "shared_test_classes/subgraph/memory_eltwise_reshape_concat.hpp"
-#include "common_test_utils/node_builders/eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string MemoryEltwiseReshapeConcatTest::getTestCaseName(const testing::TestParamInfo<memoryEltwiseReshapeConcatParams> &obj) {
-    std::string targetDevice;
-    InferenceEngine::Precision netPrecision;
-    size_t inputSize;
-    size_t concatSize;
-    std::map<std::string, std::string> config;
-    std::tie(targetDevice, netPrecision, inputSize, concatSize, config) = obj.param;
-    std::ostringstream result;
-
-    result << "netPrecision=" << netPrecision.name() << "_";
-    result << "IS=" << inputSize << "_";
-    result << "CS=" << concatSize << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void MemoryEltwiseReshapeConcatTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> config;
-    std::tie(targetDevice, netPrecision, inputSize, concatSize, config) = this->GetParam();
-    configuration.insert(config.begin(), config.end());
-    ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    const int seed = 0;
-    std::mt19937 gen(seed);
-
-    auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
-        std::vector<float> res;
-
-        std::uniform_real_distribution<float> dist(min, max);
-        for (std::size_t i = 0; i < vec_len; i++)
-            res.emplace_back(static_cast<float>(dist(gen)));
-
-        return res;
-    };
-
-    memory_init = generateFloatNumbers(inputSize * concatSize, -1.0f, 1.0f);
-    concat_vals = generateFloatNumbers(concatSize, 12.0f, 14.0f);
-}
-
-void MemoryEltwiseReshapeConcatTest::initTestModel() {
-    InferenceEngine::SizeVector input_dims = {1, inputSize * concatSize};
-    ov::ParameterVector input_parameter {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(input_dims))};
-
-    auto memory_constant = ov::test::utils::deprecated::make_constant(ngPrc, input_dims, memory_init);
-    memory_constant->set_friendly_name("memory_constant");
-    auto memory_read = std::make_shared<ov::op::v3::ReadValue>(memory_constant, "memory");
-    memory_read->set_friendly_name("memory_read");
-
-    auto mul = ov::test::utils::make_eltwise(input_parameter[0], memory_read, ngraph::helpers::EltwiseTypes::MULTIPLY);
-    mul->set_friendly_name("multiplication");
-
-    auto memory_write = std::make_shared<ov::op::v3::Assign>(mul, "memory");
-    memory_write->set_friendly_name("memory_write");
-
-    auto reshape_1_pattern = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{2}, std::vector<size_t>({inputSize, concatSize}));
-    reshape_1_pattern->set_friendly_name("reshape_pattern");
-    auto reshape_1 = std::make_shared<ov::op::v1::Reshape>(mul, reshape_1_pattern, false);
-    reshape_1->set_friendly_name("reshape");
-
-    auto concat_constant = ov::test::utils::deprecated::make_constant(ngPrc, {1, concatSize}, concat_vals);
-    concat_constant->set_friendly_name("concat_constant");
-
-    auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{concat_constant, reshape_1}, 0);
-
-    memory_write->add_control_dependency(memory_read);
-    concat->add_control_dependency(memory_write);
-
-    auto final_reshape_pattern = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{4},
-                                                                        std::vector<size_t>({1, 1, inputSize + 1, concatSize}));
-    auto final_reshape = std::make_shared<ov::op::v1::Reshape>(concat, final_reshape_pattern, false);
-
-    function = std::make_shared<ngraph::Function>(final_reshape, input_parameter, "memory_multiply_reshape_concat");
-}
-
-void MemoryEltwiseReshapeConcatTest::initNgraphFriendlyModel() {
-    InferenceEngine::SizeVector input_dims = {1, inputSize * concatSize};
-    ov::ParameterVector input_parameter {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(input_dims))};
-
-    auto memory_constant = ov::test::utils::deprecated::make_constant(ngPrc, input_dims, memory_init);
-    memory_constant->set_friendly_name("memory_constant");
-
-    auto mul = ov::test::utils::make_eltwise(input_parameter[0], memory_constant, ngraph::helpers::EltwiseTypes::MULTIPLY);
-    mul->set_friendly_name("multiplication");
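-
-    // Reference model: the ReadValue/Assign pair is replaced with a plain constant,
-    // and the 2D reshape of the memory model is expressed as a 3D reshape + squeeze.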
-
-    auto reshape_pattern = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{3}, std::vector<size_t>({1, inputSize, concatSize}));
-    reshape_pattern->set_friendly_name("reshape_pattern");
-    auto reshape = std::make_shared<ov::op::v1::Reshape>(mul, reshape_pattern, false);
-    reshape->set_friendly_name("reshape");
-
-    auto squeeze_const = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{1}, 0);
-    squeeze_const->set_friendly_name("squeeze_const");
-    auto squeeze = std::make_shared<ov::op::v0::Squeeze>(reshape, squeeze_const);
-    squeeze->set_friendly_name("squeeze");
-
-    auto concat_constant = ov::test::utils::deprecated::make_constant(ngPrc, {1, concatSize}, concat_vals);
-    concat_constant->set_friendly_name("concat_constant");
-
-    auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{concat_constant, squeeze}, 0);
-
-    function = std::make_shared<ngraph::Function>(concat, input_parameter, "memory_multiply_reshape_concat");
-}
-
-void MemoryEltwiseReshapeConcatTest::LoadNetwork() {
-    LayerTestsUtils::LayerTestsCommon::LoadNetwork();
-    inferRequest = executableNetwork.CreateInferRequest();
-}
-
-void MemoryEltwiseReshapeConcatTest::Infer() {
-    ConfigureInferRequest();
-    inferRequest.Infer();
-}
-
-void MemoryEltwiseReshapeConcatTest::Run() {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    initTestModel();
-    LoadNetwork();
-
-    InferenceEngine::TensorDesc state_description(InferenceEngine::Precision::FP32,
-                                                  InferenceEngine::SizeVector({1, inputSize * concatSize}),
-                                                  InferenceEngine::Layout::NC);
-
-    IE_SUPPRESS_DEPRECATED_START
-    auto states = inferRequest.QueryState();
-    auto state_values_blob = FuncTestUtils::createAndFillBlobWithFloatArray(state_description,
-                                                                            memory_init.data(), memory_init.size());
-    states[0].SetState(state_values_blob);
-    IE_SUPPRESS_DEPRECATED_END
-    GenerateInputs();
-    Infer();
-    initNgraphFriendlyModel();
-    Validate();
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
deleted file mode 100644
index fb2e734a1d7d17..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/memory_fq_concat_prelu.hpp"
-#include <type_traits>
-
-#include "common_test_utils/node_builders/constant.hpp"
-#include "common_test_utils/node_builders/fake_quantize.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-template <typename T>
-inline typename std::enable_if<std::is_integral<T>::value, void>::type
-    printTupleElement(std::ostringstream& out, const T& value) {
-    out << "_" << value;
-}
-
-template <typename T>
-inline typename std::enable_if<std::is_same<T, std::vector<typename T::value_type>>::value, void>::type
-    printTupleElement(std::ostringstream& out, const T& vector) {
-    for (const auto& value : vector) {
-        out << "_" << value;
-    }
-}
-
-template <std::size_t I = 0, typename... T>
-inline typename std::enable_if<I == sizeof...(T), void>::type printTuple(std::ostringstream& out, std::tuple<T...>& t) {
-}
-
-template <std::size_t I = 0, typename... T>
-inline typename std::enable_if<I < sizeof...(T), void>::type printTuple(std::ostringstream& out, std::tuple<T...>& t) {
-    printTupleElement(out, std::get<I>(t));
-    printTuple<I + 1, T...>(out, t);
-}
-
-std::string MemoryFqConcatPrelu::getTestCaseName(const testing::TestParamInfo<MemoryFqConcatPreluTuple> &obj) {
-    std::vector<std::vector<size_t>> input;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::map<std::string, std::string> additional_config;
-    std::tuple<
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>> strided_slice_params;
-    std::tuple<
-        std::size_t,
-        std::vector<size_t>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>> fake_quantize_params;
-    std::tie(input, netPrecision, targetName, additional_config, strided_slice_params, fake_quantize_params) = obj.param;
-    std::ostringstream results;
-
-    results << "IS=" << ov::test::utils::vec2str(input[0]) << "_";
-    results << "netPRC=" << netPrecision.name() << "_";
-    results << "targetDevice=" << targetName << "_";
-    for (auto const &item : additional_config) {
-        results << "_additional_config=" << item.first << "_" << item.second;
-    }
-    results << "_strided_slice_params=";
-    printTuple(results, strided_slice_params);
-    results << "_fake_quantize_params=";
-    printTuple(results, fake_quantize_params);
-    return results.str();
-}
-
-void MemoryFqConcatPrelu::Run() {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-}
-
-void MemoryFqConcatPrelu::SetUp() {
-    std::vector<std::vector<size_t>> inputs;
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> additional_config;
-    std::tuple<
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>,
-        std::vector<int64_t>> strided_slice_params;
-    std::tuple<
-        std::size_t,
-        std::vector<size_t>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>,
-        std::vector<float>> fake_quantize_params;
-    std::tie(inputs, netPrecision, targetDevice, additional_config, strided_slice_params, fake_quantize_params) = this->GetParam();
-    configuration.insert(additional_config.begin(), additional_config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    ov::ParameterVector input;
-    for (auto&& shape : inputs) {
-        input.push_back(std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shape)));
-    }
-    auto memory_read = ov::test::utils::deprecated::make_constant<float>(ngPrc, {inputs[0]}, {0});
-    auto read = std::make_shared<ov::op::v3::ReadValue>(memory_read, "variable1");
-    auto fake_constant = ov::test::utils::deprecated::make_constant<float>(ngPrc, {inputs[0]}, {0});
-    auto fake = ov::test::utils::make_fake_quantize(fake_constant, ngPrc,
-        std::get<0>(fake_quantize_params),
-        std::get<1>(fake_quantize_params),
-        std::get<2>(fake_quantize_params),
-        std::get<3>(fake_quantize_params),
-        std::get<4>(fake_quantize_params),
-        std::get<5>(fake_quantize_params));
-    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{read, fake, input[0]}, 1);
-    auto prelu_constant = ov::op::v0::Constant::create(ngPrc, {1}, {-2});
-    auto prelu = std::make_shared<ov::op::v0::PRelu>(concat, prelu_constant);
-
-    auto begin = std::get<0>(strided_slice_params);
-    auto end = std::get<1>(strided_slice_params);
-    auto stride = std::get<2>(strided_slice_params);
-    auto begin_mask = std::get<3>(strided_slice_params);
-    auto end_mask = std::get<4>(strided_slice_params);
-    ov::Shape constShape = {begin.size()};
-    auto beginNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, begin.data());
-    auto endNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, end.data());
-    auto strideNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, constShape, stride.data());
-    auto slice = std::make_shared<ov::op::v1::StridedSlice>(prelu,
                                                            beginNode,
                                                            endNode,
                                                            strideNode,
                                                            begin_mask,
                                                            end_mask,
                                                            std::vector<int64_t>{},
                                                            std::vector<int64_t>{},
                                                            std::vector<int64_t>{});
-
-    auto assign = std::make_shared<ov::op::v3::Assign>(slice, "variable1");
-    auto result = std::make_shared<ov::op::v0::Result>(prelu);
-    assign->add_control_dependency(read);
-    result->add_control_dependency(assign);
-    function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "memory_fq_concat_prelu");
-}
-
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
deleted file mode 100644
index 63b93b70b349f3..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/multi_crops_to_concat.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string MultiCropsToConcatTest::getTestCaseName(const testing::TestParamInfo<MultiCropsToConcatParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::map<std::string, std::string> configuration;
-    std::vector<size_t> inputShape;
-    std::vector<std::pair<int64_t, int64_t>> offsets;
-    std::tie(netPrecision, targetDevice, inputShape, offsets, configuration) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice << "_";
-    result << "offset=";
-    for (auto offset : offsets) {
-        result << "(" << offset.first << "," << offset.second << ")";
-    }
-    for (auto const& configItem : configuration) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void MultiCropsToConcatTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> tempConfig;
-    std::vector<size_t> inputShape;
-    std::vector<std::pair<int64_t, int64_t>> offsets;
-    std::tie(netPrecision, targetDevice, inputShape, offsets, tempConfig) = this->GetParam();
-    configuration.insert(tempConfig.begin(), tempConfig.end());
-
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    ov::Shape const_shape_crop1 = {2};
-    auto begin_node_crop1 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop1, std::vector<int64_t>{ 0, offsets[0].first });
-    auto end_node_crop1 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop1, std::vector<int64_t>{ 1, offsets[0].second });
-    auto strideN_node_crop1 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop1, std::vector<int64_t>{ 1, 1 });
-    auto crop1 = std::make_shared<ov::op::v1::StridedSlice>(params[0],
-                                                            begin_node_crop1,
-                                                            end_node_crop1,
-                                                            strideN_node_crop1,
-                                                            std::vector<int64_t>{ 1, 0 },
-                                                            std::vector<int64_t>{ 1, 0 },
-                                                            std::vector<int64_t>{ 0, 0 },
-                                                            std::vector<int64_t>{ 0, 0 },
-                                                            std::vector<int64_t>{ 0, 0 });
-
-    ov::Shape const_shape_crop2 = {2};
-    auto begin_node_crop2 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop2, std::vector<int64_t>{ 0, offsets[1].first });
-    auto end_node_crop2 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop2, std::vector<int64_t>{ 1, offsets[1].second });
-    auto strideN_node_crop2 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop2, std::vector<int64_t>{ 1, 1 });
-    auto crop2 = std::make_shared<ov::op::v1::StridedSlice>(params[0],
-                                                            begin_node_crop2,
-                                                            end_node_crop2,
-                                                            strideN_node_crop2,
-                                                            std::vector<int64_t>{ 1, 0 },
-                                                            std::vector<int64_t>{ 1, 0 },
-                                                            std::vector<int64_t>{ 0, 0 },
-                                                            std::vector<int64_t>{ 0, 0 },
-                                                            std::vector<int64_t>{ 0, 0 });
-
-    auto concat1 = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{crop1, crop2}, 1);
-    std::shared_ptr<ov::op::v0::Result> result;
-
-    // Case with 3 crops
-    if (offsets.size() == 3) {
-        ov::Shape const_shape_crop3 = {2};
-        auto begin_node_crop3 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop3, std::vector<int64_t>{ 0, offsets[2].first });
-        auto end_node_crop3 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop3, std::vector<int64_t>{ 1, offsets[2].second });
-        auto strideN_node_crop3 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, const_shape_crop3, std::vector<int64_t>{ 1, 1 });
-        auto crop3 = std::make_shared<ov::op::v1::StridedSlice>(params[0],
-                                                                begin_node_crop3,
-                                                                end_node_crop3,
-                                                                strideN_node_crop3,
-                                                                std::vector<int64_t>{ 1, 0 },
-                                                                std::vector<int64_t>{ 1, 0 },
-                                                                std::vector<int64_t>{ 0, 0 },
-                                                                std::vector<int64_t>{ 0, 0 },
-                                                                std::vector<int64_t>{ 0, 0 });
-
-        auto concat2 = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{concat1, crop3}, 1);
-        result = std::make_shared<ov::op::v0::Result>(concat2);
-    } else {
-        result = std::make_shared<ov::op::v0::Result>(concat1);
-    }
-    function = std::make_shared<ngraph::Function>(result, params, "InputSplitConcatTest");
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
deleted file mode 100644
index 5494db53c6b9ef..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/subgraph/multi_input_scale.hpp"
-#include "common_test_utils/node_builders/eltwise.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string MultipleInputScaleTest::getTestCaseName(const testing::TestParamInfo<multipleInputScaleParams> &obj) {
-    std::string targetDevice;
-    InferenceEngine::Precision netPrecision;
-    size_t inputSize;
-    std::map<std::string, std::string> config;
-    std::tie(targetDevice, netPrecision, inputSize, config) = obj.param;
-    std::ostringstream result;
-    result << "netPrecision=" << netPrecision.name() << "_";
-    result << "IS=" << inputSize << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : config) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-InferenceEngine::Blob::Ptr MultipleInputScaleTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMin, range, 1 / inputDataResolution, seed);
-}
-
-void MultipleInputScaleTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> config;
-    size_t inputSize;
-    std::tie(targetDevice, netPrecision, inputSize, config) = this->GetParam();
-    configuration.insert(config.begin(), config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    std::vector<size_t> inputShape = {1, inputSize};
-    ov::ParameterVector input {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
-                               std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    auto fc1_weights = ov::test::utils::generate_float_numbers(inputSize * inputSize, -0.5f, 0.5f);
-    auto fc2_weights = ov::test::utils::generate_float_numbers(inputSize * inputSize, -0.2f, 0.2f);
-
-    auto fc1 = ngraph::builder::makeFullyConnected(input[0], ngPrc, inputSize, false, {inputSize, inputSize}, fc1_weights);
-    auto fc2 = ngraph::builder::makeFullyConnected(input[1], ngPrc, inputSize, false, {inputSize, inputSize}, fc2_weights);
-
-    auto add = ov::test::utils::make_eltwise(fc1, fc2, ngraph::helpers::EltwiseTypes::ADD);
-
-    auto result = std::make_shared<ov::op::v0::Result>(add);
-    function = std::make_shared<ngraph::Function>(result, input, "multiple_input_scale");
-    functionRefs = ngraph::clone_function(*function);
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
deleted file mode 100644
index a2d8eaf4936c16..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/multioutput_eltwise_squeeze_eltwise.hpp"
-
-#include "common_test_utils/node_builders/constant.hpp"
-
-namespace SubgraphTestsDefinitions {
-    std::string MultioutputEltwiseReshapeEltwise::getTestCaseName(const testing::TestParamInfo<MultioutputEltwiseReshapeEltwiseTuple> &obj) {
-        std::vector<std::vector<size_t>> input;
-        InferenceEngine::Precision netPrecision;
-        std::string targetName;
-        std::map<std::string, std::string> additional_config;
-        std::tie(input, netPrecision, targetName, additional_config) = obj.param;
-        std::ostringstream results;
-
-        results << "IS=" << ov::test::utils::vec2str(input[0]) << "_";
-        results << "netPRC=" << netPrecision.name() << "_";
-        results << "targetDevice=" << targetName << "_";
-        for (auto const& configItem : additional_config) {
-            results << "_configItem=" << configItem.first << "_" << configItem.second;
-        }
-        return results.str();
-    }
-
-    void MultioutputEltwiseReshapeEltwise::SetUp() {
-        std::vector<std::vector<size_t>> inputs;
-        InferenceEngine::Precision netPrecision;
-        std::map<std::string, std::string> additional_config;
-        std::tie(inputs, netPrecision, targetDevice, additional_config) = this->GetParam();
-        configuration.insert(additional_config.begin(), additional_config.end());
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector input;
-        for (auto&& shape : inputs) {
-            input.push_back(std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shape)));
-        }
-        auto eltwise_const = ov::test::utils::deprecated::make_constant(ngPrc,
-                                                                        ngraph::Shape{input[0]->get_shape()},
-                                                                        std::vector<float>{-1.0f});
-        auto eltwise = std::make_shared<ov::op::v1::Multiply>(input[0], eltwise_const);
-
-        auto squeeze_constant = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{0});
-        auto squeeze = std::make_shared<ov::op::v0::Squeeze>(eltwise, squeeze_constant);
-        auto unsqueeze_constant = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{0});
-        auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(squeeze, unsqueeze_constant);
-
-        auto eltwise_const2 = ov::test::utils::deprecated::make_constant(ngPrc, ngraph::Shape{1}, std::vector<float>{1.01f});
-        auto eltwise_const3 = ov::test::utils::deprecated::make_constant(ngPrc, ngraph::Shape{1}, std::vector<float>{1.01f});
-        auto eltwise2 = std::make_shared<ov::op::v1::Multiply>(eltwise, eltwise_const2);
-        auto eltwise3 = std::make_shared<ov::op::v1::Multiply>(unsqueeze, eltwise_const3);
-        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(eltwise2),
-                                     std::make_shared<ov::op::v0::Result>(eltwise3)};
-        function = std::make_shared<ngraph::Function>(results, input, "eltwise_reshape_eltwise_multioutput");
-    }
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp
deleted file mode 100644
index c7f9efeb5cb800..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_concat.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-#include "shared_test_classes/subgraph/multiple_concat.hpp"
-#include "common_test_utils/node_builders/activation.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string MultipleConcatTest::getTestCaseName(const testing::TestParamInfo<multipleConcatParams> &obj) {
-    std::string targetDevice;
-    InferenceEngine::Precision netPrecision;
-    size_t inputSize;
-    size_t constantSize;
-    std::map<std::string, std::string> config;
-    std::tie(targetDevice, netPrecision, inputSize, constantSize, config) = obj.param;
-    std::ostringstream result;
-
-    result << "netPrecision=" << netPrecision.name() << "_";
-    result << "IS=" << inputSize << "_";
-    result << "CS=" << constantSize << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : config) {
-        result << "_configItem=" << configItem.second;
-    }
-    return result.str();
-}
-
-void MultipleConcatTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> config;
-    size_t inputSize;
-    size_t constantSize;
-    std::tie(targetDevice, netPrecision, inputSize, constantSize, config) = this->GetParam();
-    configuration.insert(config.begin(), config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    std::vector<size_t> input_dims { 1, inputSize };
-    std::vector<size_t> constant_dims {1, constantSize};
-
-    auto concat_1_vals = ov::test::utils::generate_float_numbers(constantSize, -2.0f, 2.0f);
-    auto concat_2_vals = ov::test::utils::generate_float_numbers(constantSize, -5.0f, 5.0f);
-
-    ov::ParameterVector input_parameter {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(input_dims))};
-
-    auto const_1 = ov::test::utils::deprecated::make_constant(ngPrc, constant_dims, concat_1_vals);
-    auto concat_1 = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{const_1, input_parameter[0]}, 1);
-
-    auto const_2 = ov::test::utils::deprecated::make_constant(ngPrc, constant_dims, concat_2_vals);
-    auto concat_2 = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{concat_1, const_2}, 1);
-
-    auto act = ov::test::utils::make_activation(concat_2, ngPrc, ngraph::helpers::ActivationTypes::Relu);
-
-    function = std::make_shared<ngraph::Function>(act, input_parameter, "multiple_concat");
-}
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
deleted file mode 100644
index a3b0ce0e7c5b04..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/subgraph/multiple_input_fq.hpp"
-#include "common_test_utils/node_builders/eltwise.hpp"
-#include "common_test_utils/node_builders/fake_quantize.hpp"
-
-namespace SubgraphTestsDefinitions {
-
-std::string MultipleInputTest::getTestCaseName(const testing::TestParamInfo<multipleInputParams> &obj) {
-    std::string targetDevice;
-    InferenceEngine::Precision netPrecision;
-    size_t inputSize;
-    std::map<std::string, std::string> config;
-    std::tie(targetDevice, netPrecision, inputSize, config) = obj.param;
-    std::ostringstream result;
-    result << "netPrecision=" << netPrecision.name() << "_";
-    result << "IS=" << inputSize << "_";
-    result << "targetDevice=" << targetDevice;
-    for (auto const& configItem : config) {
-        result << "_configItem=" << configItem.first << "_" << configItem.second;
-    }
-    return result.str();
-}
-
-void MultipleInputTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    std::map<std::string, std::string> config;
-    size_t inputSize;
-    std::tie(targetDevice, netPrecision, inputSize, config) = this->GetParam();
-    configuration.insert(config.begin(), config.end());
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-    const float minInput = -10.0;
-    const float maxInput = 10.0;
-    ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, inputSize}),
-                              std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, inputSize}),
-                              std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, inputSize})};
-    auto fake1 = ov::test::utils::make_fake_quantize(input[0], ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { minInput }, { maxInput }, { minInput }, { maxInput });
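-    // Each input is added to its own quantized copy; the FQ ranges widen at every
-    // Add stage (2x, 4x, then 5x the input range) to cover the accumulated sums.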
-    auto add1 = ov::test::utils::make_eltwise(input[0], fake1, ngraph::helpers::EltwiseTypes::ADD);
-    auto fake_add1 = ov::test::utils::make_fake_quantize(add1, ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { 2 * minInput }, { 2 * maxInput }, { 2 * minInput }, { 2 * maxInput });
-
-    auto fake2 = ov::test::utils::make_fake_quantize(input[1], ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { minInput }, { maxInput }, { minInput }, { maxInput });
-    auto add2 = ov::test::utils::make_eltwise(input[1], fake2, ngraph::helpers::EltwiseTypes::ADD);
-    auto fake_add2 = ov::test::utils::make_fake_quantize(add2, ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { 2 * minInput }, { 2 * maxInput }, { 2 * minInput }, { 2 * maxInput });
-
-    auto add3 = ov::test::utils::make_eltwise(fake_add1, fake_add2, ngraph::helpers::EltwiseTypes::ADD);
-    auto fake_add3 = ov::test::utils::make_fake_quantize(add3, ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { 4 * minInput }, { 4 * maxInput }, { 4 * minInput }, { 4 * maxInput });
-
-    auto fake3 = ov::test::utils::make_fake_quantize(input[2], ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { minInput }, { maxInput }, { minInput }, { maxInput });
-    auto add4 = ov::test::utils::make_eltwise(fake3, fake_add3, ngraph::helpers::EltwiseTypes::ADD);
-    auto fake_add4 = ov::test::utils::make_fake_quantize(add4, ngPrc, std::numeric_limits<uint16_t>::max(), { 1 },
-        { 5 * minInput }, { 5 * maxInput }, { 5 * minInput }, { 5 * maxInput });
-
-    auto result = std::make_shared<ov::op::v0::Result>(fake_add4);
-    function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "multiple_input");
-}
-
-}  // namespace SubgraphTestsDefinitions
-
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp b/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
deleted file mode 100644
index 78f6ab7012b5ee..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/negative_memory_layer_offset.hpp"
-
-namespace SubgraphTestsDefinitions {
-    std::string NegativeMemoryOffsetTest::getTestCaseName(const testing::TestParamInfo<NegativeMemoryLayerOffsetTuple>& obj) {
-        InferenceEngine::Precision netPrecision;
-        std::string targetName;
-        size_t inputSize;
-        size_t hiddenSize;
-        std::tie(netPrecision, targetName, inputSize, hiddenSize, std::ignore) = obj.param;
-        std::ostringstream results;
-
-        results << "netPRC=" << netPrecision.name() << "_";
-        results << "IS=" << inputSize << "_";
-        results << "HS=" << hiddenSize << "_";
-        results << "targetDevice=" << targetName;
-        return results.str();
-    }
-
-    void NegativeMemoryOffsetTest::SetUp() {
-        InferenceEngine::Precision netPrecision;
-        size_t inputSize;
-        size_t hiddenSize;
-        std::map<std::string, std::string> config;
-        std::tie(netPrecision, targetDevice, inputSize, hiddenSize, config) = this->GetParam();
-        configuration.insert(config.begin(), config.end());
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-        const int seed = 0;
-        std::mt19937 gen(seed);
-        std::uniform_real_distribution<float> dist(-0.2f, 0.2f);
-        for (size_t i = 0; i < hiddenSize; ++i)
-            memory_init.emplace_back(static_cast<float>(dist(gen)));
-
-        ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, inputSize})};
-        auto mem_c = std::make_shared<ov::op::v0::Constant>(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
-        auto mem_r = std::make_shared<ov::op::v3::ReadValue>(mem_c, "memory");
-
-        // Use memory layer as the second input of 'concat' to get negative offset
-        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{ input[0], mem_r }, 1);
-
-        auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
-        auto num_split = std::make_shared<ov::op::v0::Constant>(ov::element::u64, ov::Shape{2}, std::vector<size_t>{hiddenSize, inputSize});
-        auto split = std::make_shared<ov::op::v1::VariadicSplit>(concat, split_axis_op, num_split);
-
-        auto mem_w = std::make_shared<ov::op::v3::Assign>(split->output(0), "memory");
-        auto sigm = std::make_shared<ov::op::v0::Sigmoid>(split->output(1));
-
-        mem_w->add_control_dependency(mem_r);
-        sigm->add_control_dependency(mem_w);
-
-        function = std::make_shared<ngraph::Function>(sigm, input, "negative_memory_layer_offset_memory");
-    }
-
-    void NegativeMemoryOffsetTest::switchToNgraphFriendlyModel() {
-        InferenceEngine::Precision netPrecision;
-        size_t inputSize;
-        size_t hiddenSize;
-        std::tie(netPrecision, targetDevice, inputSize, hiddenSize, std::ignore) = this->GetParam();
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, inputSize})};
-        auto mem_c = std::make_shared<ov::op::v0::Constant>(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
-        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{ input[0], mem_c }, 1);
-
-        auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
-        auto num_split = std::make_shared<ov::op::v0::Constant>(ov::element::u64, ov::Shape{2}, std::vector<size_t>{hiddenSize, inputSize});
-        auto split = std::make_shared<ov::op::v1::VariadicSplit>(concat, split_axis_op, num_split);
-
-        auto sigm = std::make_shared<ov::op::v0::Sigmoid>(split->output(1));
-
-        function = std::make_shared<ngraph::Function>(sigm, input, "negative_memory_layer_offset_nonmemory");
-    }
-
-    void NegativeMemoryOffsetTest::LoadNetwork() {
-        LayerTestsUtils::LayerTestsCommon::LoadNetwork();
-        inferRequest = executableNetwork.CreateInferRequest();
-    }
-
-    void NegativeMemoryOffsetTest::Infer() {
-        ConfigureInferRequest();
-        inferRequest.Infer();
-    }
-
-    void NegativeMemoryOffsetTest::Run() {
-        SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
-        LoadNetwork();
-        auto states = inferRequest.QueryState();
-        for (auto& state : states) {
-            auto name = state.GetName();
-            if (name == "memory") {
-                auto blob = FuncTestUtils::createAndFillBlobWithFloatArray(state.GetState()->getTensorDesc(),
-                                                                           memory_init.data(), memory_init.size());
-                state.SetState(blob);
-            } else {
-                GTEST_FAIL() << "unknown memory state";
-            }
-        }
-        GenerateInputs();
-        Infer();
-        switchToNgraphFriendlyModel();
-        Validate();
-    }
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
deleted file mode 100644
index 7504596faf75a4..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/parameter_reshape_result.hpp"
-
-namespace SubgraphTestsDefinitions {
-std::string ParamReshapeResult::getTestCaseName(const testing::TestParamInfo<ParamReshapeResultTuple> &obj) {
-    std::vector<size_t> inputShape;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::map<std::string, std::string> config;
-    std::tie(inputShape, netPrecision, targetName, config) = obj.param;
-    std::ostringstream results;
-
-    results << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    results << "netPRC=" << netPrecision.name() << "_";
"targetDevice=" << targetName << "_"; - for (auto const& configItem : config) { - results << "_configItem=" << configItem.first << "_" << configItem.second; - } - return results.str(); -} - -void ParamReshapeResult::SetUp() { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShape, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto shape = inputShape; - shape[shape.size() - 2] *= 2; - shape[shape.size() - 1] /= 2; - auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape.size()}, shape); - auto reshape = std::make_shared(params[0], reshape_const, false); - - function = std::make_shared(reshape, params, "ParamReshapeResult"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp deleted file mode 100644 index 1c2f28ea17618b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/permute_concat_concat_permute.hpp" - -#include - -#include -#include - -namespace SubgraphTestsDefinitions { -std::string PermuteConcatConcatPermute::getTestCaseName( - const testing::TestParamInfo& obj) { - std::vector input_shape; - InferenceEngine::Precision net_precision; - std::string targetName; - std::tie(input_shape, net_precision, targetName) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(input_shape) << "_"; - results << "netPRC=" << net_precision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - return results.str(); -} - -void PermuteConcatConcatPermute::SetUp() { - std::srand(std::time(nullptr)); - - std::vector input_shape; - InferenceEngine::Precision net_precision; - std::tie(input_shape, net_precision, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(net_precision); - - auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); - std::vector permute_param = {1, 0}; - auto permute_params = - ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_param.size()}, permute_param); - auto permute_1 = std::make_shared(input_param, permute_params); - - auto const_input_1 = CreateConst(input_shape, ngPrc, false); - auto concat_1 = std::make_shared(ngraph::OutputVector{const_input_1, permute_1}, 0); - - auto const_input_2 = CreateConst(input_shape, ngPrc, true); - auto concat_2 = std::make_shared(ngraph::OutputVector{concat_1, const_input_2}, 0); - - auto permute_2 = std::make_shared(concat_2, permute_params); - - function = std::make_shared(permute_2, - ngraph::ParameterVector{input_param}, - "permute_concat_concat_permute_zero_validation"); - range_ = InferenceEngine::details::product(input_shape); -} - -std::shared_ptr PermuteConcatConcatPermute::CreateConst( - const std::vector& input_shape, - const ::ngraph::element::Type& precision, - bool use_1_as_first_dimension) { - auto const_input_shape_vec = std::vector{}; - 
-    if (input_shape.size() == 1) {
-        const_input_shape_vec.push_back(input_shape.front());
-    } else {
-        if (use_1_as_first_dimension) {
-            const_input_shape_vec.push_back(1);
-            const_input_shape_vec.push_back(input_shape[0]);
-        } else {
-            const_input_shape_vec.push_back(input_shape[1]);
-            const_input_shape_vec.push_back(input_shape[0]);
-        }
-
-        const_input_shape_vec.insert(const_input_shape_vec.end(), std::next(input_shape.begin(), 2), input_shape.end());
-    }
-
-    const auto const_input_shape = ngraph::Shape{const_input_shape_vec};
-    auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec);
-    auto const_input_values = std::vector<float>(const_input_values_size, 0);
-    return ov::op::v0::Constant::create(precision, const_input_shape, const_input_values);
-}
-
-void PermuteConcatConcatPermute::Validate() {
-    if (functionRefs == nullptr) {
-        functionRefs = ngraph::clone_function(*function);
-    }
-    const auto& actual_outputs = GetOutputs();
-    IE_ASSERT(actual_outputs.size() == 1);
-
-    auto expected_outputs = CalculateRefs();
-    IE_ASSERT(expected_outputs.size() == actual_outputs.size());
-
-    const auto& expected = expected_outputs[0];
-    const auto& actual = actual_outputs[0];
-
-    IE_ASSERT(actual->byteSize() == expected.second.size());
-
-    auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
-    IE_ASSERT(memory);
-
-    const auto locked_memory = memory->wmap();
-    auto precision = actual->getTensorDesc().getPrecision();
-
-    switch (precision) {
-    case InferenceEngine::Precision::FP16: {
-        IE_ASSERT(expected.first == ngraph::element::f16);
-        const auto actual_output_buffer = locked_memory.as<const ngraph::float16*>();
-        const auto expected_output_buffer = reinterpret_cast<const ngraph::float16*>(expected.second.data());
-        CompareBuffers(expected_output_buffer, actual_output_buffer, actual->size(), threshold);
-        break;
-    }
-    case InferenceEngine::Precision::FP32: {
-        IE_ASSERT(expected.first == ngraph::element::f32);
-        const auto actual_output_buffer = locked_memory.as<const float*>();
-        const auto expected_output_buffer = reinterpret_cast<const float*>(expected.second.data());
-        CompareBuffers(expected_output_buffer, actual_output_buffer, actual->size(), threshold);
-        break;
-    }
-    default:
-        FAIL() << "Comparator for " << precision << " precision isn't supported";
-    }
-}
-
-InferenceEngine::Blob::Ptr PermuteConcatConcatPermute::GenerateInput(
-    const InferenceEngine::InputInfo& inputInfo) const {
-    return FuncTestUtils::createAndFillBlobConsistently(inputInfo.getTensorDesc(), range_, start_, step_);
-}
-
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
deleted file mode 100644
index bb5cf428a9920a..00000000000000
--- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/subgraph/permute_concat_permute.hpp"
-
-#include <cstdlib>
-
-#include <ctime>
-#include <iterator>
-#include <vector>
-
-namespace SubgraphTestsDefinitions {
-std::string PermuteConcatPermute::getTestCaseName(const testing::TestParamInfo<PermuteConcatPermuteTuple>& obj) {
-    std::vector<std::vector<size_t>> input;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::tie(input, netPrecision, targetName) = obj.param;
-    std::ostringstream results;
-
-    results << "IS=" << ov::test::utils::vec2str(input[0]) << "_";
-    results << "netPRC=" << netPrecision.name() << "_";
-    results << "targetDevice=" << targetName << "_";
return results.str(); -} - -void PermuteConcatPermute::SetUp() { - std::srand(std::time(nullptr)); - - std::vector> inputs; - InferenceEngine::Precision netPrecision; - std::tie(inputs, netPrecision, targetDevice) = this->GetParam(); - auto input_shape = inputs[0]; - auto permute_1_param = inputs[1]; - auto permute_2_param = inputs[2]; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape}); - auto permute_params_1 = - ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_1_param.size()}, permute_1_param); - - auto permute_1 = std::make_shared(input_param, permute_params_1); - - auto const_input_shape_vec = std::vector{1}; - const_input_shape_vec.insert(const_input_shape_vec.end(), input_shape.begin(), std::prev(input_shape.end())); - const auto constinput_shape = ngraph::Shape{const_input_shape_vec}; - auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec); - auto const_input_values = std::vector(const_input_values_size, 0); - - auto const_input_1 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); - auto const_input_2 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); - auto const_input_3 = ov::op::v0::Constant::create(ngPrc, constinput_shape, const_input_values); - - auto concat = std::make_shared( - ngraph::OutputVector{const_input_1, const_input_2, permute_1, const_input_3}, - 0); - auto permute_params_2 = - ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_2_param.size()}, permute_2_param); - auto permute_2 = std::make_shared(concat, permute_params_2); - - function = - std::make_shared(permute_2, ngraph::ParameterVector{input_param}, "permute_concat_permute"); - range_ = InferenceEngine::details::product(input_shape); -} - -InferenceEngine::Blob::Ptr PermuteConcatPermute::GenerateInput(const InferenceEngine::InputInfo& inputInfo) const { - return FuncTestUtils::createAndFillBlobConsistently(inputInfo.getTensorDesc(), range_, start_, step_); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp deleted file mode 100644 index 1779f62ec1ff44..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/relu_split_reshape.hpp" - -namespace SubgraphTestsDefinitions { -std::string ReluSplitReshape::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShape; - size_t splitAxis, splitNum; - InferenceEngine::Precision netPrecision; - std::string targetName; - std::map config; - std::tie(inputShape, splitAxis, splitNum, netPrecision, targetName, config) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - results << "axis=" << splitAxis << "_"; - results << "num=" << splitNum << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - for (auto const& configItem : config) { - results << "_configItem=" << configItem.first << "_" << configItem.second; - } - return results.str(); -} - -void ReluSplitReshape::SetUp() { - std::vector inputShape; - size_t splitAxis, splitNum; - 
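
The SetUp body assembled below hinges on the ov::op::v1::Split contract: the op takes the data, a scalar axis constant, and a num_splits count, and yields num_splits equal outputs along that axis. A minimal self-contained sketch of that contract (the shapes and the make_split_sketch name are illustrative, not taken from the deleted test):

#include <openvino/core/model.hpp>
#include <openvino/op/constant.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/split.hpp>

std::shared_ptr<ov::Model> make_split_sketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
    // Scalar axis constant, as in the split_axis_op nodes built throughout these tests.
    auto axis = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
    // num_splits = 4: axis 1 (extent 8) is cut into four equal {1, 2} outputs.
    auto split = std::make_shared<ov::op::v1::Split>(data, axis, 4);
    return std::make_shared<ov::Model>(split->outputs(), ov::ParameterVector{data});
}

Each split->output(i) can then feed a separate consumer, which is exactly how the reshape below picks up split->output(0).
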
InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShape, splitAxis, splitNum, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto relu = std::make_shared(params[0]); - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)}); - auto split = std::make_shared(relu, split_axis_op, splitNum); - - auto shape = split->get_output_shape(0); - shape[shape.size() - 2] *= 2; - shape[shape.size() - 1] /= 2; - auto reshape_const = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape.size()}, shape); - auto reshape = std::make_shared(split->output(0), reshape_const, false); - - function = std::make_shared(reshape, params, "ReluSplitReshape"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp deleted file mode 100644 index 10bc1365b2ea2f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/scaleshift_conv_scaleshift.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string ScaleShiftAfterConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr ScaleShiftAfterConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void ScaleShiftAfterConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); 
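
The std::tie over this->GetParam() seen here is the parameterization idiom shared by every fixture in these files: the test is driven by a tuple of settings, destructured at the top of SetUp and getTestCaseName, with any extra plugin options merged into configuration right after. A reduced sketch of the idiom, with TestParams and MyFixture as hypothetical names:

#include <gtest/gtest.h>
#include <map>
#include <string>
#include <tuple>

// Hypothetical parameter tuple: precision name, device name, extra plugin config.
using TestParams = std::tuple<std::string, std::string, std::map<std::string, std::string>>;

class MyFixture : public testing::TestWithParam<TestParams> {
protected:
    void SetUp() override {
        std::string precision, device;
        std::map<std::string, std::string> extra;
        std::tie(precision, device, extra) = GetParam();  // destructure the tuple
        config.insert(extra.begin(), extra.end());        // merge per-case plugin options
    }
    std::map<std::string, std::string> config;
};

INSTANTIATE_TEST_SUITE_P then supplies the tuples, and the same destructuring appears in getTestCaseName to build the printed case name.
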
- configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - - std::vector outFormShapes = { 1, outputChannels * widthAfterConv, 1, 1 }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - auto scale = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f); - auto shift = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); - auto mul = std::make_shared(reshape2, mul_const); - auto add_const = std::make_shared(ngPrc, outFormShapes, shift); - auto add = std::make_shared(mul, add_const); - - outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape3 = std::make_shared(add, reshapePattern3, false); - - function = std::make_shared(mul, params, "ScaleShiftAfterConvTest"); -} - -std::string ScaleShiftBeforeConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr ScaleShiftBeforeConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -0.1f, 0.1f); - for (size_t i = 
0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void ScaleShiftBeforeConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::vector convInputShape = {1, inputShape[1], 1, 1}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); - - auto scale = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); - auto shift = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, convInputShape, scale); - auto mul = std::make_shared(reshape1, mul_const); - auto add_const = std::make_shared(ngPrc, convInputShape, shift); - auto add = std::make_shared(mul, add_const); - - convInputShape = {1, inputChannels, 1, inputShape[1] / inputChannels}; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape2 = std::make_shared(mul, reshapePattern2, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.1f, 0.1f); - auto conv = ov::test::utils::make_convolution(reshape2, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? 
stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape3 = std::make_shared(conv, reshapePattern3, false); - - function = std::make_shared(reshape3, params, "ScaleShiftBeforeConvTest"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp deleted file mode 100644 index ae74a084d4cefa..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "shared_test_classes/subgraph/softsign.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string SoftsignTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::vector inputShape; - std::string targetDevice; - std::map configuration; - std::tie(netPrecision, targetDevice, configuration, inputShape) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void SoftsignTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - std::vector inputShape; - std::tie(netPrecision, targetDevice, tempConfig, inputShape) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto abs = std::make_shared(params[0]); - - auto const_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {1}); - auto const_neg_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {-1}); - - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); - - auto mul = std::make_shared(power, params[0]); - ngraph::ResultVector results{ std::make_shared(mul) }; - function = std::make_shared(results, params, "SoftSignTest"); -} - -void SoftsignTest::Run() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - - LoadNetwork(); - GenerateInputs(); - Infer(); - - function = GenerateNgraphFriendlySoftSign(); - Validate(); -} - -std::shared_ptr SoftsignTest::GenerateNgraphFriendlySoftSign() { - InferenceEngine::Precision netPrecision = std::get<0>(this->GetParam()); - std::vector inputShape = std::get<3>(this->GetParam()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto abs = std::make_shared(params[0]); - auto constant_0 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, { 1 }); - auto add = std::make_shared(abs, constant_0); - auto constant_1 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, { -1 }); - auto power = std::make_shared(add, constant_1); - auto mul 
= std::make_shared(power, params[0]); - - ngraph::ResultVector results{ std::make_shared(mul) }; - return std::make_shared(results, params, "SoftSignTest"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp deleted file mode 100644 index 1c654dac126b7a..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/split_concat_multi_inputs.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string SplitConcatMultiInputsTest::getTestCaseName(testing::TestParamInfo obj) { - std::vector inputShape; - size_t splitsNum; - std::map config; - InferenceEngine::Precision netPrecision; - std::string targetName; - bool withFC; - std::tie(netPrecision, targetName, config, inputShape, splitsNum, withFC) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "SplitsN=" << splitsNum << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetName << "_"; - result << "FC=" << withFC; - for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void SplitConcatMultiInputsTest::SetUp() { - std::vector inputShape; - size_t splitsNum; - std::map tempConfig; - InferenceEngine::Precision netPrecision; - bool withFC; - std::tie(netPrecision, targetDevice, tempConfig, inputShape, splitsNum, withFC) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - inputShape[1] *= splitsNum; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, splitsNum); - - ngraph::OutputVector concatInputs = split->outputs(); - - auto concat = std::make_shared(concatInputs, 1); - - if (withFC) { - auto mul_const = ov::test::utils::deprecated::make_constant(ngPrc, { 10, inputShape[1] }, - ov::test::utils::generate_float_numbers(10 * inputShape[1], -0.2f, 0.2f), false); - auto matmul = std::make_shared(concat, mul_const, false, true); - function = std::make_shared(matmul, params, "SplitConcatMultiInputs"); - } else { - function = std::make_shared(concat, params, "SplitConcatMultiInputs"); - } -} - -InferenceEngine::Blob::Ptr SplitConcatMultiInputsTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp deleted file mode 100644 index 4a8ad4bb219f99..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include 
"shared_test_classes/subgraph/split_conv.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string SplitConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t inputChannels; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, inputChannels, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr SplitConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void SplitConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t inputChannels; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, inputChannels, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - const auto splitsNum = 2; - const auto splitAxis = 1; - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{splitAxis}); - auto split = std::make_shared(params[0], split_axis_op, splitsNum); - - auto relu1 = std::make_shared(split->output(0)); - - auto relu2 = std::make_shared(split->output(1)); - std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels / 2}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(relu2, reshapePattern1, false); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(reshape1, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? 
stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; - std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); - - ngraph::ResultVector results{std::make_shared(relu1), - std::make_shared(reshape2)}; - function = std::make_shared(results, params, "SplitConvTest"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp deleted file mode 100644 index b885ceb2343c2a..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/split_relu.hpp" - -namespace SubgraphTestsDefinitions { - std::string SplitRelu::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector> input; - std::vector connect_input; - InferenceEngine::Precision netPrecision; - std::string targetName; - std::map additional_config; - std::tie(input, connect_input, netPrecision, targetName, additional_config) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(input[0]) << "_"; - results << "ConnectInput=" << ov::test::utils::vec2str(connect_input) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - return results.str(); - } - - void SplitRelu::SetUp() { - std::vector> inputs; - std::vector connect_index; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputs, connect_index, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input; - for (auto&& shape : inputs) { - input.push_back(std::make_shared(ngPrc, ov::Shape(shape))); - } - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(input[0], split_axis_op, 4); - ngraph::ResultVector results; - - for (size_t i : connect_index) { - auto relu = std::make_shared(split->output(i)); - results.push_back(std::make_shared(relu)); - } - function = std::make_shared(results, input, "split_relu"); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp deleted file mode 100644 index 52506fc08fa2eb..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/split_trivial_permute_concat.hpp" -#include "common_test_utils/node_builders/activation.hpp" - -namespace SubgraphTestsDefinitions { - std::string SplitTrivialPermuteConcatTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetName; 
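
The getTestCaseName being declared here follows the suite-wide convention of folding every varying parameter into a key=value chain, which keeps generated gtest case names unique and easy to filter on. A small illustrative builder in that style (make_case_name is a hypothetical helper, not part of the deleted code):

#include <sstream>
#include <string>
#include <vector>

std::string make_case_name(const std::vector<size_t>& shape,
                           const std::string& precision,
                           const std::string& device) {
    std::ostringstream name;
    name << "IS=";
    for (const auto dim : shape)
        name << dim << "_";          // fold every dimension into the name
    name << "netPRC=" << precision << "_";
    name << "targetDevice=" << device;
    return name.str();               // e.g. "IS=1_16_netPRC=FP32_targetDevice=CPU"
}
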
- std::vector inputShape; - size_t splitAxis; - size_t concatAxis; - std::tie(netPrecision, targetName, inputShape, splitAxis, concatAxis, std::ignore) = obj.param; - std::ostringstream results; - - results << "netPRC=" << netPrecision.name() << "_"; - results << "IS="; - for (size_t size : inputShape) - results << size << "_"; - results << "SA=" << splitAxis << "_"; - results << "CA=" << concatAxis << "_"; - results << "targetDevice=" << targetName; - return results.str(); - } - - void SplitTrivialPermuteConcatTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::vector inputShape; - size_t splitAxis; - size_t concatAxis; - std::map config; - std::tie(netPrecision, targetDevice, inputShape, splitAxis, concatAxis, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)}); - auto split = std::make_shared(input[0], split_axis_op, 2); - - auto permute_in_params = std::make_shared(ngraph::element::i64, - ngraph::Shape{ 4 }, - ngraph::Shape{ {0, 3, 2, 1} }); - auto permute_0 = std::make_shared(split->output(0), permute_in_params); - auto permute_1 = std::make_shared(split->output(1), permute_in_params); - - auto concat = std::make_shared(ngraph::OutputVector{ permute_0, permute_1 }, concatAxis); - auto act = ov::test::utils::make_activation(concat, ngPrc, ngraph::helpers::ActivationTypes::Relu); - function = std::make_shared(act, input, "split_trivial_permute_concat"); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp deleted file mode 100644 index 4430174dd21d82..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/strided_slice.hpp" - -namespace SubgraphTestsDefinitions { - -std::string StridedSliceTest::getTestCaseName(const testing::TestParamInfo &obj) { - StridedSliceSpecificParams params; - InferenceEngine::Precision netPrc; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::map additionalConfig; - std::tie(params, netPrc, inPrc, outPrc, inLayout, outLayout, targetName, additionalConfig) = obj.param; - std::ostringstream result; - result << "inShape=" << ov::test::utils::vec2str(params.inputShape) << "_"; - result << "netPRC=" << netPrc.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "begin=" << ov::test::utils::vec2str(params.begin) << "_"; - result << "end=" << ov::test::utils::vec2str(params.end) << "_"; - result << "stride=" << ov::test::utils::vec2str(params.strides) << "_"; - result << "begin_m=" << ov::test::utils::vec2str(params.beginMask) << "_"; - result << "end_m=" << ov::test::utils::vec2str(params.endMask) << "_"; - result << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.newAxisMask)) << "_"; - result << "shrink_m=" << (params.shrinkAxisMask.empty() ? 
"def" : ov::test::utils::vec2str(params.shrinkAxisMask)) << "_"; - result << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.ellipsisAxisMask)) << "_"; - result << "trgDev=" << targetName; - for (auto const& configItem : additionalConfig) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -void StridedSliceTest::SetUp() { - StridedSliceSpecificParams ssParams; - InferenceEngine::Precision netPrecision; - std::map additionalConfig; - std::tie(ssParams, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice, additionalConfig) = this->GetParam(); - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(ssParams.inputShape))}; - auto relu = std::make_shared(params[0]); - - ov::Shape constShape = {ssParams.begin.size()}; - auto beginNode = std::make_shared(ov::element::i64, constShape, ssParams.begin.data()); - auto endNode = std::make_shared(ov::element::i64, constShape, ssParams.end.data()); - auto strideNode = std::make_shared(ov::element::i64, constShape, ssParams.strides.data()); - - auto ss = std::make_shared(relu, - beginNode, - endNode, - strideNode, - ssParams.beginMask, - ssParams.endMask, - ssParams.newAxisMask, - ssParams.shrinkAxisMask, - ssParams.ellipsisAxisMask); - - ngraph::ResultVector results{std::make_shared(ss)}; - function = std::make_shared(results, params, "strided_slice"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp deleted file mode 100644 index 04c2ac032b9d87..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/stridedslice_concat.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string SliceConcatTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - StridedSliceParams sliceParams; - std::tie(netPrecision, targetDevice, configuration, sliceParams) = obj.param; - std::vector inputShape, begin, end, strides, beginMask, endMask; - std::tie(inputShape, begin, end, strides, beginMask, endMask) = sliceParams; - - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "B=" << ov::test::utils::vec2str(begin) << "_"; - result << "E=" << ov::test::utils::vec2str(end) << "_"; - result << "S=" << ov::test::utils::vec2str(strides) << "_"; - result << "BM=" << ov::test::utils::vec2str(beginMask) << "_"; - result << "EM=" << ov::test::utils::vec2str(endMask) << "_"; - return result.str(); -} - -void SliceConcatTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - StridedSliceParams sliceParams; - std::tie(netPrecision, targetDevice, tempConfig, sliceParams) = this->GetParam(); - 
configuration.insert(tempConfig.begin(), tempConfig.end()); - std::vector inputShape, begin, end, strides, beginMask, endMask; - std::tie(inputShape, begin, end, strides, beginMask, endMask) = sliceParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - size_t input_size = std::accumulate(std::begin(inputShape), std::end(inputShape), 1, std::multiplies()); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, input_size})}; - - ngraph::Output input = params[0]; - if (inputShape[0] != 1 || inputShape.size() != 2) { - input = std::make_shared(params[0], - ov::test::utils::deprecated::make_constant(ngraph::element::i64, ngraph::Shape{inputShape.size()}, inputShape), false); - } - - ov::Shape constShape = {begin.size()}; - auto beginNode = std::make_shared(ov::element::i64, constShape, begin.data()); - auto endNode = std::make_shared(ov::element::i64, constShape, end.data()); - auto strideNode = std::make_shared(ov::element::i64, constShape, strides.data()); - - auto ss = std::make_shared(input, - beginNode, - endNode, - strideNode, - beginMask, - endMask, - std::vector(inputShape.size(), 0), - std::vector(inputShape.size(), 0), - std::vector(inputShape.size(), 0)); - - ngraph::Shape const_shape(inputShape.size(), 1); - const_shape.back() = 32; - auto const_input = ov::test::utils::deprecated::make_constant(ngPrc, const_shape, std::vector{}, true); - auto concat = std::make_shared(ov::NodeVector{const_input, ss}, inputShape.size() - 1); - - function = std::make_shared(concat, params, "StridedSliceConcatTest"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp deleted file mode 100644 index 56b1e2a980785e..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/stridedslice_conv.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/convolution.hpp" - -namespace SubgraphTestsDefinitions { - -std::string SliceConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map configuration; - size_t outputChannels; - convParams convolutionParams; - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(netPrecision, targetDevice, configuration, convolutionParams, outputChannels) = obj.param; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "S=" << stride << "_"; - result << "OC=" << outputChannels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - for (auto const& configItem : configuration) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - return result.str(); -} - -InferenceEngine::Blob::Ptr SliceConvTest::GenerateInput(const InferenceEngine::InputInfo& info) const { - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - auto* rawBlobDataPtr = blob->buffer().as(); - std::vector values = 
ov::test::utils::generate_float_numbers(blob->size(), -2.0f, 2.0f); - for (size_t i = 0; i < blob->size(); i++) { - rawBlobDataPtr[i] = values[i]; - } - return blob; -} - -void SliceConvTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::map tempConfig; - convParams convolutionParams; - size_t outputChannels; - std::tie(netPrecision, targetDevice, tempConfig, convolutionParams, outputChannels) = this->GetParam(); - configuration.insert(tempConfig.begin(), tempConfig.end()); - - std::vector inputShape; - std::vector kernelShape; - size_t stride; - std::tie(inputShape, kernelShape, stride) = convolutionParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - ov::Shape constShape = {4}; - auto beginNode = std::make_shared(ov::element::i64, constShape, std::vector{0, 0, 0, 64}); - auto endNode = std::make_shared(ov::element::i64, constShape, std::vector{1, 1, 1, 128}); - auto strideNode = std::make_shared(ov::element::i64, constShape, std::vector{1, 1, 1, 1}); - auto ss = std::make_shared(params[0], - beginNode, - endNode, - strideNode, - std::vector{1, 1, 1, 0}, - std::vector{1, 1, 1, 0}, - std::vector{0, 0, 0, 0}, - std::vector{0, 0, 0, 0}, - std::vector{0, 0, 0, 0}); - - auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * inputShape[1] * kernelShape[0] * kernelShape[1], - -0.2f, 0.2f); - auto conv = ov::test::utils::make_convolution(ss, - ngPrc, - {kernelShape[0], kernelShape[1]}, - {kernelShape[0] > 1 ? stride : 1, stride}, - {0, 0}, - { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); - - function = std::make_shared(conv, params, "StridedSliceConvTest"); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp deleted file mode 100644 index bd69b837380f12..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/transpose_conv_transpose_squeeze.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string TransposeConvTest::getTestCaseName(const testing::TestParamInfo& obj) { - ConvParams convParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::map config; - std::tie(convParams, netPrecision, inputShapes, targetDevice, config) = obj.param; - - std::vector inputArg; - std::vector kernelShape; - std::vector strides; - size_t inputChannels; - size_t outputChannels; - std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - for (auto const& configItem : config) { - result << "_configItem=" << configItem.first << "_" << configItem.second; - } - result << "_KERNEL=" << ov::test::utils::vec2str(kernelShape) << "_"; - result << "STRIDES=" << ov::test::utils::vec2str(strides) << "_"; - result << "IC=" << inputChannels << "_"; - result << "OC=" << outputChannels; - return 
result.str(); -} - -void TransposeConvTest::SetUp() { - ConvParams conv_params; - std::vector input_shape; - std::map config; - auto net_precision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(conv_params, net_precision, input_shape, targetDevice, config) = this->GetParam(); - configuration.insert(config.begin(), config.end()); - - std::vector kernel_shape, strides; - size_t input_channels, output_channels; - std::tie(kernel_shape, strides, input_channels, output_channels) = conv_params; - auto ng_prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(net_precision); - ov::ParameterVector params {std::make_shared(ng_prc, ov::Shape(input_shape))}; - - std::vector nchw_order = { 0, 3, 1, 2 }; - std::vector nhwc_order = { 0, 2, 3, 1 }; - std::vector conv_input_shape = {1, 1, input_shape[0] * input_shape[1] / input_channels, input_channels}; - auto reshape_pattern = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{conv_input_shape.size()}, conv_input_shape); - auto reshape = std::make_shared(params[0], reshape_pattern, false); - - const auto input_order1 = std::make_shared(ngraph::element::i64, - ngraph::Shape({conv_input_shape.size()}), - nchw_order); - auto transpose1 = std::make_shared(reshape, input_order1); - - float weight_val = 0.02; - auto filter_weights_node = ov::test::utils::deprecated::make_constant(ng_prc, {output_channels, input_channels, kernel_shape[0], kernel_shape[1]}, - { weight_val }); - - auto conv = std::make_shared(transpose1, filter_weights_node, strides, std::vector{ 0, 0 }, - std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ov::op::PadType::VALID); - - const auto input_order2 = std::make_shared(ngraph::element::i64, - ngraph::Shape({conv_input_shape.size()}), - nhwc_order); - auto transpose2 = std::make_shared(conv, input_order2); - - auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto squeeze = std::make_shared(transpose2, constant_squeeze); - - function = std::make_shared(squeeze, params, "transposeConv"); -} - -InferenceEngine::Blob::Ptr TransposeConvTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp deleted file mode 100644 index ebab5fe187f030..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/trivial_concat.hpp" -#include "common_test_utils/node_builders/activation.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace SubgraphTestsDefinitions { - -std::string TrivialConcatLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShapes; - InferenceEngine::Precision netPrecision; - std::string targetName; - std::map config; - std::tie(inputShapes, netPrecision, targetName, config) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void TrivialConcatLayerTest::SetUp() { - InferenceEngine::SizeVector 
inputShape; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShape, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - int axis = inputShape.size() - 2; - size_t total_size = std::accumulate(inputShape.begin(), inputShape.end(), static_cast(1), std::multiplies()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, total_size})}; - - auto input_relu = ov::test::utils::make_activation(params[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); - - auto input_reshape_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{inputShape.size()}, std::vector(inputShape)); - auto input = std::make_shared(input_relu, input_reshape_pattern, false); - - auto constant_values = ov::test::utils::generate_float_numbers(total_size, 15.5f, 16.1f); - auto constant = ov::test::utils::deprecated::make_constant(ngPrc, std::vector({1, total_size}), constant_values); - - auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); - - auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); - - auto final_reshape_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{2}, std::vector({1, 2 * total_size})); - auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); - - auto act = ov::test::utils::make_activation(final_reshape, ngPrc, ngraph::helpers::ActivationTypes::Relu); - - ngraph::ResultVector results{std::make_shared(act)}; - function = std::make_shared(results, params, "trivial_concat"); -} -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp b/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp deleted file mode 100644 index f23cacf31b1e98..00000000000000 --- a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/subgraph/two_fake_quantize_to_fullyconnected.hpp" - -#include "common_test_utils/node_builders/constant.hpp" -#include "common_test_utils/node_builders/fake_quantize.hpp" - -namespace SubgraphTestsDefinitions { - -std::string FakeQuantizeSubgraphTest::getTestCaseName(const testing::TestParamInfo& obj) { - fqSpecificParams fqParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::pair> config; - bool biases = false; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice, config, biases) = obj.param; - std::vector levels; - std::vector> constShape; - std::vector fqDirectArgs; - std::vector inputArg; - std::tie(levels, constShape, fqDirectArgs, inputArg) = fqParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "CS=" << ov::test::utils::vec2str(constShape) << "_"; - result << "LEVELS=" << ov::test::utils::vec2str(levels) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout 
<< "_"; - result << "outL=" << outLayout << "_"; - result << "biases=" << biases << "_"; - result << "trgDev=" << targetDevice; - if (!config.first.empty()) { - result << "_targetConfig=" << config.first; - } - if (!fqDirectArgs.empty()) { - result << "_fqArgs=" << fqDirectArgs[0] << "_" << fqDirectArgs[1] << "_" << fqDirectArgs[2] << "_" << fqDirectArgs[3]; - } - if (inputArg.size() == 3) { - result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2]; - } - return result.str(); -} - -void FakeQuantizeSubgraphTest::SetUp() { - fqSpecificParams fqParams; - std::vector inputShape; - std::pair> config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - bool biases = false; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice, config, biases) = this->GetParam(); - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector levels; - std::vector> constShape; - std::vector fqDirectArg; - std::vector inputArg; - std::tie(levels, constShape, fqDirectArg, inputArg) = fqParams; - if (inputArg.size() == 3) { - inputDataMin = inputArg[0]; - inputDataMax = inputArg[1]; - inputDataResolution = inputArg[2]; - } - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - const int seed = 0; - std::mt19937 gen(seed); - - - auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable { - std::vector res; - - std::uniform_real_distribution dist(min, max); - for (std::size_t i = 0; i < vec_len; i++) - res.emplace_back(static_cast(dist(gen))); - - return res; - }; - - - auto weightsRowNum = constShape[1][0]; - auto weightsColNum = inputShape[1]; - auto weightsData = generateFloatNumbers(weightsRowNum * weightsColNum, inputDataMin, inputDataMax); - auto const_param = ov::test::utils::deprecated::make_constant(ngPrc, { constShape[1][0], inputShape[1] }, { 1.0f }); - auto inputMinRange = std::vector{}; - auto inputMaxRange = std::vector{}; - auto channelDataSize = constShape[1]; - - if (channelDataSize[0] == 1) { - // If per tensor data needs to be provided - inputMinRange.push_back(inputDataMin); - inputMaxRange.push_back(inputDataMax); - } else if (channelDataSize[0] == weightsRowNum) { - // If per channel data needs to be provided - for (size_t i = 0; i < weightsRowNum; ++i) { - auto minChannelVal = std::numeric_limits::max(); - auto maxChannelVal = std::numeric_limits::min(); - for (size_t j = 0; j < weightsColNum; ++j) { - minChannelVal = std::min(minChannelVal, weightsData[i * weightsColNum + j]); - maxChannelVal = std::max(maxChannelVal, weightsData[i * weightsColNum + j]); - } - - inputMinRange.push_back(minChannelVal); - inputMaxRange.push_back(maxChannelVal); - } - } else { - FAIL() << "Invalid test configuration"; - } - - auto lowNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, channelDataSize, inputMinRange, false); - auto highNode = ov::test::utils::deprecated::make_constant(ngraph::element::f32, channelDataSize, inputMaxRange, false); - - auto inputFQNode = ov::test::utils::make_fake_quantize(params[0], ngraph::element::f32, levels[0], constShape[0], - { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - - auto weightsFQNode = std::make_shared(const_param, - lowNode, highNode, lowNode, highNode, levels[1]); - - auto inputFQ = std::dynamic_pointer_cast(inputFQNode); - auto weightsFQ = std::dynamic_pointer_cast(weightsFQNode); - auto matmul = 
std::make_shared(inputFQ, weightsFQ, false, true); - std::shared_ptr biases_node; - if (biases) { - auto const_bias = ov::test::utils::deprecated::make_constant(ngPrc, {1, constShape[1][0]}, std::vector{ -1.0f }); - biases_node = std::make_shared(matmul, const_bias); - } else { - biases_node = matmul; - } - - auto sigmoid = std::make_shared(biases_node); - ngraph::ResultVector results{std::make_shared(sigmoid)}; - if (biases) { - auto sigmoid_2 = std::make_shared(inputFQ); - results.push_back(std::make_shared(sigmoid_2)); - } - function = std::make_shared(results, params, "fakeQuantizeSubgraph"); - configuration = config.second; -} - -InferenceEngine::Blob::Ptr FakeQuantizeSubgraphTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} -} // namespace SubgraphTestsDefinitions From dca5f0d1f7ab446bcba1a3409cabfc41e617b335 Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Fri, 12 Jan 2024 15:03:56 +0100 Subject: [PATCH 28/43] [OV JS] Refactor GetClassConstructors and Inits (#21966) * Add reg_class function * Remove init methods * Remove PrePostProcessor Init * Rename get_class_constructor to get_class * Move FunctionReference initialization * Remove ptr to FunctionReferences --- src/bindings/js/node/include/addon.hpp | 28 +++++++++------ .../js/node/include/compiled_model.hpp | 6 +--- src/bindings/js/node/include/core_wrap.hpp | 7 ++-- .../js/node/include/infer_request.hpp | 7 +--- src/bindings/js/node/include/model_wrap.hpp | 7 +--- src/bindings/js/node/include/node_output.hpp | 14 ++------ .../js/node/include/partial_shape_wrap.hpp | 6 +--- .../preprocess/pre_post_process_wrap.hpp | 6 +--- src/bindings/js/node/include/tensor.hpp | 7 +--- src/bindings/js/node/src/addon.cpp | 35 +++++++++++------- src/bindings/js/node/src/async_reader.cpp | 2 +- src/bindings/js/node/src/compiled_model.cpp | 18 ++-------- src/bindings/js/node/src/core_wrap.cpp | 22 +++--------- src/bindings/js/node/src/infer_request.cpp | 18 ++-------- src/bindings/js/node/src/model_wrap.cpp | 18 ++-------- src/bindings/js/node/src/node_output.cpp | 36 ++++--------------- .../js/node/src/partial_shape_wrap.cpp | 18 ++-------- .../src/preprocess/pre_post_process_wrap.cpp | 14 +------- .../js/node/src/preprocess/preprocess.cpp | 5 +-- src/bindings/js/node/src/tensor.cpp | 18 ++-------- 20 files changed, 82 insertions(+), 210 deletions(-) diff --git a/src/bindings/js/node/include/addon.hpp b/src/bindings/js/node/include/addon.hpp index 460129d7a0fe2d..f9312978d0b87d 100644 --- a/src/bindings/js/node/include/addon.hpp +++ b/src/bindings/js/node/include/addon.hpp @@ -5,17 +5,25 @@ #include +typedef Napi::Function (*Prototype)(Napi::Env); + /** @brief A structure with data that will be associated with the instance of the ov.js node-addon. 
 */
 struct AddonData {
-    Napi::FunctionReference* compiled_model_prototype;
-    Napi::FunctionReference* core_prototype;
-    Napi::FunctionReference* const_output_prototype;
-    Napi::FunctionReference* infer_request_prototype;
-    Napi::FunctionReference* model_prototype;
-    Napi::FunctionReference* output_prototype;
-    Napi::FunctionReference* partial_shape_prototype;
-    Napi::FunctionReference* ppp_prototype;
-    Napi::FunctionReference* tensor_prototype;
+    Napi::FunctionReference compiled_model;
+    Napi::FunctionReference core;
+    Napi::FunctionReference const_output;
+    Napi::FunctionReference infer_request;
+    Napi::FunctionReference model;
+    Napi::FunctionReference output;
+    Napi::FunctionReference partial_shape;
+    Napi::FunctionReference ppp;
+    Napi::FunctionReference tensor;
 };
 
-Napi::Object init_all(Napi::Env env, Napi::Object exports);
+void init_class(Napi::Env env,
+                Napi::Object exports,
+                std::string class_name,
+                Prototype func,
+                Napi::FunctionReference& reference);
+
+Napi::Object init_module(Napi::Env env, Napi::Object exports);
diff --git a/src/bindings/js/node/include/compiled_model.hpp b/src/bindings/js/node/include/compiled_model.hpp
index 71bdd97b056283..b6de5e5a9d62d7 100644
--- a/src/bindings/js/node/include/compiled_model.hpp
+++ b/src/bindings/js/node/include/compiled_model.hpp
@@ -19,11 +19,7 @@ class CompiledModelWrap : public Napi::ObjectWrap<CompiledModelWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript CompiledModel class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript CompiledModel class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     /**
      * @brief Creates JavaScript CompiledModel object and wraps inside of it ov::CompiledModel object.
diff --git a/src/bindings/js/node/include/core_wrap.hpp b/src/bindings/js/node/include/core_wrap.hpp
index 8c0dd7d99eb44e..464456269467c9 100644
--- a/src/bindings/js/node/include/core_wrap.hpp
+++ b/src/bindings/js/node/include/core_wrap.hpp
@@ -4,6 +4,7 @@
 #pragma once
 
 #include <napi.h>
+#include <thread>
 
 #include "async_reader.hpp"
@@ -22,11 +23,7 @@ class CoreWrap : public Napi::ObjectWrap<CoreWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Core class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Core class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     /**
      * @brief Reads a model synchronously.
diff --git a/src/bindings/js/node/include/infer_request.hpp b/src/bindings/js/node/include/infer_request.hpp
index 03f5abd95414dd..1aaee2107af29e 100644
--- a/src/bindings/js/node/include/infer_request.hpp
+++ b/src/bindings/js/node/include/infer_request.hpp
@@ -32,12 +32,7 @@ class InferRequestWrap : public Napi::ObjectWrap<InferRequestWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript InferRequest class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
diff --git a/src/bindings/js/node/include/core_wrap.hpp b/src/bindings/js/node/include/core_wrap.hpp
index 8c0dd7d99eb44e..464456269467c9 100644
--- a/src/bindings/js/node/include/core_wrap.hpp
+++ b/src/bindings/js/node/include/core_wrap.hpp
@@ -4,6 +4,7 @@
 #pragma once
 
 #include <napi.h>
+
 #include <thread>
 
 #include "async_reader.hpp"
@@ -22,11 +23,7 @@ class CoreWrap : public Napi::ObjectWrap<CoreWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Core class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Core class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     /**
      * @brief Reads a model synchronously.
diff --git a/src/bindings/js/node/include/infer_request.hpp b/src/bindings/js/node/include/infer_request.hpp
index 03f5abd95414dd..1aaee2107af29e 100644
--- a/src/bindings/js/node/include/infer_request.hpp
+++ b/src/bindings/js/node/include/infer_request.hpp
@@ -32,12 +32,7 @@ class InferRequestWrap : public Napi::ObjectWrap<InferRequestWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript InferRequest class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript InferRequest class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     void set_infer_request(const ov::InferRequest& infer_request);
     /**
diff --git a/src/bindings/js/node/include/model_wrap.hpp b/src/bindings/js/node/include/model_wrap.hpp
index 8523dbaaabf65f..cedb965045b1f0 100644
--- a/src/bindings/js/node/include/model_wrap.hpp
+++ b/src/bindings/js/node/include/model_wrap.hpp
@@ -23,12 +23,7 @@ class ModelWrap : public Napi::ObjectWrap<ModelWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Model class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Model class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     void set_model(const std::shared_ptr<ov::Model>& model);
     /**
diff --git a/src/bindings/js/node/include/node_output.hpp b/src/bindings/js/node/include/node_output.hpp
index 6c78c133d05eb7..e7038690fe05cf 100644
--- a/src/bindings/js/node/include/node_output.hpp
+++ b/src/bindings/js/node/include/node_output.hpp
@@ -21,12 +21,7 @@ class Output<ov::Node> : public Napi::ObjectWrap<Output<ov::Node>> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Output class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Output class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     ov::Output<ov::Node> get_output() const;
 
@@ -54,12 +49,7 @@ class Output<const ov::Node> : public Napi::ObjectWrap<Output<const ov::Node>>
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Output class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Output class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     ov::Output<const ov::Node> get_output() const;
 
diff --git a/src/bindings/js/node/include/partial_shape_wrap.hpp b/src/bindings/js/node/include/partial_shape_wrap.hpp
index cb8de11e22f2d8..d52baf3ab01d67 100644
--- a/src/bindings/js/node/include/partial_shape_wrap.hpp
+++ b/src/bindings/js/node/include/partial_shape_wrap.hpp
@@ -21,11 +21,7 @@ class PartialShapeWrap : public Napi::ObjectWrap<PartialShapeWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript PartialShape class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-    /** @brief This method is called during initialization of OpenVino node-addon.
-     * It exports JavaScript PartialShape class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     /**
      * @brief Creates JavaScript PartialShape object and wraps inside of it ov::PartialShape object.
diff --git a/src/bindings/js/node/include/preprocess/pre_post_process_wrap.hpp b/src/bindings/js/node/include/preprocess/pre_post_process_wrap.hpp
index 4d40f4b34c4472..a751a3c7588472 100644
--- a/src/bindings/js/node/include/preprocess/pre_post_process_wrap.hpp
+++ b/src/bindings/js/node/include/preprocess/pre_post_process_wrap.hpp
@@ -26,11 +26,7 @@ class PrePostProcessorWrap : public Napi::ObjectWrap<PrePostProcessorWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript PrePostProcessor class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript PrePostProcessor class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     Napi::Value input(const Napi::CallbackInfo& info);
 
diff --git a/src/bindings/js/node/include/tensor.hpp b/src/bindings/js/node/include/tensor.hpp
index 6406194b1727ce..e531be6671a9bf 100644
--- a/src/bindings/js/node/include/tensor.hpp
+++ b/src/bindings/js/node/include/tensor.hpp
@@ -34,12 +34,7 @@ class TensorWrap : public Napi::ObjectWrap<TensorWrap> {
      * @param env The environment in which to construct a JavaScript class.
      * @return Napi::Function representing the constructor function for the Javascript Tensor class.
      */
-    static Napi::Function get_class_constructor(Napi::Env env);
-
-    /** @brief This method is called during initialization of OpenVino native add-on.
-     * It exports JavaScript Tensor class.
-     */
-    static Napi::Object init(Napi::Env env, Napi::Object exports);
+    static Napi::Function get_class(Napi::Env env);
 
     ov::Tensor get_tensor() const;
     void set_tensor(const ov::Tensor& tensor);
diff --git a/src/bindings/js/node/src/addon.cpp b/src/bindings/js/node/src/addon.cpp
index 584ac93aee16cc..3b619104edb0a9 100644
--- a/src/bindings/js/node/src/addon.cpp
+++ b/src/bindings/js/node/src/addon.cpp
@@ -16,25 +16,36 @@
 #include "preprocess/preprocess.hpp"
 #include "tensor.hpp"
 
+void init_class(Napi::Env env,
+                Napi::Object exports,
+                std::string class_name,
+                Prototype func,
+                Napi::FunctionReference& reference) {
+    const auto& prototype = func(env);
+
+    reference = Napi::Persistent(prototype);
+    exports.Set(class_name, prototype);
+}
+
 /** @brief Initialize native add-on */
-Napi::Object init_all(Napi::Env env, Napi::Object exports) {
+Napi::Object init_module(Napi::Env env, Napi::Object exports) {
     auto addon_data = new AddonData();
     env.SetInstanceData<AddonData>(addon_data);
 
-    ModelWrap::init(env, exports);
-    CoreWrap::init(env, exports);
-    CompiledModelWrap::init(env, exports);
-    InferRequestWrap::init(env, exports);
-    TensorWrap::init(env, exports);
-    Output<ov::Node>::init(env, exports);
-    Output<const ov::Node>::init(env, exports);
-    PartialShapeWrap::init(env, exports);
-
+    init_class(env, exports, "Model", &ModelWrap::get_class, addon_data->model);
+    init_class(env, exports, "Core", &CoreWrap::get_class, addon_data->core);
+    init_class(env, exports, "CompiledModel", &CompiledModelWrap::get_class, addon_data->compiled_model);
+    init_class(env, exports, "InferRequest", &InferRequestWrap::get_class, addon_data->infer_request);
+    init_class(env, exports, "Tensor", &TensorWrap::get_class, addon_data->tensor);
+    init_class(env, exports, "Output", &Output<ov::Node>::get_class, addon_data->output);
+    init_class(env, exports, "ConstOutput", &Output<const ov::Node>::get_class, addon_data->const_output);
+    init_class(env, exports, "PartialShape", &PartialShapeWrap::get_class, addon_data->partial_shape);
+
     preprocess::init(env, exports);
     element::init(env, exports);
-
+
     return exports;
 }
 
 /** @brief Register and initialize native add-on */
-NODE_API_MODULE(addon_openvino, init_all)
+NODE_API_MODULE(addon_openvino, init_module)
diff --git a/src/bindings/js/node/src/async_reader.cpp b/src/bindings/js/node/src/async_reader.cpp
index 8c44de7fe236ae..f9e104d67053fa 100644
--- a/src/bindings/js/node/src/async_reader.cpp
+++ b/src/bindings/js/node/src/async_reader.cpp
@@ -14,7 +14,7 @@ void ReaderWorker::Execute() {
 void ReaderWorker::OnOK() {
     Napi::HandleScope scope(Env());
-    Napi::Object mw = ModelWrap::get_class_constructor(Env()).New({});
+    Napi::Object mw = ModelWrap::get_class(Env()).New({});
     ModelWrap* m = Napi::ObjectWrap<ModelWrap>::Unwrap(mw);
 
     m->set_model(_model);
diff --git a/src/bindings/js/node/src/compiled_model.cpp b/src/bindings/js/node/src/compiled_model.cpp
index a5eba758a68ad7..82deba2a7d93ea 100644
--- a/src/bindings/js/node/src/compiled_model.cpp
+++ b/src/bindings/js/node/src/compiled_model.cpp
@@ -12,7 +12,7 @@
 CompiledModelWrap::CompiledModelWrap(const Napi::CallbackInfo& info)
     : Napi::ObjectWrap<CompiledModelWrap>(info),
       _compiled_model{} {}
 
-Napi::Function CompiledModelWrap::get_class_constructor(Napi::Env env) {
+Napi::Function CompiledModelWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "CompiledModel",
                        {InstanceMethod("createInferRequest", &CompiledModelWrap::create_infer_request),
@@ -22,25 +22,13 @@ Napi::Function CompiledModelWrap::get_class_constructor(Napi::Env env) {
                         InstanceAccessor<&CompiledModelWrap::get_outputs>("outputs")});
 }
 
-Napi::Object CompiledModelWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->compiled_model_prototype = ref;
-
-    exports.Set("CompiledModel", prototype);
-    return exports;
-}
-
 Napi::Object CompiledModelWrap::wrap(Napi::Env env, ov::CompiledModel compiled_model) {
     Napi::HandleScope scope(env);
-    const auto prototype = env.GetInstanceData<AddonData>()->compiled_model_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->compiled_model;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to CompiledModel prototype.");
     }
-    auto obj = prototype->New({});
+    auto obj = prototype.New({});
     const auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
     cm->_compiled_model = compiled_model;
     return obj;
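All of the wrap() helpers in this patch follow the shape shown above for CompiledModel: look up the persisted constructor by reference from the environment's instance data, instantiate it, then unwrap and fill the native object. A sketch of that flow, continuing the hypothetical ExampleData/DummyWrap names from the earlier sketch:

Napi::Object example_wrap(Napi::Env env) {
    Napi::HandleScope scope(env);
    // A reference (not a pointer) into the instance data; !prototype is true if it was never set.
    const auto& prototype = env.GetInstanceData<ExampleData>()->dummy;
    if (!prototype) {
        Napi::Error::New(env, "Dummy constructor was not registered.").ThrowAsJavaScriptException();
        return Napi::Object();
    }
    auto obj = prototype.New({});                             // invoke the persisted JS constructor
    auto native = Napi::ObjectWrap<DummyWrap>::Unwrap(obj);   // back to the C++ instance
    (void)native;  // a real wrap() would copy native state into it here
    return obj;
}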
diff --git a/src/bindings/js/node/src/core_wrap.cpp b/src/bindings/js/node/src/core_wrap.cpp
index 61b21e57496d6e..ce9fe5121ff21d 100644
--- a/src/bindings/js/node/src/core_wrap.cpp
+++ b/src/bindings/js/node/src/core_wrap.cpp
@@ -10,7 +10,7 @@
 CoreWrap::CoreWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap<CoreWrap>(info), _core{} {}
 
-Napi::Function CoreWrap::get_class_constructor(Napi::Env env) {
+Napi::Function CoreWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "Core",
                        {
@@ -21,18 +21,6 @@ Napi::Function CoreWrap::get_class_constructor(Napi::Env env) {
                        });
 }
 
-Napi::Object CoreWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->core_prototype = ref;
-
-    exports.Set("Core", prototype);
-    return exports;
-}
-
 Napi::Value CoreWrap::read_model_sync(const Napi::CallbackInfo& info) {
     try {
         ReadModelArgs* args;
@@ -66,8 +54,8 @@ Napi::Value CoreWrap::read_model_async(const Napi::CallbackInfo& info) {
 Napi::Value CoreWrap::compile_model_sync(const Napi::CallbackInfo& info,
                                          const Napi::Object& model,
                                          const Napi::String& device) {
-    const auto model_prototype = info.Env().GetInstanceData<AddonData>()->model_prototype;
-    if (model_prototype && model.InstanceOf(model_prototype->Value().As<Napi::Function>())) {
+    const auto& model_prototype = info.Env().GetInstanceData<AddonData>()->model;
+    if (model_prototype && model.InstanceOf(model_prototype.Value().As<Napi::Function>())) {
         const auto m = Napi::ObjectWrap<ModelWrap>::Unwrap(model);
         const auto& compiled_model = _core.compile_model(m->get_model(), device);
         return CompiledModelWrap::wrap(info.Env(), compiled_model);
@@ -142,7 +130,7 @@ void compileModelThreadModel(TsfnContextModel* context) {
     auto callback = [](Napi::Env env, Napi::Function, TsfnContextModel* context) {
         Napi::HandleScope scope(env);
 
-        auto obj = CompiledModelWrap::get_class_constructor(env).New({});
+        auto obj = CompiledModelWrap::get_class(env).New({});
         auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
         cm->set_compiled_model(context->_compiled_model);
 
@@ -159,7 +147,7 @@ void compileModelThreadPath(TsfnContextPath* context) {
     auto callback = [](Napi::Env env, Napi::Function, TsfnContextPath* context) {
         Napi::HandleScope scope(env);
 
-        auto obj = CompiledModelWrap::get_class_constructor(env).New({});
+        auto obj = CompiledModelWrap::get_class(env).New({});
         auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
         cm->set_compiled_model(context->_compiled_model);
 
diff --git a/src/bindings/js/node/src/infer_request.cpp b/src/bindings/js/node/src/infer_request.cpp
index 7fb382cb652d2b..2a61411fb8d420 100644
--- a/src/bindings/js/node/src/infer_request.cpp
+++ b/src/bindings/js/node/src/infer_request.cpp
@@ -19,7 +19,7 @@
 InferRequestWrap::InferRequestWrap(const Napi::CallbackInfo& info)
     : Napi::ObjectWrap<InferRequestWrap>(info),
       _infer_request{} {}
 
-Napi::Function InferRequestWrap::get_class_constructor(Napi::Env env) {
+Napi::Function InferRequestWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "InferRequest",
                        {
@@ -35,29 +35,17 @@ Napi::Function InferRequestWrap::get_class_constructor(Napi::Env env) {
                        });
 }
 
-Napi::Object InferRequestWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->infer_request_prototype = ref;
-
-    exports.Set("InferRequest", prototype);
-    return exports;
-}
-
 void InferRequestWrap::set_infer_request(const ov::InferRequest& infer_request) {
     _infer_request = infer_request;
 }
 
 Napi::Object InferRequestWrap::wrap(Napi::Env env, ov::InferRequest infer_request) {
     Napi::HandleScope scope(env);
-    const auto prototype = env.GetInstanceData<AddonData>()->infer_request_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->infer_request;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to InferRequest prototype.");
     }
-    auto obj = prototype->New({});
+    auto obj = prototype.New({});
     const auto ir = Napi::ObjectWrap<InferRequestWrap>::Unwrap(obj);
     ir->set_infer_request(infer_request);
     return obj;
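Worth noting for the async paths: the thread-safe-function callbacks above (and ReaderWorker::OnOK earlier) build the JS object from a fresh get_class(env) result rather than the persisted reference, so each call defines a new class. A sketch of the two construction routes, same hypothetical names as above:

// Two ways an object can be materialized in this scheme (sketch, hypothetical names).
Napi::Object from_persisted(Napi::Env env) {
    // Shared constructor; results should satisfy `instanceof` against the exported class.
    return env.GetInstanceData<ExampleData>()->dummy.New({});
}

Napi::Object from_fresh_class(Napi::Env env) {
    // Defines a new class on every call, as the async callbacks above do.
    return DummyWrap::get_class(env).New({});
}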
diff --git a/src/bindings/js/node/src/model_wrap.cpp b/src/bindings/js/node/src/model_wrap.cpp
index c88375bbd88923..9ba601cc728daa 100644
--- a/src/bindings/js/node/src/model_wrap.cpp
+++ b/src/bindings/js/node/src/model_wrap.cpp
@@ -12,7 +12,7 @@ ModelWrap::ModelWrap(const Napi::CallbackInfo& info)
       _core{},
       _compiled_model{} {}
 
-Napi::Function ModelWrap::get_class_constructor(Napi::Env env) {
+Napi::Function ModelWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "ModelWrap",
                        {InstanceMethod("getName", &ModelWrap::get_name),
@@ -22,29 +22,17 @@ Napi::Function ModelWrap::get_class_constructor(Napi::Env env) {
                         InstanceAccessor<&ModelWrap::get_outputs>("outputs")});
 }
 
-Napi::Object ModelWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->model_prototype = ref;
-
-    exports.Set("Model", prototype);
-    return exports;
-}
-
 void ModelWrap::set_model(const std::shared_ptr<ov::Model>& model) {
     _model = model;
 }
 
 Napi::Object ModelWrap::wrap(Napi::Env env, std::shared_ptr<ov::Model> model) {
     Napi::HandleScope scope(env);
-    const auto prototype = env.GetInstanceData<AddonData>()->model_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->model;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to model prototype.");
     }
-    const auto& model_js = prototype->New({});
+    const auto& model_js = prototype.New({});
     const auto mw = Napi::ObjectWrap<ModelWrap>::Unwrap(model_js);
     mw->set_model(model);
     return model_js;
diff --git a/src/bindings/js/node/src/node_output.cpp b/src/bindings/js/node/src/node_output.cpp
index 686b3666891132..4c2fba452c4cd4 100644
--- a/src/bindings/js/node/src/node_output.cpp
+++ b/src/bindings/js/node/src/node_output.cpp
@@ -9,7 +9,7 @@
 Output<ov::Node>::Output(const Napi::CallbackInfo& info) : Napi::ObjectWrap<Output<ov::Node>>(info), _output{} {}
 
-Napi::Function Output<ov::Node>::get_class_constructor(Napi::Env env) {
+Napi::Function Output<ov::Node>::get_class(Napi::Env env) {
     return Output<ov::Node>::DefineClass(
         env,
         "Output",
@@ -21,28 +21,16 @@ Napi::Function Output<ov::Node>::get_class_constructor(Napi::Env env) {
         Output<ov::Node>::InstanceMethod("toString", &Output<ov::Node>::get_any_name)});
 }
 
-Napi::Object Output<ov::Node>::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->output_prototype = ref;
-
-    exports.Set("Output", prototype);
-    return exports;
-}
-
 ov::Output<ov::Node> Output<ov::Node>::get_output() const {
     return _output;
 }
 
 Napi::Object Output<ov::Node>::wrap(Napi::Env env, ov::Output<ov::Node> output) {
-    const auto prototype = env.GetInstanceData<AddonData>()->output_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->output;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to Output prototype.");
     }
-    const auto& obj = prototype->New({});
+    const auto& obj = prototype.New({});
     Output<ov::Node>* output_ptr = Napi::ObjectWrap<Output<ov::Node>>::Unwrap(obj);
     output_ptr->_output = output;
     return obj;
 }
@@ -64,7 +52,7 @@
 Output<const ov::Node>::Output(const Napi::CallbackInfo& info)
     : Napi::ObjectWrap<Output<const ov::Node>>(info),
       _output{} {}
 
-Napi::Function Output<const ov::Node>::get_class_constructor(Napi::Env env) {
+Napi::Function Output<const ov::Node>::get_class(Napi::Env env) {
     return Output<const ov::Node>::DefineClass(
         env,
         "ConstOutput",
@@ -76,28 +64,16 @@ Napi::Function Output<const ov::Node>::get_class_constructor(Napi::Env env) {
         Output<const ov::Node>::InstanceMethod("toString", &Output<const ov::Node>::get_any_name)});
 }
 
-Napi::Object Output<const ov::Node>::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->const_output_prototype = ref;
-
-    exports.Set("ConstOutput", prototype);
-    return exports;
-}
-
 ov::Output<const ov::Node> Output<const ov::Node>::get_output() const {
     return _output;
 }
 
 Napi::Object Output<const ov::Node>::wrap(Napi::Env env, ov::Output<const ov::Node> output) {
-    const auto prototype = env.GetInstanceData<AddonData>()->const_output_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->const_output;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to ConstOutput prototype.");
     }
-    const auto& obj = prototype->New({});
+    const auto& obj = prototype.New({});
     Output<const ov::Node>* output_ptr = Napi::ObjectWrap<Output<const ov::Node>>::Unwrap(obj);
     output_ptr->_output = output;
     return obj;
diff --git a/src/bindings/js/node/src/partial_shape_wrap.cpp b/src/bindings/js/node/src/partial_shape_wrap.cpp
index b46f689bcbf033..7f7ef05e53d3cd 100644
--- a/src/bindings/js/node/src/partial_shape_wrap.cpp
+++ b/src/bindings/js/node/src/partial_shape_wrap.cpp
@@ -21,7 +21,7 @@ PartialShapeWrap::PartialShapeWrap(const Napi::CallbackInfo& info) : Napi::Objec
     }
 }
 
-Napi::Function PartialShapeWrap::get_class_constructor(Napi::Env env) {
+Napi::Function PartialShapeWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "PartialShapeWrap",
                        {
@@ -32,24 +32,12 @@ Napi::Function PartialShapeWrap::get_class_constructor(Napi::Env env) {
                        });
 }
 
-Napi::Object PartialShapeWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->partial_shape_prototype = ref;
-
-    exports.Set("PartialShape", prototype);
-    return exports;
-}
-
 Napi::Object PartialShapeWrap::wrap(Napi::Env env, ov::PartialShape partial_shape) {
-    const auto prototype = env.GetInstanceData<AddonData>()->partial_shape_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->partial_shape;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to PartialShape prototype.");
     }
-    auto obj = prototype->New({});
+    auto obj = prototype.New({});
     const auto t = Napi::ObjectWrap<PartialShapeWrap>::Unwrap(obj);
     t->_partial_shape = partial_shape;
 
diff --git a/src/bindings/js/node/src/preprocess/pre_post_process_wrap.cpp b/src/bindings/js/node/src/preprocess/pre_post_process_wrap.cpp
index d064502538814f..b715647c8f2b58 100644
--- a/src/bindings/js/node/src/preprocess/pre_post_process_wrap.cpp
+++ b/src/bindings/js/node/src/preprocess/pre_post_process_wrap.cpp
@@ -16,7 +16,7 @@ PrePostProcessorWrap::PrePostProcessorWrap(const Napi::CallbackInfo& info)
     }
 }
 
-Napi::Function PrePostProcessorWrap::get_class_constructor(Napi::Env env) {
+Napi::Function PrePostProcessorWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "PrePostProcessorWrap",
                        {InstanceMethod("input", &PrePostProcessorWrap::input),
@@ -24,18 +24,6 @@ Napi::Function PrePostProcessorWrap::get_class_constructor(Napi::Env env) {
                         InstanceMethod("build", &PrePostProcessorWrap::build)});
 }
 
-Napi::Object PrePostProcessorWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->ppp_prototype = ref;
-
-    exports.Set("PrePostProcessor", prototype);
-    return exports;
-}
-
 Napi::Value PrePostProcessorWrap::input(const Napi::CallbackInfo& info) {
     if (info.Length() != 0 && info.Length() != 1) {
         reportError(info.Env(), "Wrong number of parameters.");
diff --git a/src/bindings/js/node/src/preprocess/preprocess.cpp b/src/bindings/js/node/src/preprocess/preprocess.cpp
index 96b63eae1d8b8f..ef085ea6b0058c 100644
--- a/src/bindings/js/node/src/preprocess/preprocess.cpp
+++ b/src/bindings/js/node/src/preprocess/preprocess.cpp
@@ -3,7 +3,7 @@
 
 #include "preprocess/preprocess.hpp"
 
-
+#include "addon.hpp"
 
 namespace preprocess {
     Napi::Object init(Napi::Env env, Napi::Object exports) {
@@ -19,7 +19,8 @@ namespace preprocess {
         auto preprocess = Napi::Object::New(env);
         auto resizeAlgorithm = Napi::PropertyDescriptor::Accessor("resizeAlgorithm");
 
-        PrePostProcessorWrap::init(env, preprocess);
+        const auto data = env.GetInstanceData<AddonData>();
+        init_class(env, preprocess, "PrePostProcessor", &PrePostProcessorWrap::get_class, data->ppp);
 
         preprocess.DefineProperty(resizeAlgorithm);
         return preprocess;
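One detail in preprocess.cpp above: init_class takes any Napi::Object as its export target, so a nested namespace object works the same way as the module root. A sketch, continuing the hypothetical example from the earlier sketches:

Napi::Object example_init_namespaced(Napi::Env env, Napi::Object exports) {
    // Same helper, different target object: the class lands on exports.preprocess.Dummy.
    auto data = env.GetInstanceData<ExampleData>();
    auto ns = Napi::Object::New(env);
    example_init_class(env, ns, "Dummy", &DummyWrap::get_class, data->dummy);
    exports.Set("preprocess", ns);
    return exports;
}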
diff --git a/src/bindings/js/node/src/tensor.cpp b/src/bindings/js/node/src/tensor.cpp
index a644692720b1bb..1c9f67240c416d 100644
--- a/src/bindings/js/node/src/tensor.cpp
+++ b/src/bindings/js/node/src/tensor.cpp
@@ -38,7 +38,7 @@ TensorWrap::TensorWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap<Tenso
     }
 }
 
-Napi::Function TensorWrap::get_class_constructor(Napi::Env env) {
+Napi::Function TensorWrap::get_class(Napi::Env env) {
     return DefineClass(env,
                        "Tensor",
                        {InstanceAccessor<&TensorWrap::get_data, &TensorWrap::set_data>("data"),
@@ -47,18 +47,6 @@ Napi::Function TensorWrap::get_class_constructor(Napi::Env env) {
                         InstanceMethod("getElementType", &TensorWrap::get_element_type)});
 }
 
-Napi::Object TensorWrap::init(Napi::Env env, Napi::Object exports) {
-    const auto& prototype = get_class_constructor(env);
-
-    const auto ref = new Napi::FunctionReference();
-    *ref = Napi::Persistent(prototype);
-    const auto data = env.GetInstanceData<AddonData>();
-    data->tensor_prototype = ref;
-
-    exports.Set("Tensor", prototype);
-    return exports;
-}
-
 ov::Tensor TensorWrap::get_tensor() const {
     return this->_tensor;
 }
@@ -68,11 +56,11 @@ void TensorWrap::set_tensor(const ov::Tensor& tensor) {
 }
 
 Napi::Object TensorWrap::wrap(Napi::Env env, ov::Tensor tensor) {
-    const auto prototype = env.GetInstanceData<AddonData>()->tensor_prototype;
+    const auto& prototype = env.GetInstanceData<AddonData>()->tensor;
     if (!prototype) {
         OPENVINO_THROW("Invalid pointer to Tensor prototype.");
     }
-    auto tensor_js = prototype->New({});
+    auto tensor_js = prototype.New({});
     const auto t = Napi::ObjectWrap<TensorWrap>::Unwrap(tensor_js);
     t->set_tensor(tensor);
     return tensor_js;

From 79447b3a365ee93e3ff06b12ee078bb47acda980 Mon Sep 17 00:00:00 2001
From: Irina Efode
Date: Fri, 12 Jan 2024 18:16:20 +0400
Subject: [PATCH 29/43] [CONFORMANCE] Pre-commit migration over new version
 (#21725)

* [CONFORMANCE] Migration to new version

* Cache using in gha.
Update expected failures * Device in GHA * Increase cpu_cnt used in run_conformance * cache * Increase Op conformance timeout up 20 min * Update expected_failures_API.csv * Update linux.yml * Remove extra file * Update run_conformance.py --- .github/workflows/linux.yml | 5 +- .../layer_tests_summary/data/models.lst | 2 +- .../github/cache/CPU/test_cache_API.lst | 688 ++ .../github/cache/CPU/test_cache_OP.lst | 5631 +++++++++++++++++ .../CPU/expected_failures_API.csv | 27 + .../skip_configs/CPU/expected_failures_OP.csv | 211 + .../layer_tests_summary/run_conformance.py | 4 +- .../CPU/expected_failures_API.csv | 3764 ----------- .../skip_configs/CPU/expected_failures_OP.csv | 1134 ---- 9 files changed, 6563 insertions(+), 4903 deletions(-) create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_API.lst create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_OP.lst create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv create mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv delete mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv delete mode 100644 src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 51a59b35f51cd6..319abaa44d564a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -302,7 +302,7 @@ jobs: Conformance: needs: [ Build, Smart_CI ] - timeout-minutes: ${{ matrix.TEST_TYPE == 'API' && 5 || 15 }} + timeout-minutes: ${{ matrix.TEST_TYPE == 'API' && 5 || 20 }} defaults: run: shell: bash @@ -386,7 +386,8 @@ jobs: -d=${TEST_DEVICE} \ -t=${{ matrix.TEST_TYPE }} \ -w=${CONFORMANCE_ARTIFACTS_DIR} \ - -f=${CONFORMANCE_TOOLS_DIR}/skip_configs/${TEST_DEVICE}/expected_failures_${{ matrix.TEST_TYPE }}.csv + --cache_path=${CONFORMANCE_TOOLS_DIR}/github/cache/${TEST_DEVICE}/test_cache_${{ matrix.TEST_TYPE }}.lst \ + -f=${CONFORMANCE_TOOLS_DIR}/github/skip_configs/${TEST_DEVICE}/expected_failures_${{ matrix.TEST_TYPE }}.csv - name: Pack Conformance Artifacts if: ${{ always() }} diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/models.lst b/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/models.lst index 99c5151ca2f0d4..3f517d20061dbb 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/models.lst +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/models.lst @@ -1 +1 @@ -https://storage.openvinotoolkit.org/test_data/conformance_ir/conformance_ir.tar \ No newline at end of file +https://storage.openvinotoolkit.org/test_data/conformance_ir/2023.3.0-13657-d5b0f4d2d73.tar \ No newline at end of file diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_API.lst b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_API.lst new file mode 100644 index 00000000000000..0accefe8608859 --- /dev/null +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_API.lst @@ -0,0 +1,688 @@ +1009:ov_plugin_mandatory/OVHoldersTest.Orders/target_device=CPU 
+794:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_CPU +781:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_CPU +753:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_CPU +751:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_CPU +701:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_CPU +700:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_CPU +698:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_CPU +688:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_CPU +678:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_CPU +670:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_CPU +663:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_CPU +661:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_CPU +654:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_CPU +651:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_CPU +624:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_CPU +615:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_CPU +599:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_CPU +593:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_CPU +550:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_CPU +538:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_CPU +534:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_CPU +495:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_CPU +489:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_CPU +473:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_CPU +459:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_CPU +453:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_CPU +450:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_CPU +445:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_CPU +437:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_CPU 
+437:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_CPU +426:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_CPU +425:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_CPU +418:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_CPU +408:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_CPU +405:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_CPU +405:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_CPU +405:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_CPU +403:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_CPU +380:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_CPU +373:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_CPU +365:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_CPU +365:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_CPU +364:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_CPU +364:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_CPU +361:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_CPU +360:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_CPU +357:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_CPU +353:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_CPU +353:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_CPU +349:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_CPU +345:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_CPU +344:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_CPU +334:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_CPU +334:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_CPU +331:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_CPU +330:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_CPU +329:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_CPU +329:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_CPU +328:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_CPU +326:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_CPU +324:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_CPU 
+321:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_CPU +319:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_CPU +317:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_CPU +317:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_CPU +317:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_CPU +315:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_CPU +314:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_CPU +311:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_CPU +311:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_CPU +309:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_CPU +309:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_CPU +309:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_CPU +307:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_CPU +307:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_CPU +306:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_CPU +305:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_CPU +304:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_CPU +303:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_CPU +303:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_CPU +302:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_CPU +302:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_CPU +301:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_CPU +300:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_CPU +300:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_CPU +300:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_CPU +300:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_CPU +299:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_CPU +299:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_CPU +298:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_CPU +298:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_CPU +298:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_CPU +297:ov_infer_request_mandatory/OVInferenceChainingStatic.StaticOutputToStaticInput/targetDevice=CPU_ +296:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_CPU +296:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_CPU 
+293:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_CPU +292:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_CPU +292:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_CPU +291:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_CPU +291:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_CPU +291:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_CPU +290:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_CPU +287:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_CPU +287:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_CPU +287:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_CPU +286:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_CPU +286:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_CPU +285:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_CPU +285:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_CPU +283:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_CPU +283:ov_infer_request/OVInferenceChaining.StaticOutputToStaticInput/targetDevice=CPU_ +281:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_CPU +279:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_CPU +279:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_ +276:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_CPU +276:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_CPU +276:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_CPU +276:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_CPU +276:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_CPU +275:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_CPU +274:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_CPU +273:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_CPU +273:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_CPU +271:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_CPU +270:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_CPU +270:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_CPU +269:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_CPU +269:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_CPU +266:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_CPU +265:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_CPU 
+265:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_CPU +265:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_CPU +264:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_CPU +264:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_CPU +264:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_CPU +263:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_CPU +263:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_CPU +262:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_CPU +261:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_CPU +261:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_CPU +261:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_CPU +261:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_CPU +260:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_CPU +259:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_CPU +259:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_CPU +258:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_CPU +258:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_CPU +258:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_CPU +258:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_CPU +258:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_CPU +258:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_CPU +257:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_CPU +257:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_CPU +257:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_CPU +257:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_CPU +256:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_CPU +255:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_CPU +255:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_CPU +255:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_CPU +254:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_CPU +253:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_CPU +253:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_CPU +252:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_CPU +252:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_CPU +252:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_CPU +251:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_CPU 
+251:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_CPU +250:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_CPU +249:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_CPU +249:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_CPU +249:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_CPU +248:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_CPU +247:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_CPU +247:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_CPU +246:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_CPU +245:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_CPU +245:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_CPU +245:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_CPU +245:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_CPU +245:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_CPU +245:ov_infer_request/OVInferenceChaining.StaticOutputToDynamicInput/targetDevice=CPU_ +243:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_CPU +243:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_CPU +243:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_CPU +243:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_CPU +243:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetwork/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_ +242:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_CPU +241:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_CPU +241:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_CPU +241:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_CPU +240:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_CPU +239:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_CPU +239:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_CPU +239:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_CPU 
+238:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_CPU +238:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_CPU +238:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_CPU +238:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_CPU +237:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_CPU +237:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_CPU +237:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_CPU +235:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_CPU +235:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_CPU +234:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_CPU +234:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_CPU +234:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_CPU +233:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_CPU +233:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_CPU +232:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_CPU +232:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_CPU +232:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_CPU +232:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetwork/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_ +231:ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_CPU +230:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_CPU +230:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_CPU +229:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_CPU +229:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_CPU +229:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_CPU +229:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_CPU +228:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_CPU +227:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_CPU +227:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_CPU +227:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_CPU +227:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_CPU +227:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_ +226:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_CPU +225:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_CPU +225:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_CPU 
+224:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_CPU
+223:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_CPU
+223:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_CPU
+223:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+222:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_CPU
+221:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_CPU
+221:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_CPU
+221:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_CPU
+220:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_CPU
+220:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_CPU
+219:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_CPU
+219:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_CPU
+218:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_CPU
+218:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_CPU
+218:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_CPU
+218:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_CPU
+217:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_CPU
+217:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_CPU
+216:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_CPU
+216:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_CPU
+216:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_CPU
+215:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_CPU
+215:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_CPU
+214:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_CPU
+212:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_CPU
+211:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_CPU
+211:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_CPU
+210:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_CPU
+210:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_CPU
+208:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_CPU
+207:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_CPU
+207:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+206:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_CPU
+206:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_CPU
+206:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_CPU
+205:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_CPU
+205:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+205:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanCreateTwoCompiledModelsAndCheckRuntimeModel/targetDevice=CPU_
+204:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_CPU
+204:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_CPU
+202:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_CPU
+202:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_CPU
+201:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_CPU
+200:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_CPU
+199:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_CPU
+199:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_CPU
+199:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_CPU
+197:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_CPU
+197:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_CPU
+196:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_CPU
+196:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_CPU
+195:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_CPU
+195:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_CPU
+195:ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedOutputTensorThrow/targetDevice=CPU_
+194:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_CPU
+193:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_CPU
+193:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_CPU
+193:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_CPU
+192:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_CPU
+192:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_CPU
+191:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_CPU
+190:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_CPU
+190:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+189:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_CPU
+189:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_CPU
+189:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=i16_
+188:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_CPU
+188:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_CPU
+187:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_CPU
+186:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_CPU
+186:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=boolean_
+185:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=i32_
+184:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_CPU
+182:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_CPU
+182:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_CPU
+182:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_CPU
+181:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_CPU
+181:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+181:ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=CPU
+179:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_CPU
+178:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_CPU
+178:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_CPU
+178:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERFORMANCE_HINT_NUM_REQUESTS:1}
+178:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_CPU
+178:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_CPU
+178:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i32_target_device=CPU_
+178:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+178:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=i64_
+178:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=i8_
+177:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=boolean_target_device=CPU_
+176:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_CPU
+176:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_CPU
+174:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_CPU
+173:ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_CPU
+173:ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+173:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=bf16_
+172:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+172:ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=CPU_
+171:ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=bf16_target_device=CPU_
+171:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=boolean_
+171:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=f64_
+170:ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputAfterInferSync/targetDevice=CPU_
+170:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f32_target_device=CPU_
+170:ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+170:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=i16_
+169:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_CPU
+169:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_CPU
+169:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i8_target_device=CPU_
+168:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_CPU
+168:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+167:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_CPU
+167:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=u32_
+166:ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/0
+166:ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceNoThrow/0
+166:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE}
+166:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+166:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=u16_
+166:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=i16_
+165:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i8_target_device=CPU_
+165:ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+165:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=i16_
+163:ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceUsingDevicePropertiesNoThrow/0
+163:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCreateTwoCompiledModel/targetDevice=CPU_
+162:ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=CPU
+162:ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=CPU_
+161:ov_infer_request_mandatory/OVInferRequestIOTensorTest.CheckInferIsNotChangeInput/targetDevice=CPU_
+160:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=u64_
+159:ov_plugin_mandatory/OVHoldersTest.LoadedRemoteContext/target_device=CPU
+159:ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_
+159:ov_compiled_model_mandatory/OVCompiledModelBaseTest.precisionsAsInOriginalFunction/targetDevice=CPU_
+159:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=bf16_
+158:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=f32_
+157:ov_infer_request_mandatory/OVInferRequestCallbackTests.ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout/targetDevice=CPU_
+157:ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+157:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=u64_
+156:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_CPU
+156:ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDevice2NoThrow/0
+156:ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyWithWait/targetDevice=CPU_
+156:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f64_target_device=CPU_
+156:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i32_target_device=CPU_
+156:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+156:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=f16_
+156:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=u8_
+155:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_CPU
+155:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_CPU
+155:ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsParallelWithWait/targetDevice=CPU_
+154:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_HYPER_THREADING:YES}
+154:ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputFromFunctionWithSingleInput/targetDevice=CPU_
+154:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=f16_
+153:ov_infer_request_mandatory/OVInferRequestCallbackTests.returnGeneralErrorIfCallbackThrowException/targetDevice=CPU_
+153:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+152:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_CPU
+152:ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedInputTensorThrow/targetDevice=CPU_
+152:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f64_target_device=CPU_
+152:ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+152:ov_infer_request/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=bf16_target_device=CPU_
+152:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=bf16_
+152:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=u64_
+152:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=f32_
+151:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_CPU
+151:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u8_target_device=CPU_
+151:ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+151:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=f64_
+150:ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=bf16_target_device=CPU_
+149:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={AFFINITY:NUMA}
+149:ov_infer_request_mandatory/OVInferRequestWaitTests.FailedAsyncInferWithNegativeTimeForWait/targetDevice=CPU_
+149:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=f32_
+149:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=bf16_
+148:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i16_target_device=CPU_
+148:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+148:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=u16_
+147:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={AFFINITY:HYBRID_AWARE}
+147:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=f64_
+146:ov_plugin_mandatory/OVHoldersTest.LoadedAny/target_device=CPU
+146:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u16_target_device=CPU_
+146:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModelwithBrace/targetDevice=CPU_
+146:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=i64_
+145:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_CPU
+145:ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+145:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=u8_
+144:ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_CPU
+144:ov_plugin/OVClassModelOptionalTestP.CompileModelCreateDefaultExecGraphResult/0
+144:ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3SyncRequestsConsistentlyFromThreads/targetDevice=CPU_
+144:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetInputWithIncorrectSizes/targetDevice=CPU_
+144:ov_infer_request_mandatory/OVInferRequestCallbackTests.canCallAsyncWithCompletionCallback/targetDevice=CPU_
+144:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+144:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+144:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=u8_
+144:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=i32_
+143:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_CPU_PINNING:YES}
+143:ov_infer_request_mandatory/OVInferRequestWaitTests.throwExceptionOnGetTensorAfterAsyncInfer/targetDevice=CPU_
+143:ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterSyncInfer/targetDevice=CPU_
+142:ov_compiled_model_mandatory/OVCompiledModelBaseTest.loadIncorrectV10Model/targetDevice=CPU_
+142:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=f32_
+141:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanSetOutputPrecisionForNetwork/targetDevice=CPU_
+141:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=f16_
+141:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=u32_
+140:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_HYPER_THREADING:NO}
+140:ov_infer_request_mandatory/OVInferRequestWaitTests.canstart_asyncInferWithGetInOutWithStatusOnlyWait/targetDevice=CPU_
+140:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u64_target_device=CPU_
+140:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+140:ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoBeforeExecution/targetDevice=CPU_
+139:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+139:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=boolean_
+139:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=u64_
+138:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=boolean_target_device=CPU_
+138:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u8_target_device=CPU_
+138:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i64_target_device=CPU_
+138:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+138:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=u32_
+137:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERFORMANCE_HINT:CUMULATIVE_THROUGHPUT}
+137:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=f64_
+137:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=u16_
+136:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i32_target_device=CPU_
+136:ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+136:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=u64_
+136:ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/0
+135:ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputAfterInferSync/targetDevice=CPU_
+135:ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+134:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_DEBUG}
+134:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u64_target_device=CPU_
+134:ov_infer_request_2/OVInferRequestDynamicTests.GetSameTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+134:ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=CPU_properties={PERF_COUNT:NO}
+134:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=i64_
+134:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=boolean_
+133:ov_infer_request_mandatory/OVInferRequestPerfCountersTest.CheckOperationInProfilingInfo/targetDevice=CPU_
+133:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+133:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=i8_
+132:ov_infer_request_mandatory/OVInferRequestWaitTests.CorrectOneAsyncInferWithGetInOutWithInfWait/targetDevice=CPU_
+132:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferAfterIOBlobReallocation/targetDevice=CPU_
+132:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f32_target_device=CPU_
+132:ov_infer_request_mandatory/OVInferRequestCancellationTests.CanResetAfterCancelAsyncRequest/targetDevice=CPU_
+132:ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoAfterExecution/targetDevice=CPU_
+131:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f64_target_device=CPU_
+130:ov_plugin_mandatory/OVClassModelTestP.QueryModelWithKSO/0
+130:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_INFO}
+130:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_MODE_HINT:ACCURACY}
+130:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_CPU_PINNING:NO}
+130:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutSync/targetDevice=CPU_
+130:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u32_target_device=CPU_
+130:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=boolean_target_device=CPU_
+129:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetUninitializedInputTensor/targetDevice=CPU_
+129:ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelInferRequest/targetDevice=CPU_
+129:ov_compiled_model_mandatory/OVCompiledModelBaseTest.loadIncorrectV11Model/targetDevice=CPU_
+129:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=i64_
+128:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_WARNING}
+128:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i8_target_device=CPU_
+128:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i16_target_device=CPU_
+128:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f64_target_device=CPU_
+128:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfo/targetDevice=CPU_
+128:ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=CPU_properties={PERF_COUNT:NO}
+128:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=i8_
+128:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=f64_
+127:ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputDoNotReAllocateData/targetDevice=CPU_
+127:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithGetOut/targetDevice=CPU_
+127:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModelFromMemory/targetDevice=CPU_
+127:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_NETWORK_NAME/0
+127:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=u16_
+127:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=boolean_
+126:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i8_target_device=CPU_
+126:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=u32_
+125:ov_plugin_mandatory/OVHoldersTest.LoadedState/target_device=CPU
+125:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_SUPPORTED_CONFIG_KEYS/0
+124:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={NUM_STREAMS:3}
+124:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i16_target_device=CPU_
+124:ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=CPU_elementType=u8_
+123:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERF_COUNT:YES}
+123:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={AFFINITY:CORE}
+123:ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+123:ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputFromFunctionWithSingleInput/targetDevice=CPU_
+123:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModel/targetDevice=CPU_
+123:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_OPTIMAL_NUMBER_OF_INFER_REQUESTS/0
+122:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY}
+122:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u16_target_device=CPU_
+122:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i64_target_device=CPU_
+122:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=u16_
+121:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_NONE}
+121:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={INFERENCE_NUM_THREADS:1}
+121:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutAsync/targetDevice=CPU_
+121:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i8_target_device=CPU_
+121:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f64_target_device=CPU_
+121:ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=bf16_target_device=CPU_
+121:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModelAndCreateInferRequest/targetDevice=CPU_
+121:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=f16_
+120:ov_infer_request_mandatory/OVInferRequestWaitTests.throwExceptionOnSetTensorAfterAsyncInfer/targetDevice=CPU_
+120:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithSetInOutBlobs/targetDevice=CPU_
+120:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f16_target_device=CPU_
+120:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f16_target_device=CPU_
+120:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfo/targetDevice=CPU_
+120:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=i32_
+119:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u32_target_device=CPU_
+119:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u32_target_device=CPU_
+119:ov_infer_request_mandatory/OVInferRequestCallbackTests.syncInferDoesNotCallCompletionCallback/targetDevice=CPU_
+119:ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+119:ov_infer_request/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=bf16_target_device=CPU_
+119:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfoAndCheck/targetDevice=CPU_
+119:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=f16_
+118:ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputDoNotReAllocateData/targetDevice=CPU_
+118:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u16_target_device=CPU_
+118:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f16_target_device=CPU_
+118:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u32_target_device=CPU_
+118:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u32_target_device=CPU_
+118:ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=CPU_properties={PERF_COUNT:NO}
+117:ov_plugin_mandatory/OVClassModelTestP.QueryModelActualNoThrow/0
+117:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERFORMANCE_HINT:THROUGHPUT}
+117:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetOutput/targetDevice=CPU_
+117:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f32_target_device=CPU_
+117:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i8_target_device=CPU_
+117:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i16_target_device=CPU_
+116:ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/0
+116:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithGetIn/targetDevice=CPU_
+116:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=i8_
+116:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=i32_
+116:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=i16_
+115:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForOutput/targetDevice=CPU_
+115:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i8_target_device=CPU_
+115:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i32_target_device=CPU_
+115:ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+115:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=i8_
+114:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u64_target_device=CPU_
+114:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=boolean_target_device=CPU_
+114:ov_infer_request_mandatory/OVInferRequestCallbackTests.canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor/targetDevice=CPU_
+113:ov_plugin_mandatory/OVHoldersTest.LoadedTensor/target_device=CPU
+113:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_TRACE}
+113:ov_infer_request_mandatory/OVInferRequestWaitTests.canWaitWithotStartSsync/targetDevice=CPU_
+113:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i64_target_device=CPU_
+113:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f16_target_device=CPU_
+113:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f16_target_device=CPU_
+113:ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+113:ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigNoThrow/0
+112:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERFORMANCE_HINT:LATENCY}
+112:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetTensorWithIncorrectName/targetDevice=CPU_
+112:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u8_target_device=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u8_target_device=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u8_target_device=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u32_target_device=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i32_target_device=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelAsyncRequest/targetDevice=CPU_
+112:ov_infer_request_mandatory/OVInferRequestCallbackTests.ImplDoesNotCopyCallback/targetDevice=CPU_
+112:ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=bf16_target_device=CPU_
+112:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanSetInputPrecisionForNetwork/targetDevice=CPU_
+111:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={AFFINITY:NONE}
+111:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i32_target_device=CPU_
+111:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f64_target_device=CPU_
+111:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i32_target_device=CPU_
+111:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=boolean_target_device=CPU_
+111:ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=bf16_target_device=CPU_
+111:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.CanCompileModelWithEmptyProperties/0
+110:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=boolean_target_device=CPU_
+110:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u64_target_device=CPU_
+110:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=i64_
+109:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetUninitializedOutputTensor/targetDevice=CPU_
+109:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f32_target_device=CPU_
+109:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u64_target_device=CPU_
+109:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u16_target_device=CPU_
+109:ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.CanCompileModelWithCustomLocale/0
+108:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForInput/targetDevice=CPU_
+108:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i16_target_device=CPU_
+108:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i64_target_device=CPU_
+108:ov_infer_request_1/OVInferRequestDynamicTests.GetSameTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_
+108:ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/0
+107:ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=CPU_properties={PERF_COUNT:YES}
+107:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_MODE_HINT:PERFORMANCE}
+107:ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterAsyncInfer/targetDevice=CPU_
+107:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i16_target_device=CPU_
+107:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u8_target_device=CPU_
+107:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=boolean_target_device=CPU_
+106:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i64_target_device=CPU_
+106:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u16_target_device=CPU_
+106:ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputsFromFunctionWithSeveralInputs/targetDevice=CPU_
+105:ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromFunctionWithSeveralOutputs/targetDevice=CPU_
+104:ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=CPU_properties={PERF_COUNT:YES}
+104:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:1}
+104:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u64_target_device=CPU_
+104:ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_
+103:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u64_target_device=CPU_
+103:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i16_target_device=CPU_
+103:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i64_target_device=CPU_
+103:ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfoAndCheck/targetDevice=CPU_
+102:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f32_target_device=CPU_
+102:OVCompiledModelBaseTest.canCompileModelToDefaultDevice
+101:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f16_target_device=CPU_
+100:ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=CPU
+100:ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:}
+100:ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromSplitFunctionWithSeveralOutputs/targetDevice=CPU_
+100:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionDoubleInputOutput/targetDevice=CPU_elementType=f32_
+100:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=u32_
+100:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=CPU_elementType=i32_
+99:ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f32_target_device=CPU_
+99:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u16_target_device=CPU_
+99:ov_compiled_model_mandatory/OVCompiledModelBaseTest.pluginDoesNotChangeOriginalNetwork/targetDevice=CPU_
+98:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_ERROR}
+98:ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=CPU_
+95:ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetInput/targetDevice=CPU_
+95:ov_compiled_model/OVCompiledModelBaseTestOptional.checkGetExecGraphInfoIsNotNullptr/targetDevice=CPU_
+95:ov_compiled_model/OVClassCompiledModelSetIncorrectConfigTest.canNotSetConfigToCompiledModelWithIncorrectConfig/0
+94:ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={PERF_COUNT:NO}
+94:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f32_target_device=CPU_
+93:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u32_target_device=CPU_
+92:ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=CPU_
+92:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY}
+91:ov_compiled_model_mandatory/OVClassCompiledModelPropertiesIncorrectTests.CanNotCompileModelWithIncorrectProperties/targetDevice=CPU_properties={DEVICE_ID:UNSUPPORTED_DEVICE_ID_STRING}
+89:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f16_target_device=CPU_
+88:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f64_target_device=CPU_
+86:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u8_target_device=CPU_
+86:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u16_target_device=CPU_
+86:ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelBeforeAsyncRequest/targetDevice=CPU_
+84:ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=CPU_
+84:ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i64_target_device=CPU_
+84:ov_compiled_model_mandatory/OVClassCompiledModelGetIncorrectPropertyTest.GetConfigThrows/0
+83:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=CPU_elementType=bf16_
+82:ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:}
+80:ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_OPTIMIZATION_CAPABILITIES/0
+80:ov_compiled_model_mandatory/OVCompiledModelBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/targetDevice=CPU_
+77:ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={MAX_BATCH_SIZE:}
+77:ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=u8_
+76:ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={OPTIMAL_BATCH_SIZE:}
+72:ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/0
+71:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2
+71:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1
+70:ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOADED_FROM_CACHE:}
+69:ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_THERMAL:}
+68:ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetOutputWithIncorrectSizes/targetDevice=CPU_
+66:ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_GOPS:}
+64:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/0
+63:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:YES}
+62:ov_plugin_mandatory/OVPropertiesIncorrectTests.SetPropertiesWithIncorrectKey/target_device=CPU_properties={DEVICE_ID:UNSUPPORTED_DEVICE_ID_STRING}
+61:ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait/targetDevice=CPU_
+61:ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3
+60:ov_plugin_mandatory/OVCompiledModelIncorrectDevice.CanNotCompileModelWithIncorrectDeviceID/0
+59:ov_plugin/OVPropertiesDefaultSupportedTests.CanSetDefaultValueBackToPlugin/0
+58:ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithBigDeviceIDThrows/0
+57:ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:NO}
+56:ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithInvalidDeviceIDThrows/0
+56:ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_UUID:}
+55:ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/0
+53:ov_plugin_mandatory/OVGetMetricPropsTest.GetMetriDeviceFullNameWithoutAdditionalTerminatorChars/0
+53:ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/0
+51:ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={FULL_DEVICE_NAME:}
+51:ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/0
+49:ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_DEVICES:}
+49:ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_LUID:}
+48:ov_plugin_remove_mandatory/OVBasicPropertiesTestsP.GetMetricThrowUnsupported/0
+42:ov_plugin_remove_mandatory/OVBasicPropertiesTestsP.getVersionsByDeviceClassNoThrow/0
+41:ov_plugin_remove_mandatory/OVBasicPropertiesTestsP.getVersionsByExactDeviceNoThrow/0
+39:ov_plugin_remove_mandatory/OVBasicPropertiesTestsP.SetConfigAllThrows/0
+33:ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/0
+6:OVClassBasicPropsTest.smoke_SetConfigAutoNoThrows
+6:OVClassBasicPropsTest.smoke_GetMetricSupportedMetricsHeteroNoThrow
+4:ov_plugin_remove_mandatory/OVBasicPropertiesTestsP.SetConfigForUnRegisteredDeviceThrows/0
+3:OVClassBasicPropsTest.smoke_SetConfigHeteroThrows
+3:OVClassBasicPropsTest.smoke_SetConfigDevicePropertiesThrows
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_OP.lst b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_OP.lst
new file mode 100644
index 00000000000000..7e00aa09e2d998
--- /dev/null
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/cache/CPU/test_cache_OP.lst
@@ -0,0 +1,5631 @@
+849647:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=5ae2e8ce34957ac812bd04943714d0b0ca6e2098c46caccfd775620d7f373cbf_Device=CPU_Config=()
+731668:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=2f842d4b64513c6df5748c54a1166a3f14436dc1ca59b7a28530bcafcdcde2f6_Device=CPU_Config=()
+635397:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d11097e7fa04dc0b540bf3b963cde252591b39b7dcbfae66e64ed19cd2b3b06e_Device=CPU_Config=()
+456010:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=c20603ac895226554bc910680f6be82589e053503b3067b3074bcc210f4d0ef2_Device=CPU_Config=()
+446637:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=214e4e8f7de64e9cc8c77c67d214172905cfb4b9fde65e2ef3d32bb7b4ed93f1_Device=CPU_Config=()
+441629:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=dynamic_IR=0b30cc6cee9ce5400085a0e78b44763bc169eeea93357f22fd716564f20226db_Device=CPU_Config=()
+396559:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=72c58b462f61521af4eab9c890e568b5676c7a3194c4e35f8e04f98596013c47_Device=CPU_Config=()
+396011:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a9b0552d84d057a656080c8e302afa30962dc02105abe7136cfd77f0433eec18_Device=CPU_Config=()
+395765:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=3d24c272ca88d4ee24f437a310abc05340e110f8596beb6a1ef96dd18818ebbe_Device=CPU_Config=()
+393035:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=b31dbb99720fd5083e5a7e5b1b626bda91455999e2918eb8e658992cfa6588dc_Device=CPU_Config=()
+374552:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=396388d4dce8240937c39dcd24e583e775f7b4e84d6c85fa9b5930588dfb9b56_Device=CPU_Config=()
+338843:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=77d771a04d8618bf32943e460b714076f7bbc34cd1d40f9a90864af976bea30e_Device=CPU_Config=()
+270513:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=77e1c522d9ea4975c3071869b7b485038bb4035c9aae6f5d44291f60ae253a0e_Device=CPU_Config=()
+256725:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=6daca83f4b162285c00c695825e255cbafce9cf9c9cea68b969a301105475303_Device=CPU_Config=()
+208248:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=97f6fd9998be395222e6878ccaab47f5d50561d1ab8f988987f7f292e784fe2d_Device=CPU_Config=()
+189998:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=5bed52483d61091259db863ffcd3b09c190fedde5dac72edad6f1bf37230f344_Device=CPU_Config=()
+185722:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=04e25f2a56de557c8da87110ba02c02ae45277d029964d932fe6837acc0f1b10_Device=CPU_Config=()
+172531:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5d7273e7772d3578b3c8dcefcce25913c8e843b7a1045722f80f9feed4770ba1_Device=CPU_Config=()
+161595:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=327d5120022c693531fe0f1f42429b1ad78f36cd5e414f1c8bab7d0c2ced62f7_Device=CPU_Config=()
+156520:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=9d4ebc5a7d195ea0e566695253c38ac5d02fea1f4fbe97396828ef9f7754808a_Device=CPU_Config=()
+151346:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e0f4f91a6470af49c5e2497ae8fa917051879c18dd1e39cae18d159b697e8fec_Device=CPU_Config=()
+138107:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=2cc50ee05a039bf65fd7be2282284790d9d2e1fabb4cfec509f5bed121152d93_Device=CPU_Config=()
+129459:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6eeea9355df867c7fc97af81dae6d02799239ec1e480dc2c975a60761fc5f7be_Device=CPU_Config=()
+124864:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d99c03088bad009d9be7f29ec5bad7e3b6c7534fe2649f9670b6f713bf017e7e_Device=CPU_Config=()
+122520:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=()
+117935:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=2538d525d8f11b9f4961c2a4a8cc36fd27d8b3d97271ef7db4f7eac9732b71f4_Device=CPU_Config=()
+109657:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4baf5444c85028a4cfdedc5888a7cd403e2491ab694ab65c820dd3c410f8eafb_Device=CPU_Config=()
+108857:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=cf334277b64fe023019fb7f007aae9ebf7432b733a1876d6cd61bce6a204e0dd_Device=CPU_Config=()
+108610:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=08fa156c3f25fc8836356fd1a8edb73222f9fe2b3476c0ae32a26636b5870247_Device=CPU_Config=()
+95941:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=99720c46a11f3e84906fd9327f25b187f328c6910868ac89738bc67ce0d90b64_Device=CPU_Config=()
+85631:conformance_LogSoftmax/ReadIRTest.Inference/Op=LogSoftmax.5_Type=f32_Shape=static_IR=38bcc7d745ee21a7c6858a161e269f0281d3f41d62d65d10fde9b0a9b80992c4_Device=CPU_Config=()
+82193:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=7d3d30fa9e2a8a839cf42249de3eb8228681229e8b302ff7f290cc0d00c10a1a_Device=CPU_Config=()
+81468:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=()
+78474:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=f86f86769ec214942eaf1fdcd312a29e26308676419d8fbd98fdc485c2de0815_Device=CPU_Config=()
+72738:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a2006e1eaa808a3e78550535058de54c5cd83e9a32a52e488fef1f7883c321a3_Device=CPU_Config=()
+70803:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=6838901bafb44e26f73134e2c0eb2be8f1f777ab794ae340d61b62d891ff3d59_Device=CPU_Config=()
+70509:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=27d1a1cfdbadd9a8c2d0269f6177d6aabd55320aafe9a0047e90681dcad1cbe9_Device=CPU_Config=()
+68993:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=59c0e972ae75900cd8c802aa7be9b6c13c96cb10417ff417eb1aafbc49b891ea_Device=CPU_Config=()
+67199:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=287a7562757ef0295cc38442e3d775cff0fb1ea9b27e6897bd456f01ce82d455_Device=CPU_Config=()
+63154:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=dynamic_IR=a99a5ab2de2d408c2e40ad5734c9bd5ab4d1d221f4dd24572e05538b134ef88c_Device=CPU_Config=()
+62584:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5c33d593e408ad72bf438729a423318330c69c69f1504402420635942050ac06_Device=CPU_Config=()
+62058:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6e9fb2accb692c69349a88158442052e6350143ca7dc28f2525d8e8df29f8c78_Device=CPU_Config=()
+59834:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a78437a93ab424a706d064188d1bc0971b2e1afc98a74fea979a6f8b99036597_Device=CPU_Config=()
+55761:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b04f836c4ed5b0403f4b7fdf9c5cb8d11ff9f65105ab9bde39f80191a65f7f17_Device=CPU_Config=()
+55653:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c5637c5151109c002830514b8b1450092dc52df14146ecee467dc54469a77718_Device=CPU_Config=()
+52008:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=978c6fe274296020718998393e7fe94bbe0a0856fc377aa474df0454534824a6_Device=CPU_Config=()
+52005:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=92c3646daf445784fceeb022afba2831938fed34660bac5445f033a1efdccc34_Device=CPU_Config=()
+50210:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=bb5cb4e2a8cb9be32332ed3255c99de478d8d2e31cfb1747aa322df438ebaa49_Device=CPU_Config=()
+49713:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=1457b3d8c7f130113d74f540dfbd2d4062f869018f7b1afb11c743acc0a007b9_Device=CPU_Config=()
+49506:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=28e31d83986a1435f11ba6355b98472025fcf2c3c6e090103283d9486356b5de_Device=CPU_Config=()
+48827:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6bbd8d7f90e7c210514c28d527eb33bf0889b1fafbd5cf7d9660532f5d6bd940_Device=CPU_Config=()
+48749:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e27f0bcb3118a7cdb488f4685707bec982ae54ff8bf7e97aff9ea6ecedd66714_Device=CPU_Config=()
+48376:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=29d8ef1a41f51b6fed0300f97d17a3795a97e4ffb3ef3abda37f790f5f53b389_Device=CPU_Config=()
+45924:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=59d132b45e2ac60a670eb43efafa43f065bb43d492208ac670fc8234b4f618c9_Device=CPU_Config=()
+44983:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=6119edc6e1f969159ce54e6ff4451d96db51485b54fae625a972035414c704ef_Device=CPU_Config=()
+44976:conformance_LSTMSequence/ReadIRTest.Inference/Op=LSTMSequence.5_Type=f32_Shape=static_IR=b8e32896d2ab304fb4fdca3924e0110852da92be25307f30709cd7d897c2f038_Device=CPU_Config=()
+44579:conformance_LSTMSequence/ReadIRTest.Inference/Op=LSTMSequence.5_Type=f32_Shape=static_IR=1f24aeeef6f9f91272546fca89299c1ce448b0008fe43905db434ae3f28a75d0_Device=CPU_Config=()
+44495:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=f32_Shape=dynamic_IR=f550a37ab884668f47ed232e7119c2a2baa814c98fbbcfa3129e7a00feebde0b_Device=CPU_Config=()
+42892:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=a05c6778a396b4eb89a5e112fe505a41f47ff6bef50fa025eee1dfb7ec6a95e7_Device=CPU_Config=()
+42101:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=f32_Shape=dynamic_IR=848de524e27e13a1e5b33e5db3cdf2710ba4566c3219a018e878f998c07dd718_Device=CPU_Config=()
+40769:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f13ce39b60cc25991465a0c02e27edcb35af0523cd28004adf6fd9acd8a5fcb8_Device=CPU_Config=()
+40683:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=ca0d551f3da549b28475d996906bfa5202402be286f59f9bf53ac809c9fceb49_Device=CPU_Config=()
+40680:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3f830d5ee243ca3f56d027f95929bbadd427e4954e286e6c890ddd60f9c5c2d0_Device=CPU_Config=()
+39521:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=6289232b1cbbafc963ac3cd787330c81a9cd02def9fefb83d6f6cced249de92f_Device=CPU_Config=()
+38647:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=31ce051edcf02344a693eb2d200fa02b53412a5707faaffc2907cadcf81192f4_Device=CPU_Config=()
+37717:conformance_GroupConvolutionBackpropData/ReadIRTest.Inference/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=29c89ebfa45163b40be304d7bfc96f3068cd96175db94e6ebda942d3c4af538f_Device=CPU_Config=()
+37206:conformance_GroupConvolutionBackpropData/ReadIRTest.Inference/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=74f34c8b7abfe0f7afe021ba5d4861e29f9f3915beba5cdb2af936f1f2409fb6_Device=CPU_Config=()
+32780:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6b86bf4f834b297dcb461acb5854aeb9783a381521ea1a8e1cf4fbeb60d6d09b_Device=CPU_Config=()
+32716:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=acc81187b83e3de7c3d0903f40daadcadff63455905c00ff2f98498f21bd68ea_Device=CPU_Config=()
+32707:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=40c74727a381659b1343c4083d7f903ac2519d5297703fd15979a32f820adfcb_Device=CPU_Config=()
+32456:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e4be028a5a300682b79da2f015dd1c1b13381b38b19bb76951e1f26439173212_Device=CPU_Config=()
+32446:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e62245706f4242ff86bcb70d4d221bf49aa31db3807698d574125166bff5f8aa_Device=CPU_Config=()
+32265:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=ccef47523d9134720116dbd4a37d5038c9d15e2c393ccf1a6d24c3790529c282_Device=CPU_Config=()
+31838:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a2ca34430931dd41f08f2b3cb8163ea5c1889a23b53d0f3b7d26b7a8af1acef3_Device=CPU_Config=()
+30604:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8c773c776396a2ff506691f857349efa9a4a580f1e046d1f17ff2ab49c73553d_Device=CPU_Config=()
+29187:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e58cf21c9c62dd427747021dcf9544157638e0773329eecfb8755a71b24f65a8_Device=CPU_Config=()
+27559:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=2f7925a034999529ce07a5c8bed2b2c7aeeb7936f74730d9c8ca5a5086dea4cd_Device=CPU_Config=()
+27187:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=cbd851b8c4e89bce3a20b8795b3bc5a0105d26e252a4674541ff630496144aaa_Device=CPU_Config=()
+26806:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b3fdb9be3484a0c498bf40f1a102c452eea04caa5b1dd627e8267087df0acc87_Device=CPU_Config=()
+26668:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7d706b614d2b5d59c5e152bbb61a8fd558686bb3b8e9fda199c499ca49f03042_Device=CPU_Config=()
+26456:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=7625f5af6c70a9d4bccb783dc369a11b53ef1f6492df030ae5404452ea0cdc79_Device=CPU_Config=()
+25831:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=b7aebb27d8d2b43e770ade887778c291072210b947b77b1b92e05d3327843977_Device=CPU_Config=()
+25671:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d16722dfa770998d9923d09fa1e2a973bac5ae7afc6452a0b5ac21d839720bb4_Device=CPU_Config=()
+25633:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8710c3eaa10d25119059f4e15970d8a6381f978cd905fc8eb1b4d43a36d1d5f6_Device=CPU_Config=()
+25404:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d6d8f4f28ac34b734cc984f83e8f5f6598c063a6955d00ef4c08252d5d05c276_Device=CPU_Config=()
+25192:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=66df22ce11e7009aea35ba6a11b4294eda44815bf041eed0721499a3d2c484b1_Device=CPU_Config=()
+25026:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d962e7157ea216206d6c5b11fe5ef6ee162a1f7dc20f84a3b058e405c324a592_Device=CPU_Config=()
+24415:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=12b6ad1cd462f676c9add533f2fb2a5d98698e72fc5d0e6dc984abb27f54475d_Device=CPU_Config=()
+23343:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ae9604aa3fcfc361f87562022cf6996fb2cdd9c356eed6a6eaddb14e103b6b73_Device=CPU_Config=()
+22724:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=10c19142631a9ac6d8026ec82820aa75ba1e14605fe5ea1e017fa4bde4a90c44_Device=CPU_Config=()
+22581:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=074ab76268ab5d03283f03f4e228a7cf73ab5a18fc0e7366778cf8c45286f18a_Device=CPU_Config=()
+22370:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2ef3273b8c144dedd6cc2d2b8c2d2921d999fa286b10d90aa796fa188dc52cef_Device=CPU_Config=()
+22356:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=8e098b9c129ab30efc257d55cfbc737d990d2ff0f7931039d3335c42d5f286eb_Device=CPU_Config=()
+22117:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f2eb693da69b0ad1af3bcef6c4af46ba2b92897f76989c310a65aac5c2027725_Device=CPU_Config=()
+22044:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=9a26ec9d1e0c4963016ff36986c79f5faed763ca5189215923d375e43c70a17c_Device=CPU_Config=()
+22028:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0182ad6b02d77803accd2ebe55d87b679950570d1dcfef2940adcbb5fb9f1a24_Device=CPU_Config=()
+21992:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=030fa97d19aab57ae9eb898fe101353fdc76bbc034d4574971c68ef254006c85_Device=CPU_Config=()
+21939:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e250a19bfbe236f81b6715a92beb0c259080e4a5d379ea1187892e8c8d9add8a_Device=CPU_Config=()
+21548:conformance_ReduceMax/ReadIRTest.Inference/Op=ReduceMax.1_Type=f32_Shape=static_IR=a3b350b1516cb0391e088317ea67433757a08847710c4a4bff143922873208df_Device=CPU_Config=()
+20799:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=f45b24f3bf21a2c94bc89cdc3d20c283d47f4e6ea386444897330e232bd7d90f_Device=CPU_Config=()
+20662:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=355bfa53a1f9e712db4df6642a51970e96e3612583b2ec90e7a8170e45b1625c_Device=CPU_Config=() +20314:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a76c4cc0a1f2294a3ceb18dd5d214d842cf37c08d2e34770c66c29b44ee92e48_Device=CPU_Config=() +20067:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=df19449b7a95887e834ba16ebf2e1f08416d6293686a6cb6b6cf39fc82559595_Device=CPU_Config=() +19334:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=65a5483c793396983edaf7f2cc2c13898507525bd84a8469e97b2d662b5df782_Device=CPU_Config=() +19317:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b91f26a0b7b56224c507de772631016119cd0bc3fd49527013f571e2db477402_Device=CPU_Config=() +19108:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=50a0e83d438a3220ed14dd8ae783e92c96381f645b10719669054ea944297244_Device=CPU_Config=() +19056:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=99866ef63c9a2e7e2d9b7f00d11a4c177775bef9cfdf074e83f56318c143e6a3_Device=CPU_Config=() +19032:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7b1df1422bfecf1fdf9c25f72d938950cb1492ee1c7223d9c0d771f93b1fbdb8_Device=CPU_Config=() +18804:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=5adf6fcb72c0d6086a95fbbc5744e7d02dfb32490e0f42c62b57bc98489b801c_Device=CPU_Config=() +18330:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=dynamic_IR=85a35059512fed9e0c70cdcbd5e73c1e247ef97821d5193cbc4f7f7c3ebbaef8_Device=CPU_Config=() +18149:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9e0cfe97e08c7b2974ef224799ccaa3fa777802a5fd320a089e527f00a594dbc_Device=CPU_Config=() +17854:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=67ed6a8048424f4e44f40c542faf7a2a2d2419e81aa982fe32a054af05caf309_Device=CPU_Config=() +17283:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=89dcb383b48e2a4423a7c81461f282b74b1d9ab0f48f0a0427cd4c599672f3fb_Device=CPU_Config=() +17094:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=cdc57df56ccf890a00f886c3b83f504d24ea9d4ed5f0ef05f1189879172777f8_Device=CPU_Config=() +17045:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=3374f930d0ffd26ccd7cb542638f2386ae5f803b5bdce4d848ba1e93b4a173a8_Device=CPU_Config=() +16722:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=970f3f48203f3bd46dcd6ca55ad20f5ff8ad2426c3f6f74377759fdddaaf93cc_Device=CPU_Config=() +16608:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=07849f3111a0f12a712cb0deb7ec9c4778e70120385bdff7f17c1af30e31062c_Device=CPU_Config=() +16595:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d2d4814c8ab7cbe5107a556fb3e73998aafae0278b0d304fa07fc4ac9fad4559_Device=CPU_Config=() +16560:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=42fc9a4f8909a26e98a1938054417339bbc3c21668dfa2792da78fa1ed8eb49b_Device=CPU_Config=() +16417:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6017d3f7ee3d7e667e8e7e4881f9aae335d47c8617c92b18ec370aa0770314d9_Device=CPU_Config=() 
+16215:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9360fbacf32f2208bd7f241535752ccaf434551d16bd8fd46d0422cd1cafc3c6_Device=CPU_Config=() +16210:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=61f6b4fbde686888b82614a5d24cac53e835377c4cfa791ace3f3cd3f8ac2dd8_Device=CPU_Config=() +16028:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=37ed85c113d481da6d55c0a820d49090a8b256694e0f1b111feded60fe708279_Device=CPU_Config=() +15403:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f9b090cbcb19663630a1490fe18357b752e430ad793c0e3aaabedcb74ab64934_Device=CPU_Config=() +15230:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=b9581fac6848b0c6c9fc9af5fd17eca3f2f64832fb7205f97684f1cc4c1985f0_Device=CPU_Config=() +15180:conformance_GroupNormalization/ReadIRTest.Inference/Op=GroupNormalization.12_Type=f32_Shape=static_IR=3e0fb4df6ea780921a8ef21a06bd602e97f91baa201096d438de60e9114acfb1_Device=CPU_Config=() +14925:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=29eeefa6ea54ff2530e2e17153db324026e85d4e45432c053ca066699187bbc5_Device=CPU_Config=() +14748:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=8de274a65748ff76511a53b614cfb33651d2b51720714851a16976fc1ee2b6ea_Device=CPU_Config=() +14304:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e23a8faab46e1096894a906794325ff1a8c6001d3b980aa809088385675c77ed_Device=CPU_Config=() +14151:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a0f8789f0f95beb6f28efc829bdf2f99d34a3e9397ad1a80d7831aaaf125b5eb_Device=CPU_Config=() +14057:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c80a104f5912c91154ff9731be5aaf1ce189988eb9689ebc32cf4bb8f1307615_Device=CPU_Config=() +13911:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7caba2dff8ab10660f66796a39d8d2a78f3e282f0629c2ecbee9b90c34e62aa0_Device=CPU_Config=() +13718:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=fcab2b4b3bf1a04070e3fd3490e6317f2d6870335d302d96c768f40da8565c8d_Device=CPU_Config=() +13664:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d9b3427efacda497c4fb86cebe89023b322722167d0c32de8a2602a80b23580b_Device=CPU_Config=() +13656:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ae0e669fbddc34e8aaaefff248959e3fe53196e68bc1b3a9e66be16a495d7cd2_Device=CPU_Config=() +13630:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=59bac5d30704b81def0385b29fb8d79e459a71b9251b4f6e94116524bd9aa7be_Device=CPU_Config=() +13600:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=78a5e7f340d63660dc0710d0e390dea2d3f68ac98f16e8dbc11b4c28ac0440e0_Device=CPU_Config=() +13576:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a1e0bbe02c433cb144b4825a9f1b2c30c03743f210830db5462736850b6db383_Device=CPU_Config=() +13571:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=3866cad522b1a4da567b64df204a69863faf25dd6e09f85dc5806d3101689458_Device=CPU_Config=() 
+13536:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7d3a099a5040e70c73014df347c478d0976123d68b6fcab6bf767f90bbdf8e6a_Device=CPU_Config=() +13455:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=2e06088cb191d8d26309843b1285b9ae4a1eb0722e1370875edde7fd2783851b_Device=CPU_Config=() +13422:conformance_ROIPooling/ReadIRTest.Inference/Op=ROIPooling.2_Type=f32_Shape=static_IR=1a0e3f63698678d2e6bb8968fbadc98227d9ce548e77c53021412d80d7711753_Device=CPU_Config=() +13340:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c39e4c1d9cbf5b8730644e1686cc09f36f7e4a4b89cadaf8d8902fdb27993a7a_Device=CPU_Config=() +13301:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a4772901ff77541ae624f89db89901c7d5a502a0dc5d1e0dc21eb8e08c599525_Device=CPU_Config=() +13259:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=776ce5493890837f137a7abc7851ff04164468d7c13ef1022f73f1f68e058c1c_Device=CPU_Config=() +13216:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8ad9f9e5cb26eb922d7d7d80f93be2e9d3a5ef344a013c9dd546df2ef195ec24_Device=CPU_Config=() +13112:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=42f3f3a5b34aacb93147f9c77ad5709cf7436ae8cad9318434a9b6ff6852982d_Device=CPU_Config=() +13108:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d46034925bf5b01e31b5a57911fe30f5dd09a8712432312fb1efd844e69913bf_Device=CPU_Config=() +13078:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0fb6a7848271e000d49d4966647edf55e65f181523883089f43147c14cfb9871_Device=CPU_Config=() +13042:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=dynamic_IR=0f5965e2daa2a1f6b050813850956d9a4bbd771cb234ec814617099e1541ea0c_Device=CPU_Config=() +12852:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d4b1dbc565a45f6c9f60cd4a73bb15c0f9e05baadfd3acdcd5e133d782c54cbb_Device=CPU_Config=() +12629:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6d71ec3285f12c65001e4396546f6c8c02215560675397656d85777f0c9c2644_Device=CPU_Config=() +12604:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f8795aaaf3fb96028b8cdcc963cbdff4c3362d78c4801af4172a73a3cd843edc_Device=CPU_Config=() +12589:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=e77dc4aecdbd4ab3d67fc3c1d9e350a9d259af1d4c0188d680121a31c6ed8ccf_Device=CPU_Config=() +12588:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=5f45e938f9e6d58ccc6bf771049731f2d9c4a8b0ed83e2a1942ac69ab76984b3_Device=CPU_Config=() +12584:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=776b4b6d6b102654bbc08df901869e4d16af505a5dff7f2d27686874bd20ccc1_Device=CPU_Config=() +12561:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=beb6b43d96ce20db13ecf6abc53742fdc20d2221ea66af01e3c945348acf9bd4_Device=CPU_Config=() +12431:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3c0b9fab07568e0eebb5e5d068cfccdd617ee6e98e4253a0461ea8d3f0f582e8_Device=CPU_Config=() +12372:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=7201a55d869ac6072af38ff89dfac3cfd2e6720d25f7607c6cc5f80040a8e82a_Device=CPU_Config=() 
+12364:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b2ca18b9d9f9e7c05f66a1f197b65ef9ca1d59319ed5f30d4eadf6f8befcd9bf_Device=CPU_Config=() +12352:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=008176749f0b2cb46830abe910865d8cf1974cd62902ce3e157a03df2b1cf9c3_Device=CPU_Config=() +12342:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=243bd2256612673dd04651521ed8d3fa4087c90af7b85e1a4aa381c074bacd47_Device=CPU_Config=() +12328:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d8fc538fc2b9ca150eb22763e4c7416c002b5c7fa6481314201540178e940a78_Device=CPU_Config=() +12304:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=28bb0064e4cb56c497227ec69899b08dc09cccbf7d390555416aff617a393f81_Device=CPU_Config=() +12224:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=225aaa01462e6e43c0c12cff65f96e7d9c07d368a820ff3c1b2939fefe86d492_Device=CPU_Config=() +12068:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6e508ca44667fb311f5b6d634584d2751c3fb15fc034626765c90695b7de9619_Device=CPU_Config=() +11931:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=64efb6dd46c36bec02b92148d178bc032417c8c2d999ff7b0a24ba08af365f91_Device=CPU_Config=() +11786:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=83e2d01e24eebe910418ed24fb506852c37576ce70c18d27de197f675f49c9d2_Device=CPU_Config=() +11515:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=eca24a51b737307a94a918f4d03923c1e035a3379c73359515c63ff3ea98be85_Device=CPU_Config=() +11406:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=780fe1f9a82f728f88511b2d8194c4f425144ffb5ae4aaeb1ce90c6fdea3362a_Device=CPU_Config=() +11018:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f82842bfa510be994f17d9c00d43b6f67b232b3a41c64ae276e243610d927d9_Device=CPU_Config=() +11009:conformance_LSTMSequence/ReadIRTest.Inference/Op=LSTMSequence.5_Type=f32_Shape=static_IR=981b213f0fd1305e70515849fd08553471da63e6bf64827a47cc475fd4ed9561_Device=CPU_Config=() +10912:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=e77468c2881ce0c38c14038151d560ccadc7dcbd5eb5f21b68b8e227c89813a7_Device=CPU_Config=() +10891:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3b3a5cbc6a255792eeeec698aa5a90947164eab96ec744ada9d02b6c7f453f8f_Device=CPU_Config=() +10877:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=f61b45eec10f28e255a0f82842384e1c947830dc5d5618bf00c6385cecbab8d5_Device=CPU_Config=() +10736:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=9ca1360242688f494c59b8eb1073a4bf7291ee7b2ff460380bd47248fc591dc1_Device=CPU_Config=() +10405:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=e03d85019ea733c10b7ece4721036f3aeae2e60179d9b044d34e862608fd36a1_Device=CPU_Config=() +10289:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5beb9762188e985c9554ffb0a05fdc1608fb7d970baacebbbd7118186a324617_Device=CPU_Config=() 
+10200:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=64551d094520cf00d40fe514f573f5f37f61416bd456474f4b0a21788c4ffd3a_Device=CPU_Config=() +10072:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=eeed611756b048927c290a65dd92a5833ad66d347bbc772abddaa751f2016ff1_Device=CPU_Config=() +10071:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ed872c2ef0d35af97e7f9be84d83eee6d42f2fb279b71f4feaa1aecefb450a28_Device=CPU_Config=() +10009:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0e78ae14fcef33de9637ac99e87f672b3247ea32c221a4b555b2e5bbdff88788_Device=CPU_Config=() +9912:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2a819b46a29c8bd965ec330a28b5c163dd0a06fa2717d71bd16493ad460e8dad_Device=CPU_Config=() +9886:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=eb98c3593d72ffaa01de42caf4832854d9486b4148c57742c6dd72a251f8cb45_Device=CPU_Config=() +9770:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9465b2ea76ea3be1365dfe1255524d4ecce0dff6123e929a2157bfc767396b0c_Device=CPU_Config=() +9748:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=cc5e06594accd8694073f3ebe702319fe0711c3b7d4db5e06072d83eeb7cb096_Device=CPU_Config=() +9638:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2f96ff03126561aa475067ad88e454b2da78fc8f0b816dc6c01ec5c81568288d_Device=CPU_Config=() +9593:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=b0a418fb8ec50f25147079b3aef1b13095ea626a9e52a643600c39972982ff9c_Device=CPU_Config=() +9571:conformance_LSTMSequence/ReadIRTest.Inference/Op=LSTMSequence.5_Type=f32_Shape=static_IR=f36a3f626860d7088b33d97a5a6ce009c89609c142158b256aeb6b5e6dac02d0_Device=CPU_Config=() +9501:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=562ad06104aa1fed1781e5e3438d71855e1ee7e0126457f2d8d8d415f9c30c03_Device=CPU_Config=() +9244:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=80bc3dff7b0901463ccc52bd8e4a8e7522b1e9768421de45e63bdf8db601b9d6_Device=CPU_Config=() +9202:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=1a0639f04a426db13dd7cfac918ec6e2254e1cb8f18e0853e3bd597cdf090421_Device=CPU_Config=() +9200:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f166c58732107cb0c82859af62b8fc0d3d144468ab66ff4615a1eb4bd325d3c4_Device=CPU_Config=() +9130:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=27a43bf8c20a81f1e244ace4c53f7cd9343a2603ba2c8b50bb041a4046ae6ecd_Device=CPU_Config=() +9084:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=4a9237e5cd29f0d2d5e738891752c6f6b29c9dc4c29d130b9c9921ad5787f819_Device=CPU_Config=() +9007:conformance_TopK/ReadIRTest.Inference/Op=TopK.3_Type=f32_Shape=dynamic_IR=fb3cc70d8993f96508516aa7a36cdcb9973edd563c78a7d6d5ac5ca9f816e3fd_Device=CPU_Config=() +8944:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1b59316585dcbdfdbef9fd71e2681207498cc867a2285eff20d125c4fca0502c_Device=CPU_Config=() +8850:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=b7973bf8dd344289b971d9b47575d6793643f503e13bb83c4e9c2a2863570b7a_Device=CPU_Config=() 
+8743:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=57b104f3a1796c31d59d676d9f6d65789ed72fb21beb382bf418c452b8452d27_Device=CPU_Config=() +8743:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=e020cc29b6ec76cfac0e0b52ed3024458fbeb567c4fe9932eb5257e3ade79b95_Device=CPU_Config=() +8732:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d671a241de6d46bd5562def47a92602d2c9ba076568feed303765168433ee89b_Device=CPU_Config=() +8615:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d932ccb58823509e768be954dc85ef1162d9456db17138d650a2a883e31b99ed_Device=CPU_Config=() +8601:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e6aef819fecf54f7c98b15f330074d33ea0ca6c8fc3edd895b362f04449b12a7_Device=CPU_Config=() +8548:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d9771ac46751569172412bbd4495eccdbac435f78a97f8fdfffa9215faa74544_Device=CPU_Config=() +8477:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=95ea118f8053f6bd18c8f34bbc475c00921bab5dc3af177492829d5cba16aa39_Device=CPU_Config=() +8475:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=90cf12798b53937dd1a31daebe5444e1c10c27c5a67fcde6dc61b5feb1df89ec_Device=CPU_Config=() +8409:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9b4725171957a420a98f908742f18062fbcee198871d527ab5b4d939005ac4e6_Device=CPU_Config=() +8390:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=dynamic_IR=a9636e6e43bc01f8b1cfcfcd8e60e4ffba20837d0d3b80429c93f23cd8da89e0_Device=CPU_Config=() +8203:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8eef79ab2081a12ed39f5c6f8f2e917d14685f54ccd0fcb0e19865740ca7d608_Device=CPU_Config=() +8040:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=fb5525d36d14f54eebc5670c06232ca4e32cf920d309b5777e37d3377d386433_Device=CPU_Config=() +7779:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=2a9ba5f3e5a74f05be93e288553139a15242f1500e1eca8317dbd82ee8cf00d1_Device=CPU_Config=() +7739:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c161ff64d4c506fdbe44d0ee76042f958f5dfce778833653628a026de01a3f9f_Device=CPU_Config=() +7687:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=aa6c3816ce7ce49f40be5edbe957468e80910a8eb5a3956f54d89fdf7c264b44_Device=CPU_Config=() +7640:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=183e5203c7008618a9cfb2680265bb3f588f80c2493bf7fac92eb258e66da2cf_Device=CPU_Config=() +7580:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=c4e2668f98d5c21fc085695c9b6037f08a1e6710e1854fa73b7465a618e99b95_Device=CPU_Config=() +7528:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=5976ce31ff4cf399e87efd691dce3e75dc2de962241a84c09538593c9865b257_Device=CPU_Config=() +7446:conformance_Tanh/ReadIRTest.Inference/Op=Tanh.1_Type=f32_Shape=dynamic_IR=8c78da5f8bf9c1a4cd7f89cde9d61eb6500fa10ea0454e36a585466ed97fb12d_Device=CPU_Config=() +7434:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=26d09bb7dc7ce95aac39023ac90bd083da9101b9e7383af49e7467e4f0571f2e_Device=CPU_Config=() 
+7343:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=95d9789ef78c733e0c7972738bafd4da289a90f0d9ea00bc9452192173390b6f_Device=CPU_Config=() +7306:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=004b6fd9b060324a42aad296dcb21f5b7eb7586c082f98d23f25a6d882f70c14_Device=CPU_Config=() +7292:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b3e45847dae7906b7f320b6a751727593b35ad8659ee80a11caf445f44f392df_Device=CPU_Config=() +7276:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e2734d3e803c031e2fd56d0c9f7a72818227bc7981d9f7d9d1148f1cf07135fa_Device=CPU_Config=() +7191:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=e894ea68d25e2a0af6fe2e330929c3305710bd07aca8e099b727df78fb26cdf6_Device=CPU_Config=() +7187:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=f76da5edfb7a9e3fa7cec034fa43307bce74eeb0629176ae5dd40d154baf858f_Device=CPU_Config=() +7172:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=5a1aa66136ca2be83f714067139e11fcbf672d73f8b28c57d29333b885a17f83_Device=CPU_Config=() +7058:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=65493d18baa6530c757031b74c5fbd51757e2b04bb79149d3acbf6c40bac11c1_Device=CPU_Config=() +7042:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7a3cae38e357ee1e5b0400c7e1256cc8a2d78da81911fbbb3ae6d9e510d78aac_Device=CPU_Config=() +6976:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=d8432e7d31bcf4d95ff7ab845a6858ea67cf751c7ef0fca60a9bab1d187fe3cf_Device=CPU_Config=() +6865:conformance_TopK/ReadIRTest.Inference/Op=TopK.3_Type=f32_Shape=static_IR=a56b3f758c88a5723e4a2cf04ce46c92681ed7fb0d6dd7f4d5b937dbf00b0eff_Device=CPU_Config=() +6823:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=ba1b92833f2c8734c5178762b6cd8c847c23027ecf79ebeba295c39b667162a1_Device=CPU_Config=() +6737:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5953b8e79f64e33e67dd330999ff8e3d8391c8f3fa7eae519b117b1273c8c19f_Device=CPU_Config=() +6702:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=fbb85f74ecfa0ffc50b9e6ce637911b406f1fd6ad054a886b9c6ddc6bc898739_Device=CPU_Config=() +6699:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2758266e894d04cd7283921f678a468cc1fced81d1a09a3c95add3ed9e5d6719_Device=CPU_Config=() +6606:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a7242174afe3f7c2e95d31cd14d56ceb0a566e2e8d65ba97e07d004200f4f517_Device=CPU_Config=() +6529:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6a7aa747b98a21c0469c7edf7ef78a050e1279d891b0c69ddc071befafd42c76_Device=CPU_Config=() +6473:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=e2ab1cf295df4df47d43e632065bf8a48fa58e6f3a6d1bc971b45fe97a66652e_Device=CPU_Config=() +6464:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=a3de81c04a0e7d5cab275045415ab4c294ed3270588c2ef704ab6db5514ed0dc_Device=CPU_Config=() +6429:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=166d8442037dcf0469f0b14ab83676b30bce53edd79494c52a575e3744920c4d_Device=CPU_Config=() 
+6403:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6513dbb80f00e325d6dfc953d1208c5834199f75a60430fc85925ed6eb0d9bb5_Device=CPU_Config=() +6355:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b2931a4972ae4f946778af45cd5824e6958dcc1fc79cea4da1032590b2663d16_Device=CPU_Config=() +6348:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=138e0258106faf2065b52655adfb8b45d49b677f9cd04850bc5ac9335a9d16d7_Device=CPU_Config=() +6336:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=afd856f31f3a815b84c34b66e1ba0a70a313301ce82fdccc2f1b779ad3157d4f_Device=CPU_Config=() +6263:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3147f462ceda9b383de633ac08d6014a7779e74b169d3745990fa2b2799b1dbd_Device=CPU_Config=() +6239:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ba28829f211d64d6d4922682b85f1bad6a3c28cc30b4f9651186b1e8fab39fec_Device=CPU_Config=() +6225:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=90f981f067c23b4fd3d2df838af8e6d11ae1c5e9465b566501628c7f3d63674d_Device=CPU_Config=() +6189:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=a895a5053f72560fa5e36ce8b68a8de0cde25ddc1152cb1f647211f1b570d172_Device=CPU_Config=() +6117:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=3c7c072c9e4ee694e049a5f256cf0e72caf85384291ee8d399ce136d22c575a3_Device=CPU_Config=() +6110:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9fbf4ccaa68a81191afe2432a2212ee1a559df380d602459ebd2d0266053d82d_Device=CPU_Config=() +6091:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=93dee798d72e36c04cf60499e95f84cd6b63d84226d7dd1dc0edcf0875cf301f_Device=CPU_Config=() +6035:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=aa14d6e18f8580015dd7d32b167fba6ee137133b87fd617eab4599f407a51b69_Device=CPU_Config=() +5864:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=deaa5ef98e478a5850df528107031c9c7bfa6305bc7507325c91b98f9337b0b8_Device=CPU_Config=() +5855:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ea8fff2db5032f5015f68d53904354d4bdfbe5288224c7f549a1573794455d80_Device=CPU_Config=() +5789:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=abd733caa05592feccf41344f138de6625efce4afe605efeea57e0748d7b2e07_Device=CPU_Config=() +5767:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=8fdd77d8381b78b82c04360bc3f05a358bd690bd8204e2cdaa2c0a65bff61a41_Device=CPU_Config=() +5758:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=0a16d9d3d8d553c2c747b2c68b12eee3dcc016e29e597992cad8f83aff0aa759_Device=CPU_Config=() +5752:conformance_GRUSequence/ReadIRTest.Inference/Op=GRUSequence.5_Type=f32_Shape=static_IR=556de70b55386fc9a264a24a9000d075a07636de6461cc5f4cd41af639b0597e_Device=CPU_Config=() +5668:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e00cea4f2ea99f32c11ea265ecc0483554192192bb99f36438dd38de09820888_Device=CPU_Config=() +5656:conformance_GRUSequence/ReadIRTest.Inference/Op=GRUSequence.5_Type=f32_Shape=static_IR=9f7a30c4f90df2edf8e70468ac22f325bc97e99613fa6ee2aced93e71ea5896a_Device=CPU_Config=() 
+5641:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d13d862f7b8306948676388381950639ef433dcc4e38f5a6fa8d50575d1aa814_Device=CPU_Config=() +5611:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=4d2e12e00779d116e2192ca77f2be233d76bdd5ce366ddabcf436cc205a9f811_Device=CPU_Config=() +5600:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=033c6bc337d14053ae097dcbee99ef5de7cb7728b589cc8d64783467505a8ba7_Device=CPU_Config=() +5544:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=90deb33b54746ec16cf8594f8aa0792c6aab2e27ff12ed97523da583402aad95_Device=CPU_Config=() +5535:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=d93633fac99f9472435ede6fcdb9c72475b68bf1352d58b33e8cbdf9ca74ac50_Device=CPU_Config=() +5519:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=aecc8a062c16343ac138f351d774858b523e42d5a09ab67b1b61e64fe62e73ff_Device=CPU_Config=() +5412:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4ef9d8687805658001fa7650e660620d74bab09868b356603c268bc8cdf7a5c7_Device=CPU_Config=() +5407:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=5b1fc9693e4e947bc88a88bf1ad22ee2f59c13bf291626eec3e8ed49b0cef7ed_Device=CPU_Config=() +5341:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=4e14d87b7667a7900d4427ec46c72eb3c7bfd2e3d86e5bdf92eb2485059b4951_Device=CPU_Config=() +5324:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5288d099588f5876e907c5cd750c9f0b2191d1ea060881e80af1006cfad259ac_Device=CPU_Config=() +5305:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.1_Type=f32_Shape=static_IR=2b1509d227d4c32fee4bb0b7ac59d4ecf5018afce9fd19714067a20d01933455_Device=CPU_Config=() +5304:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7b42d3a61f732f3639d1ae7011b86158d070acc922308a18f00a01b9c6a60ead_Device=CPU_Config=() +5297:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f0472c0e5ff8fb82651424269bd9f77e73eff6c43c70b6192f07303c0d35db8e_Device=CPU_Config=() +5295:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=4212a754718adff414309fb1da18c4361792b5478366bfdc0994490c7bc716e3_Device=CPU_Config=() +5258:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=94de295ab12bd6b03bc5de22f9e9c46d5875d111eb942d3ba35f8e2456ece1cd_Device=CPU_Config=() +5190:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9beef927f57c512d381a87a35982fe4ca7a00b9a9d50ede54f7baecc5ec7fa0c_Device=CPU_Config=() +5165:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f83f2d7d9c08aaf30635b39b51c0d7f1f622b4624da59c6cbcdf28d42470f11d_Device=CPU_Config=() +5099:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=ff39aa885f7ecc22a06f668b79fef4ac41b3adf8dea82f428711b241c0fa6059_Device=CPU_Config=() +4982:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=06069a23d29d9bb6910729ac49ce1466e4fc6185c6ca31fa54fe7dd3289c41f7_Device=CPU_Config=() +4949:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=dynamic_IR=516ad610612780fdaf83c5dc151316e83772eda4700882f934c97b2a2bd86dac_Device=CPU_Config=() 
+4872:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=dynamic_IR=791be312b2af6da6abd2eadadc6185c7052271efbcf314bb678828313fc58414_Device=CPU_Config=() +4839:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3888863c4f725445490846561b2aef4a5498ef1583903b365fb864e387eb9641_Device=CPU_Config=() +4816:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3421ca968a9f4061cea0492ac3920fe1a29fb35093314cbb56a78bbb136d8fc7_Device=CPU_Config=() +4804:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f8a096633b64512b865ea5e4a57529cbf621afedcb873285bd5e24cdb199a46_Device=CPU_Config=() +4773:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b78ffc69401084763d529e2aee12f9b9793bc92be3eca3df2a97730b9a252ce3_Device=CPU_Config=() +4760:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1385af2553c7c9b0f9ce2aa4345d8b767d36136a9cd8e2acae79d4970d6b5c8b_Device=CPU_Config=() +4735:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=ee1f9348ff09a058dc09cd63581663590521d463d14b785a23ccd3cd28110b5b_Device=CPU_Config=() +4722:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f4b78bee713f23abfda124ca92d58828eeab6118710d93572a491cfd85cd05b4_Device=CPU_Config=() +4686:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=461dc8aa282946831fdc86d1c024a273ac0f29f5ad615cd55b879feea6d23007_Device=CPU_Config=() +4648:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=92f5c3aa4427a89ad6ef275c0beb2139cbd0c6ce2eb71205117448adf592ad20_Device=CPU_Config=() +4564:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ede6f5f8a1d9bcfd1979965f575c8f267870e0d6a5d3a62d229ea029893525b6_Device=CPU_Config=() +4488:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=b16650eec74ddd46ff3bffc9eedb340b6bad99a338fbe6b11f7eca3098a324d2_Device=CPU_Config=() +4451:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=588ef4d887ae9d8ad432525108c81a9762dc27490a3e01d3e86795c73275148b_Device=CPU_Config=() +4450:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2c20f6aace24bf601953b848c173ad475502b91b667c903638acf41fb9a67d3a_Device=CPU_Config=() +4427:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=089d73af2221696ce3755a9f33124c9af87fd3e860a1d4f229995eb01ff46703_Device=CPU_Config=() +4407:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=fabbe8bc510b0d1afb64f2fbe68d755be05fdccfadec5fe845dc6b3c4e6a2767_Device=CPU_Config=() +4404:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b706dc1dbca4cc6c308f2cadf799fec41a8b3f08251de3a58444f0d760994cbb_Device=CPU_Config=() +4352:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=92ed2f40e1ecbb9a90904cfe8e8ceda94f73154a44ac28a50c0d7acb221e8835_Device=CPU_Config=() +4351:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=29bb3b751638e157d0ba7114cc0e156a4b792a9dbb2bafa3ca124516595f01a2_Device=CPU_Config=() +4332:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ffc3cad64b8bf82ffa4d189a247a9434e71886cacd3582956c5dd98921fd2141_Device=CPU_Config=() 
+4332:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=b050ebcbd31acbbc43d657d87a54415e0e52d3e91fa95b57aa1dd0451a5bf50f_Device=CPU_Config=() +4321:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=95bbf8a23b19badbde31e9ae7f016aa436d50d797f59bd736e220030f645bd9b_Device=CPU_Config=() +4309:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=57921f181e48af2b294b923633e457650e5ab2a9ac7f5d4d07930974ad5e03e1_Device=CPU_Config=() +4308:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=66a4d902b67742a95e2d41d79b9d2434e57a55c168a88049624a0ccb62df9ca2_Device=CPU_Config=() +4289:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=491b849a7ce8fdb2190df5415fe037ff02fc23814efc520c343e872f539d6e55_Device=CPU_Config=() +4284:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=dynamic_IR=2af646407076eafcc1ed2d628158fc32eac4ef2fb34fb967962c06f81376d61c_Device=CPU_Config=() +4256:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=1f6be1a43c786bfbf35baad6ff643b762e9d63c069c884a69b4ec6e89062ad7e_Device=CPU_Config=() +4240:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=15197edec77da431c491f42f64e86a811d89a337bf44615824226425b1c64d28_Device=CPU_Config=() +4208:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=3a3886301663fd20cf2c8c0f74c11d80dfe8b74ac39e41652f0eac1ec9bfa2df_Device=CPU_Config=() +4182:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=9281a7e3ea8124fdbe416d1f15434752a7e799fc77a63be64babddf60b6f2d8b_Device=CPU_Config=() +4119:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6453d2955ad3344d5e021f97d71691ddd7c27ffc0d9044b724c9a6b5c20cb427_Device=CPU_Config=() +4086:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=71c0c7e3176ae8b233352c89d47a61394cb46695e7879118ed02070a4a23d5e1_Device=CPU_Config=() +4070:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f69e74dc680137ec5ef0b63e38d451da7bf1b61d2acabab77df46b76c9777402_Device=CPU_Config=() +4055:conformance_ROIPooling/ReadIRTest.Inference/Op=ROIPooling.2_Type=f32_Shape=static_IR=556c6863ca3b12d255c4c81d92b4573203f02c5588e064fb22dd4aa23c8283c6_Device=CPU_Config=() +3943:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=07b4c5d05754987a0524385690d79f74988302f437597b7477770e8d062d72a0_Device=CPU_Config=() +3930:conformance_If/ReadIRTest.Inference/Op=If.8_Type=f32_Shape=static_IR=e178ca7afdd75b09f1ee18e50afd30eed0740497637863c3397b5a75c0f8bfd5_Device=CPU_Config=() +3928:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6eb80b60b2162fc469f652535ee11822ae34c903ca44191dc95ad7f9678b9337_Device=CPU_Config=() +3867:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ec19939673cc58f2511ffd6695a3652f1d724872b0db958a6d667e1e87002b21_Device=CPU_Config=() +3853:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f6f3c6d199a224ee983f6905aa4f72ea4138e6076d7307c72588dda0cc9c6ed1_Device=CPU_Config=() 
+3848:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=45ce409a7078c7e732a092633cee36d6a0aa80fa9249cc98dce44e5b4bfc1693_Device=CPU_Config=() +3830:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=2a3d6c0476c17897fd4cc6d3623519fc033ac4022a01fbebd40b461f414f6c15_Device=CPU_Config=() +3807:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=3d20de7392465c055c84dc20d0af64ae6d14809f5a6e4bb05e315a2654066f93_Device=CPU_Config=() +3763:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=20af9ae4466332a072f3b04c1219146d272daabf2306b66c755980bfd31f2a76_Device=CPU_Config=() +3683:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3e2e16f3ba7681bebb6b4c06788f38a40fe24e26fa3ec3accd756c87bee7d62f_Device=CPU_Config=() +3668:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3f87262a458b0dd0a330ab0cfc48c74ee687819228d3e2e1226df3b02de26afb_Device=CPU_Config=() +3652:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f8408a1d4e8c11ebbda01e0431217a5ff4ac6a869cc4cd3208cc9adc59d227fa_Device=CPU_Config=() +3636:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=125ec4e4ba4158d3a6d1a7725cda9a18a220926d5ad6ed623a1433688c79b579_Device=CPU_Config=() +3623:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=f32_Shape=dynamic_IR=dac2c804cd13d69a51906319a3648ac0edd87764c686c99fb47179f379cecf7d_Device=CPU_Config=() +3583:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e0293184207036f6016f557f8df813c6536b18332f589245c5c606a3b36df1e4_Device=CPU_Config=() +3572:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=3fec5c6f9e39d8a15d58c5800a889e1660adb375cb7660af1526cd31e69f7cdc_Device=CPU_Config=() +3570:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=69c68c20edefc8789e62a7cc8a0f8fe7e649f884649ac30833fb5a2ce43c4098_Device=CPU_Config=() +3546:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8e80bbd29743e87a0a6d4158a06249766b6a9cf424cc1c0ed3c6f60e30e6db58_Device=CPU_Config=() +3534:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a4b3740eda9e6bbd3968dd39e6abb33b22a90a811298df6a761958216acb389f_Device=CPU_Config=() +3524:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f3b3afbedffce0d70b40d78f882a0061ba05e26e385c37cf902aec88ea43a649_Device=CPU_Config=() +3517:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f89a1dfd0ef8b50a998962d5a4f4b54451ea4c533476a2e3d42c04e9e645afaa_Device=CPU_Config=() +3511:conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=981b213f0fd1305e70515849fd08553471da63e6bf64827a47cc475fd4ed9561_Device=CPU_Config=() +3489:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=945bd465761a4d9b013b0a5e88a3a9e041d8bd8bfa8df8044f28d71ba26f224b_Device=CPU_Config=() +3483:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=969c6a651dc204576d68d7d893ad2dbff1f7c74803b1763857d41aabdd19a72a_Device=CPU_Config=() +3471:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=0a7b1efc8d314c5e37062e482a9398f718082ba0528c6ca2d2f6c88e7a4a2bb0_Device=CPU_Config=() 
+3450:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0e5b8f44656b680d14f7b7aa3293d8933ebfa82524d6acc09e41d38e8efda726_Device=CPU_Config=() +3431:conformance_If/ReadIRTest.Inference/Op=If.8_Type=f32_Shape=static_IR=If-8_707_Device=CPU_Config=() +3363:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=81c2956d325aab4a7bfd931d94151e1285083a15326e0890f861b97017a24bb9_Device=CPU_Config=() +3356:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=1d7cabddc96cb4ca2ed111c9f7a9c31b76ed9a052fd0b79db6bdc8fc55f24a4b_Device=CPU_Config=() +3354:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=86d8d42c30e423e801b5d4d832f87cd6837bf9feb3c546f5bf87e04f842a04f1_Device=CPU_Config=() +3352:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d56533ce961113b2ca0baf02f3ff9f8ff210264343f6bebf26418a35ecf36b02_Device=CPU_Config=() +3348:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=f01fb29e8f5ddc7562e954e46b1d2bdbe6144d6bbe2ed2a0f16610f2812ac721_Device=CPU_Config=() +3265:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=1d8577d7a316c5a2726f3be79b4f8b22d6dccdd5491a4c7896a7c9de37330e77_Device=CPU_Config=() +3246:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=45b3506bf3dbe053fcb290dd1040a9d125c56086b37223e8480647bdd9b9372d_Device=CPU_Config=() +3244:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=87f3815fd73265960ef5910a3b03580b13e96d02784e159a0bf0ebc30bc911d5_Device=CPU_Config=() +3243:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=978c328422e3866322f3bdd52955690a47a1fdd47ddb9db66a4707b36a535dbf_Device=CPU_Config=() +3202:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2fc01b66086ac5d8272dd81ab731188b62bbe8920bff1efe61bf3261a3a8b3e6_Device=CPU_Config=() +3159:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=1fe78f5a88510b70fb39ed088e1418ae09155d179afc3a614a641b4e8f86574f_Device=CPU_Config=() +3158:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f84dc200af2852df01662dfbe891b8ed4abb27db6763f3a2b645ab75324834f3_Device=CPU_Config=() +3148:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=3fbff9f870428a19ed434cdf72834eec251edc3dddd149491c94319d63a8438e_Device=CPU_Config=() +3143:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=09c1f9f81a463477da73d33f00d1321fa5c1f64a9c3c51c6e3c1344e362d4ced_Device=CPU_Config=() +3131:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6a9a72aca963de945d97658e484453191cf6af26cd6838c1603299aff3a49a8c_Device=CPU_Config=() +3125:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=77e1c522d9ea4975c3071869b7b485038bb4035c9aae6f5d44291f60ae253a0e_Device=CPU_Config=() +3101:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=00709ceadeb9692263607310765b0957f34a8af1ebd17a13cc28d9587d360465_Device=CPU_Config=() +3093:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=8ef4d7ceb7d904a084d93d6ede1c15a64d2511b3bf1312d630792eb21c591408_Device=CPU_Config=() 
+3089:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=6b0b123bc93e799aed7bee84e55ed9def25af4f11d27958d8368983eee9c527b_Device=CPU_Config=()
+3076:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8dfd99ad2ffed2573598829ff34a62deccbd70f5337c1fec4c2962cef1992595_Device=CPU_Config=()
+3075:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=7d73fec5a605ca6fc06cb014fb723236fd2ddfa1820648acb7fdae8530866f45_Device=CPU_Config=()
+3074:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=0f670e49f962b0a7abc6b4f1fbf9592db592a6a78eb3e083dd4027b9f9607430_Device=CPU_Config=()
+3072:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=cc3619fbe03f9b98ff07babc5c11f9bd9f26927c8d793abc7188595145bd1371_Device=CPU_Config=()
+3046:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0edbc14a5d5ac1265a4b880131348aa16e284012547556ddedb36b185d833284_Device=CPU_Config=()
+2976:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=fd10c1c5d33aef77d3428fb5c9789f3c2c2463ab9f6cb51184ad37951578320a_Device=CPU_Config=()
+2973:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=14f550bd7e83223ffbf501918141376e6a144484865f03c9768fe9da49a9f06f_Device=CPU_Config=()
+2949:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=49ed5fbacb5510d9cb3970dee136271e98ad5322b95217c6dc41026e583f3bcc_Device=CPU_Config=()
+2939:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=965ded994c427ec62353194906203c202a52dfc0467196d5f1143759fed94b07_Device=CPU_Config=()
+2938:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=5d5dd8756ccd01ee77e0c17d26f248c9e35d07aa812dc64bc39ac1ffe17ae585_Device=CPU_Config=()
+2932:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4bb7bd2471752f1a62dc15dbcacad87dd329443459a90dc6768b1a34fd00c064_Device=CPU_Config=()
+2916:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=059046ce67f6b09ef45aaad5724e28fdaaf40afb92613740fd058c974a120d3e_Device=CPU_Config=()
+2894:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d7a96943c0264427eb83ab413f6e7b0f15f09f83525de581fba582655d0fa4af_Device=CPU_Config=()
+2870:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=0c6a844f626f6628628034d332ccb6d520e0447e4b616048c7efb516d0fd87bb_Device=CPU_Config=()
+2867:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=c0c1a43608279d8870258be63005b38e23fe5501876c87840cc16a0bb2cf8dfe_Device=CPU_Config=()
+2863:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=ac87d885a27bfd348d3f9fad5a03680b73b7198fad17dfdf08675e6e3d239ca3_Device=CPU_Config=()
+2860:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=95afe52c888afd5c641ad2d6d0c3f8491f039af2c6938b91fe6fca613ec0b6ab_Device=CPU_Config=()
+2837:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=f31f6d969e04a7a1c964c02f107a7291c85067ac31d935921bc418363c2a7a46_Device=CPU_Config=()
+2826:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0ffc7d7836be264b6d9f26daa71a8c7100ae6bc6fa1af23614a2736226fbdf0f_Device=CPU_Config=()
+2796:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=49245e23b8c1c485428d0e490a687e48c541bfb833eb7838efd8c112736a076d_Device=CPU_Config=()
+2766:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=05690f7225eecae70805d45641cd02c02c46bc61f9fa4cf91d3ec7ce94f6fd3f_Device=CPU_Config=()
+2759:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4746fb4d92aab20d21eeb0885d35c88abd50aa250298473f5bd143658eef2316_Device=CPU_Config=()
+2730:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=45d612bd5bc0895879f727cffcc13c978977a0aa10dfc726d00d6450faeff068_Device=CPU_Config=()
+2706:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=05e9fdd5183bd179e5ef996ebcdc53f239900ca46a8122ee8bb1e885c2c091ce_Device=CPU_Config=()
+2682:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e27641fd09143d736ece2166cc3156e80c190d5def706b86358f49fe980cf9b7_Device=CPU_Config=()
+2667:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=16ccecc11352f2c476db041adea21d67a96e03cf33902b37f4f6855b5113c202_Device=CPU_Config=()
+2658:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=6ac457e9181610da9eb4bf0bec6cd53bf3078e0b84df1211f49921207d81c6e9_Device=CPU_Config=()
+2636:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f28013382ca254b4538a5527896cdfcd9d404aa854af83ef1d417abcdd781ef5_Device=CPU_Config=()
+2570:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=99377bd11138d36797502d82ac9adddc31dfe1e4cbb8bba8684b1479f8a16f26_Device=CPU_Config=()
+2567:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=d34bccebe88a4093c9810d56088e4bf07b55bdab1801d7d830360aea1be22499_Device=CPU_Config=()
+2537:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=348254d0e2b145f9e5443b4d4470b2ab29487acbb34a71285a5c0e1bd29cb942_Device=CPU_Config=()
+2535:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=847ce287888e882e988cdd5bf41277c32c207e38215e1e7d41439890037216db_Device=CPU_Config=()
+2520:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5d7273e7772d3578b3c8dcefcce25913c8e843b7a1045722f80f9feed4770ba1_Device=CPU_Config=()
+2516:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=dc4bcacb769fc4d8f1ef4ff20ca7ba6b3b369d69ea3b1c65733d4cbd2cb0762c_Device=CPU_Config=()
+2505:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9ba199e71a3ff06e6bd330e453a1e1103599902893fc267c60da9ae47575a8a0_Device=CPU_Config=()
+2504:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0e9ccd2a8aded784ff21758802648777721176f1d112ff60aaf3f150d6292156_Device=CPU_Config=()
+2492:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6ea8e16cab0d6f60ef13562706c941f5ba3c90d3a65447ab3844e100cec5a0ad_Device=CPU_Config=()
+2481:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=065b3de2617f318d1376e9610f9fa1a2f2fc04292f9a7cc949780ae41d3539b4_Device=CPU_Config=()
+2476:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4819c2459dd2bf875545cc912152c6751ed5db8ef07aba31d3eae6c3dedc7aca_Device=CPU_Config=()
+2455:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=254937408e91c70536c4f3b3f81f1a7aede93b29f142631a46fa7d962c531131_Device=CPU_Config=()
+2432:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=4ccf5cecf790d27400fb95526a993f8a1a28cd4f3120b897cf45bbe78f087ab2_Device=CPU_Config=()
+2425:conformance_NonMaxSuppression/ReadIRTest.Inference/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=d12f2033cdee7e244afad462ca1d9295c314836b593b2a30730861c2a3c8e9f2_Device=CPU_Config=()
+2419:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=dynamic_IR=50ebc9636f3321fe9bc87cbfe301c8ca3ea27f56cf429c983ceed6ae63bb3885_Device=CPU_Config=()
+2396:conformance_GroupConvolutionBackpropData/ReadIRTest.Inference/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=68853f0b8867d4ddb5eeb239690f1b41600c05f64ee4d3efa8cc828e72b9bc1f_Device=CPU_Config=()
+2390:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5e31c7022ed7bf2adff14876be4bbf6562afdc2239a08ddcdb507e3d1a20071b_Device=CPU_Config=()
+2381:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=263843a2c307b91ff7d59d9b21cd8b2126e985d787fc18f44df3525a6bfd71f3_Device=CPU_Config=()
+2380:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6e73ec183893b70ec42a4393f3b1b7c55767a14f630eaab0c3e3b6d22c6b8e26_Device=CPU_Config=()
+2376:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1245c8dbd9027cc56d2eeb58e1bd23774ce945522f66a17ecc3c03ca1ca163b0_Device=CPU_Config=()
+2372:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ca3d0cbccca665493e85a757798ab5e12399ad295466cea744c7a2d278c86c97_Device=CPU_Config=()
+2361:conformance_Loop/ReadIRTest.Inference/Op=Loop.5_Type=f32_Shape=static_IR=7ad6fe3ff1472399c9c0e12aba1db89105e1e4a243cd092dc43ee763a2571fa9_Device=CPU_Config=()
+2323:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=254aa036653eace9f3faddc8f2fb69e04ba0c788a2070c054b4c9fc059d33845_Device=CPU_Config=()
+2308:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5b6503944921be5fa3feb0b7647c6715465af16702c645dec4e2f2556d8d679c_Device=CPU_Config=()
+2296:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=949611ba6617b054b828175c04452b8fcbd109c99cb25d5d8827a872b4044fd3_Device=CPU_Config=()
+2296:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=053d601716750db93af5ae01d67213086ed987370f9ff59723824dcd0a6c2462_Device=CPU_Config=()
+2289:conformance_Loop/ReadIRTest.Inference/Op=Loop.5_Type=f32_Shape=static_IR=35c61b2251b78ad9f9804bd3f9e301e1f974c6dc138ce0466b8b940d106ddd72_Device=CPU_Config=()
+2282:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1d52baa348f922bf85866fbfaa488c1ca33e01f0b79bd6a25fb430e8b7fc8b06_Device=CPU_Config=()
+2229:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b46a2ee4f7042328b2496382ed2bb9cf39621c3e3e27fd1d355c9682543effc2_Device=CPU_Config=()
+2228:conformance_GRUSequence/ReadIRTest.Inference/Op=GRUSequence.5_Type=f32_Shape=static_IR=860decd2bf091a335f6f820b2c6b6acc58618fbb6027e30484470ce899bb1591_Device=CPU_Config=()
+2203:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=d507892556310f7fe85cbf9245ddf040b219ec8cfe9c779809180a011caab9d6_Device=CPU_Config=()
+2196:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8fa841d409e36b6665e289f4963330eaff4124d5452c93b75d779937cabe14d8_Device=CPU_Config=()
+2191:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=4fe6c9c924477957512c3d32086ca167fe5a4ddd5cd1b90d5d32452f6de8317e_Device=CPU_Config=()
+2178:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=d066432a0ddac020441582a98f139d063cf5f4e9f34deaa0be5ab9b9f048aa0b_Device=CPU_Config=()
+2175:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ad4c3d2d3f258a4be14846d9d26203008e01b2832ff004bb8a23ff05c72747b5_Device=CPU_Config=()
+2165:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3d73edb68da4aee1c052b79ffce030b368f204c04bffd9a9dc01a9b54de932e7_Device=CPU_Config=()
+2158:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2435ff5e2ac06afcf99563821fa2a2a5e4a9456cb3f74154b3eb364a6f0e450a_Device=CPU_Config=()
+2157:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=125693eeea442dd24dd812dd2eaf8d2154274f5975d68b0132d2bf9bedfe0ee8_Device=CPU_Config=()
+2131:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=506b15e531d5a643d3276fd84af8e10eb2a62ce20fe3aeda90c50cd7442e0a88_Device=CPU_Config=()
+2112:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=0a16d9d3d8d553c2c747b2c68b12eee3dcc016e29e597992cad8f83aff0aa759_Device=CPU_Config=()
+2094:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4722375e4770c972f87bc89a8ca16871aa57251a9c01ab2a14adc11f885cac91_Device=CPU_Config=()
+2090:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=dynamic_IR=f42d85c8e1388cf2cb69f9efb2970255c6535f1c3f904a9b08cc18cbea6aa6c3_Device=CPU_Config=()
+2084:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eace26dff7f6f0403126e78a4c93920ee5e54a721cd580b4b18c2c9989baef86_Device=CPU_Config=()
+2083:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=bf235091db192c86756347e70771b4b00a6ac2c8852b93079749ba718d57d022_Device=CPU_Config=()
+2064:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f49b212b59261888a5ea4652f9a4cdfe25657c7a0e4d3b6ecc16255e8d2e8cd5_Device=CPU_Config=()
+2056:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=6d705ecceb3a026a9be0b5963705b0c3c6be0123fb7d25885d3ae21213f1716b_Device=CPU_Config=()
+2043:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6adce7c66c1630295ec8938bcb429f20b628b0ceed938bf81ac0fca8580f8d34_Device=CPU_Config=()
+2043:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aeabe9639d6dcd5ab6e09f9905ffa8bdfe7cafcc7f5c8598e20e4ff39bdb50a6_Device=CPU_Config=()
+2042:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6b87ee29001d1d3b17ec72a66638e954796b7d6ec1d6f6be86890c7d5a3bcceb_Device=CPU_Config=()
+2028:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f55c473236715e5c4e6ec21a9e07d1c73b14d529b57fae0cb38ef9d6cd383b53_Device=CPU_Config=()
+2008:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=dynamic_IR=7a1d8cbdb446c3330ed845d7a81e20d9b7c7662d532f4d816d4fc1c56822fa81_Device=CPU_Config=()
+2001:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=439308ddb64edf02f96ade09e7888cf89f422fbdb8c8242521ecc3f93e61bdd7_Device=CPU_Config=()
+1997:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=bf802641cd9b20a23b73202f401f4b32903ac7083d0ac7026098cfb4311b35c5_Device=CPU_Config=()
+1975:conformance_SoftPlus/ReadIRTest.Inference/Op=SoftPlus.4_Type=f32_Shape=static_IR=443141d6914003828f76ac1de39cff68ee8ae96b2524fc41e9f5f95707b834b0_Device=CPU_Config=()
+1972:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=539610c21b2839b71cfecbb15b7b7145f9fee8bfef8ed9e1d73aaad2de661496_Device=CPU_Config=()
+1957:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=dynamic_IR=70c260fea7c5ff6d2d1e9580ecf6c6a8a26c0e688b4f8dc4540888526bc13e76_Device=CPU_Config=()
+1940:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=910dee337e395f94d7673f664a3e58647ead8bcedf50ea1439250bdfe8da25dc_Device=CPU_Config=()
+1938:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=66bf131d73ad3116d698e15ac3c9e48bde66e096228138eb865c0807295c0d4d_Device=CPU_Config=()
+1936:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=eaac8b3d6a4920fa2ac101965805d140502fb409e230821d5c2a370aec15eed8_Device=CPU_Config=()
+1916:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=486cda3fac543c53e385e5b26f0932be2c2c67d937dce02e9376ba2956321e5f_Device=CPU_Config=()
+1894:conformance_Proposal/ReadIRTest.Inference/Op=Proposal.4_Type=f32_Shape=static_IR=ea8cc682a9a36cc61498573e967ec64d289af84a9e3da1911085b1de4fea4c82_Device=CPU_Config=()
+1891:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=3effc90c24c0eb76bbc89809d34c6541654366a02e21378a668dd932a6cc7756_Device=CPU_Config=()
+1884:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=447c546ed54e81edcfea77cafa8d18261923bf25c050666029828ea72e3a875c_Device=CPU_Config=()
+1884:conformance_GRUSequence/ReadIRTest.Inference/Op=GRUSequence.5_Type=f32_Shape=static_IR=98a6da6e0972a1b70caa5df788a6921d4e470565dc3880faa59e913fdc15f460_Device=CPU_Config=()
+1879:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2386bb6412e51aa72e9426e12f9f2b2646e7074413b33fff8d95dde141ee12fc_Device=CPU_Config=()
+1876:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=c8bb865a43a3782b3b85e05c3e86388fac07473697ed45a7b04f60010555a3c9_Device=CPU_Config=()
+1874:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=b4fecfa9b5d565a02a9f0d0ed19a11127ea9c8c4e70a0e5f7b920701e0665d51_Device=CPU_Config=()
+1867:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=f9f031e1fb61fcf87468eb1f4b2005e7cecc5f073eca95c161fe62fbbfc983f4_Device=CPU_Config=()
+1860:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=37a75b89894d8a024fe6d1808e0674b4fb59534cd319f4bcd07c6d9caaaf97a5_Device=CPU_Config=()
+1855:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=bf7feb979b2eab03afc780965804a3f6b8471b574c36125654fcaf3ebc2c30f5_Device=CPU_Config=()
+1852:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=a110c620d27325938e9febcd9d757a5525c421bc29450fea960403fbca3507f4_Device=CPU_Config=()
+1844:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=0e58762b5cd9926391cba6f63db3c7db49285b900ad0abc93b4d05d4baec800c_Device=CPU_Config=()
+1832:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i32_Shape=dynamic_IR=6a0218ea2e7eb0329e4915f2f6a7c215742d2469e868a4a8e43c683c2dddc01d_Device=CPU_Config=()
+1829:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=fc8b85b03281a7e8532a130a70fcfce5b6c40b1c8863eaea3910013a0bc4e769_Device=CPU_Config=()
+1820:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=cd389fc4a9417c7136f75474e42dfb43d1f9cb35fa0e104632ffa69fce2b7e57_Device=CPU_Config=()
+1802:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2ce1f8773e871f8aed0d3541cfafba0bb079e1765f04c1336af8a47f354cd766_Device=CPU_Config=()
+1798:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i32_Shape=static_IR=7aacf3576c3d114915bc3aa48c8ee4ac9e94bc00928709d86461877a8d2d84fa_Device=CPU_Config=()
+1793:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=fbdf008803736374dd213f1d7e0a041fc0e9b3f025c212a588fa05842ee5ee56_Device=CPU_Config=()
+1780:conformance_Abs/ReadIRTest.Inference/Op=Abs.1_Type=f32_Shape=static_IR=5713be8dd761def00c701c74d0aa913d259206eff1103b9fa6de0f6f1a25e566_Device=CPU_Config=()
+1768:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4e6262ae12e4f9470a87cc4f1cc1ef2a817a8080e25a79ca4ef67cb60a558b41_Device=CPU_Config=()
+1726:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8ec74565f16a2ee1e322b4549ea19aa0b30719787abd90bd957e121705edb268_Device=CPU_Config=()
+1720:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=04db488d856ff6cf4f04ad155967df95830796ad733e589f42c3862224acd874_Device=CPU_Config=()
+1707:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=139cc84777f1e0d489245d058877303e72a93eba3cffbf5f919de21b4514bb0d_Device=CPU_Config=()
+1688:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d0aad85620a1b97486758b17c69043a6a9cf75a459bf6e283b28ca132e917dcb_Device=CPU_Config=()
+1685:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fa2eea1b545d6b876282ed0165fb935f0af249c713e3f20fd97cc06118e615eb_Device=CPU_Config=()
+1685:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eba756a8d0ce89c9a8df50baeaeb82d5b719461bbaa06386db7e1be10ec535f3_Device=CPU_Config=()
+1683:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a093f44f22a851366eec46b6ed80fcecd2a4a96ca797c2caf288922a2fae1fd1_Device=CPU_Config=()
+1679:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=662ca1fd253f0a0c29b89eb1310ea5c7c87895533130ca1a8b76f791ef1ad99b_Device=CPU_Config=()
+1670:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=d50dd7c19583071293118e8b98f2bc749ef3e34ab8eb0149138e6b9fe49a153c_Device=CPU_Config=()
+1668:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d0bade0811581a6fae53c343866f1bdb63acfe07776fd60b7e791f8edd3f88b2_Device=CPU_Config=()
+1665:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c9352ef8b6aae01025051f9c73f023e7b5a13f8987f81bfff4ce0ff9725c21b5_Device=CPU_Config=()
+1661:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=14f4dcbc8e714fdb791d15b62646db0da2cf647d431dd6ea044ca6976ef51753_Device=CPU_Config=()
+1660:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=3bfc4cff938f4386af23d87ce10f8680a62a25ce1fa9178874f212edf45ee045_Device=CPU_Config=()
+1646:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3c03ae2ab13dfccc85d9909840eafb6a291b978e9bf859f27886b4a0d3e87ffa_Device=CPU_Config=()
+1644:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=234277ecce31161bea52cf4aa2a37aa8cd43f1bbeed281a79a6aa1d07368872c_Device=CPU_Config=()
+1640:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=30f4b90114764377dcd8e010019eefe0ec9c21dc6f0503b52323dfe867a51df5_Device=CPU_Config=()
+1633:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=30b790c0018dfbf2d699b7518dc62d7b1d8944cfe0375174e03f00dbf33f1c19_Device=CPU_Config=()
+1629:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=b729ddf6b689006067cfce88ec7d9e89268dd6cd904e4596717016541632b13b_Device=CPU_Config=()
+1618:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=cb67c5d0b8712ebac00fe4169f0cad2e0a8c71d7f9603d5d2ce6ff6dd6bc055e_Device=CPU_Config=()
+1615:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a4e797de860d6e4dcec00062050168ba9745d3da953b9c644de654f4d2818b77_Device=CPU_Config=()
+1612:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=351e48db45e09ca6c4bc54a271eda4cb2ddd69ba43f361b9915a6588913768b0_Device=CPU_Config=()
+1612:conformance_Erf/ReadIRTest.Inference/Op=Erf.1_Type=f32_Shape=dynamic_IR=e6f95710a782b6c7df8397480e5cffbfa773fdf4ef11c93b2b1ac4694313b080_Device=CPU_Config=()
+1599:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=af1f864a9f4bc94bdb713b0fed3f4c39dbd290cf7464f3cee8f1aded11981d4d_Device=CPU_Config=()
+1596:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f9f701a7d26d77a2b1eb3cc822efb5da95f1edbe614469f725a381ce892d8d91_Device=CPU_Config=()
+1592:conformance_BatchToSpace/ReadIRTest.Inference/Op=BatchToSpace.2_Type=f32_Shape=static_IR=f118f5911730937f9dab91ad5eb6f78cb1af6de7bae1dc745dab2d4f02257fff_Device=CPU_Config=()
+1587:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0bc70791680aff885fa6a5903cea30fdb2386e7720403a8e6698362c5491a877_Device=CPU_Config=()
+1585:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9991a1b4140ee8e6ed0460fb384b7729f681bc1068315a4d970eea59dcc89950_Device=CPU_Config=()
+1578:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=33d8f6d258ae8dfd09b8e6fd39f0e74384eabfb685e0e72a3c798101ea56a1d2_Device=CPU_Config=()
+1577:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=154d7358887845b8f2a661e79ef57318fa9499ee5c19b7cae461b6f798c57b36_Device=CPU_Config=()
+1573:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=97181a6433949eaef7277fdfec4f8f94b27463ee3ed4a6aefc678fdaf7eab4db_Device=CPU_Config=()
+1567:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=992d8967c619d96c75985952485fcd79b943ac5e71c40457eafad4b71bf56a4a_Device=CPU_Config=()
+1565:conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=static_IR=7798cef9c8734d0908103b3c42fd7fc791806ad61d35dc680dc43d9597c6f1fb_Device=CPU_Config=()
+1558:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ab760f0d90b0fef133a0555cb2a5d40fb525aef88e6568c5387a87d7e82f67f8_Device=CPU_Config=()
+1555:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=c0c3d43548fe20fc4e63bcfc8ee6d0a70a6076dfc0ee79e31fdcecf6cf35921c_Device=CPU_Config=()
+1550:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=92c3646daf445784fceeb022afba2831938fed34660bac5445f033a1efdccc34_Device=CPU_Config=()
+1539:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1be99c00176df777bd8cdbd9f74ff064237f55053dc7490050d692274182182d_Device=CPU_Config=()
+1535:conformance_Proposal/ReadIRTest.Inference/Op=Proposal.4_Type=f32_Shape=static_IR=c0884ce897724cace24b30df395a33443364f8494f1f8495d212f2db20fc49e2_Device=CPU_Config=()
+1534:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=bca72a16df5bcf81d10dfbbb0e53aceb2a8a70ec94d4247d47333679de7214c5_Device=CPU_Config=()
+1533:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=bb5cb4e2a8cb9be32332ed3255c99de478d8d2e31cfb1747aa322df438ebaa49_Device=CPU_Config=()
+1528:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=90f882a97d637e527900edfb1b7c277b65544832793d08efdf8454be21a2f496_Device=CPU_Config=()
+1517:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0f623457008d91f7fcaead549e4a3f90a5ca77dd7c52fba19906f559c34b333b_Device=CPU_Config=()
+1513:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3c7d4160bf883d550620e8d1ceb54b3d78bf1512388b5ee57e1a380949d441e1_Device=CPU_Config=()
+1507:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=539610c21b2839b71cfecbb15b7b7145f9fee8bfef8ed9e1d73aaad2de661496_Device=CPU_Config=()
+1506:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b2fc03d707298e863f83bd3912617e76e63d0fd922c87edf912c17bf51cc1fcb_Device=CPU_Config=()
+1499:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=ed75de35729f20a3285506937672f78d2d5137851a3043d15f4eafc040768fc8_Device=CPU_Config=()
+1497:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c3e5791580edfc2b522c8a3aecd33445b3fa8d771e2b5a8387ef0f303773c848_Device=CPU_Config=()
+1494:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=966eae58d5c103f24a598a5143d7b3a3c40a12fa2606a65431f0d1aef855cd32_Device=CPU_Config=()
+1486:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a0b3d7813e380f287a758c35e56e8e8edbb72b8c64fab6194a8890dacd5e2f16_Device=CPU_Config=()
+1482:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ef702f626a20bec33a58f2596e4e6e15f105860ebfff1d6f42116a514d853c4a_Device=CPU_Config=()
+1469:conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i64_Shape=dynamic_IR=0d660483dfd9c9975f102d300ec98da49785fcb6484b379c45df8a61e1292797_Device=CPU_Config=()
+1456:conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i64_Shape=static_IR=9402d607ff481567bf322dcea9aa597387a195b9d3756ff46de81c3ac2737a49_Device=CPU_Config=()
+1446:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=dynamic_IR=fb6a053d244fc1bdea6fd5e69e0c05025272ac0da2f676e077c598239b6493c2_Device=CPU_Config=()
+1444:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=42062545b0991e415aad8d29d47de2a278e5791996ea55974411694aa821b54c_Device=CPU_Config=()
+1443:conformance_SpaceToBatch/ReadIRTest.Inference/Op=SpaceToBatch.2_Type=f32_Shape=static_IR=8acd95619121cb22760fd92815b1ba85f541f282d3860e910f73036ed335a9ee_Device=CPU_Config=()
+1436:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2bdfd42ec67d330dec8ea2817499b4c2d32a3d91deccede902acba057b050c49_Device=CPU_Config=()
+1430:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7c8594e723d769f8817c58fc16146033afb91d821bc941dff944223796029f8b_Device=CPU_Config=()
+1429:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f0c4dee4dcd8f03dd599ae04d7dd6ccfafc4d900d052a62f232a5507ffc006f0_Device=CPU_Config=()
+1427:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c3f8bb35f2f4473c563c3e5171a8fdc6f7a0ae20e4acde31a578bd20630952fa_Device=CPU_Config=()
+1416:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=43ba20ec70e156f4782e1f11a30f02daaaafb2039912a373620d845e995c97cc_Device=CPU_Config=()
+1415:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ad640e033c11cf7d81ab237630f0ba656968f620eb4ed77f38cd79c6cbac42f6_Device=CPU_Config=()
+1415:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=351e48db45e09ca6c4bc54a271eda4cb2ddd69ba43f361b9915a6588913768b0_Device=CPU_Config=()
+1411:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=634db7c7a580a605f3375f671b3bcb2a1baf5856b32032d2786a5f8061df63c3_Device=CPU_Config=()
+1404:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=e7b65875a7e2d88532271dfb93a4a0fbe4c41963fee3193cb3de547c19121f78_Device=CPU_Config=()
+1404:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=923032e47821636c4c8098a7a9afa97b331a47d47357c780b7bced2e46ea9921_Device=CPU_Config=()
+1403:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=a05c6778a396b4eb89a5e112fe505a41f47ff6bef50fa025eee1dfb7ec6a95e7_Device=CPU_Config=()
+1384:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=8484c5c087ca8317588ef676a0cafb63ded379be5bad862e4d0504f43bc6fb45_Device=CPU_Config=()
+1380:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a08cb2499595ed0de5c51e3b0feae24d9d5462d227572e771862564e1875b6ef_Device=CPU_Config=()
+1374:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=df8ed5b481f6b03ca63572f2059d20911d3a7757f4c032455bef9933f2c1dc35_Device=CPU_Config=()
+1371:conformance_GroupConvolutionBackpropData/ReadIRTest.Inference/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=3e893f54d0ed092823ca8e256e66c367f53e466f30573a7b5911a432d88299a2_Device=CPU_Config=()
+1360:conformance_Sigmoid/ReadIRTest.Inference/Op=Sigmoid.1_Type=f32_Shape=static_IR=e939c4d2a27e1d7dba93827ab807881c32e47d48b726fec701712bc85c3404a8_Device=CPU_Config=()
+1359:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4a3c7edd1efc847f3d1255738c19cdaa682c9348c0b0bfc466ea9d5749d5eca4_Device=CPU_Config=()
+1354:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=f2995592ad35fbaf52873e0180081397916db8165b9596166e8d449e44b57169_Device=CPU_Config=()
+1345:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=fe80b80ced0033aef6f7f97abd22de1271430f700d7dc9aad9a2a819f91e11a5_Device=CPU_Config=()
+1330:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=fb8064c0836e50254162e2a9cab01514f76b19f78084410b6d1b69bd54f93168_Device=CPU_Config=()
+1329:conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=dynamic_IR=cd6084826e0efefc7f1c9c3c7c9f8c1cb35b9a5f61d1a2c8131ecec5babf1af4_Device=CPU_Config=()
+1324:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=dcfe0aa2fab0afc3b370be59184a5e59c7bc0e8b2930bb671d1d6b38f55234ea_Device=CPU_Config=()
+1321:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=486cda3fac543c53e385e5b26f0932be2c2c67d937dce02e9376ba2956321e5f_Device=CPU_Config=()
+1312:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7340b50308272b86e1b98e6962ee280e9575fc0d7042b9cc076c530268e2ca74_Device=CPU_Config=()
+1311:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=5176d95c14be776a4247f25a469708ba7976378b7aa8860a115a28a8bf2c2902_Device=CPU_Config=()
+1308:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d27e8ca8280dc9219f4b76a2c8f47cf526b32a58710126c7549e2c04026944de_Device=CPU_Config=()
+1305:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4bedf955c6ec574258a05f59e5397225e1360ba68ea49d4fe105d6a62ccb3e97_Device=CPU_Config=()
+1299:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f1f52703006b7d81ccadfa1c54db42d8b19ac7b8beb3ee88f2d7252170358d90_Device=CPU_Config=()
+1295:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7cbd5676618d9b507238807c281801b8a817202b0ae648a44cfa32fc16c02547_Device=CPU_Config=()
+1292:conformance_CTCGreedyDecoderSeqLen/ReadIRTest.ImportExport/Op=CTCGreedyDecoderSeqLen.6_Type=i64_Shape=static_IR=117fa486a51d9715d9ba1ad90cb5d6741e762cb36ea55a91129f1947b4886649_Device=CPU_Config=()
+1290:conformance_Ceiling/ReadIRTest.ImportExport/Op=Ceiling.1_Type=f32_Shape=static_IR=1484c3d0a5a8b6d1daa002e27b07bb8ba0b5d83aae50b0a3b3bea08483815d55_Device=CPU_Config=()
+1288:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b91ccf96246dcf055dd9122c823ccc54ea572f1ad8fcbad3a98c88edb7e454c4_Device=CPU_Config=()
+1287:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f99caac2fbfafe61a686cc29c0df0779eae1a0a1826f5bcb820048ec3c148207_Device=CPU_Config=()
+1273:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=a714d51739b3d420cf27f476e338bacbeabb40d0ced1e1527587756265253d8a_Device=CPU_Config=()
+1258:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0f623457008d91f7fcaead549e4a3f90a5ca77dd7c52fba19906f559c34b333b_Device=CPU_Config=()
+1257:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=f32_Shape=static_IR=dbc3b2f724614a68d750ae4adfd7d8239c77ced05d30f89deabe272f104a5e75_Device=CPU_Config=()
+1253:conformance_CTCGreedyDecoderSeqLen/ReadIRTest.Inference/Op=CTCGreedyDecoderSeqLen.6_Type=i64_Shape=static_IR=117fa486a51d9715d9ba1ad90cb5d6741e762cb36ea55a91129f1947b4886649_Device=CPU_Config=()
+1251:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0ffc7d7836be264b6d9f26daa71a8c7100ae6bc6fa1af23614a2736226fbdf0f_Device=CPU_Config=()
+1249:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=72eb2887828b5b14b41d001b6c7277d395f39c8003b9461730a938833899aacc_Device=CPU_Config=()
+1244:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=()
+1244:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1ceb1c4ba1a45cbb5cabe7cb4b416cbfeb93f24533c8123e4c2315cc7e9f40a5_Device=CPU_Config=()
+1242:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=64551d094520cf00d40fe514f573f5f37f61416bd456474f4b0a21788c4ffd3a_Device=CPU_Config=()
+1238:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d7e3ea8c5ea46f1b0430b6a2763c85395235c0ac58652e1d269e1257f6dbf7c8_Device=CPU_Config=()
+1225:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=bf235091db192c86756347e70771b4b00a6ac2c8852b93079749ba718d57d022_Device=CPU_Config=()
+1220:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4694d5512c7f6b97213ae6c93eb6f547e57922279edf34b94a8e45b7f6a9a980_Device=CPU_Config=()
+1215:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e6aa73efa73e8b557d46457037aea3d6ba037b67ac1b52437354c2823abf2be8_Device=CPU_Config=()
+1215:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=bbe05f014b2e4602f4e44d9c07795321404d2459bf782d2dd406de14bd2bd523_Device=CPU_Config=()
+1212:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=185e849a9d8fec26bd81b2098d63bd842d34dc7a8ee7e47086a208e4b8bd9298_Device=CPU_Config=()
+1203:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ea604e7edf80c14a14bf7fcb042125f4d666d0d69ce3c0209c2f9dce26d406fa_Device=CPU_Config=()
+1198:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=97f8a2367c5590d5fe7e405d32ec48e5318a6cb3c0862f2b0e8705a7842e8105_Device=CPU_Config=()
+1192:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.1_Type=f32_Shape=dynamic_IR=7cb8f8f3f3b4335221f85190d4bc29dd28a6b99133ab630a5ee04640af0843a0_Device=CPU_Config=()
+1190:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dc9060efbe22735c22c69f0323c7e6a77a30cfbaae7b79670b9b26fb2be70_Device=CPU_Config=()
+1189:conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=static_IR=8c82cead166c3db4616f034b66c4795cb4bed653de41d2b6dc71b48ce76a296e_Device=CPU_Config=()
+1177:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9cb8bb36dacdb562fddf77e93890fba560c6cdf038921e057e21f3e5e458c88e_Device=CPU_Config=()
+1172:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=922699707423c4110bf8a551eaf7dc3689fd3673fff79cca21442cda90c22dda_Device=CPU_Config=()
+1160:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=94693638ec4742dea16dc168eb9323995f1b2a35a53f577cf58ac3a08096892d_Device=CPU_Config=()
+1154:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=319c7b312e9074a43819b034ce82eddf1c8f9e51d4eba3fbc7a112cb6393debf_Device=CPU_Config=()
+1152:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=8ef34b5ce0dd0100a8efad53b3b71e87f76ed69496cb6f030e76478d7daddf69_Device=CPU_Config=()
+1144:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f729a1e882f1894319a357f6c5474552e883ae9322cc3dc399b3a292b13e6de4_Device=CPU_Config=()
+1138:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=a45c7a05cac7f403aae101f22fac53b2697d90dcade1bb550200ce439cda7914_Device=CPU_Config=()
+1136:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5b6503944921be5fa3feb0b7647c6715465af16702c645dec4e2f2556d8d679c_Device=CPU_Config=()
+1135:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a48d232b00b4d4a735d6b9999c29b413a32cd7f05c104610a11cab01465a3887_Device=CPU_Config=()
+1135:conformance_ConvolutionBackpropData/ReadIRTest.Inference/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=dac1d1bb4f11cef03519894a2853742d914abb0e3225b7caa3bc5f23d167cdaf_Device=CPU_Config=()
+1133:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=313740a93070bb3cb89143685b7521ea0ace30c3f6d510a4d83ed809808caeac_Device=CPU_Config=()
+1131:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0f61e4837d11be2b01f69947cd0b424a45d2e548d9c70ae53b07c43fa1237cd0_Device=CPU_Config=()
+1130:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=8919e05ab2b0d545cabc2e2732828fa693c8f364e9d4d03faf7097f787d4f628_Device=CPU_Config=()
+1128:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=bd3ed1b35506cb92c8e587acb102c70abbe02bdaa75f76e5792d48d8e1f2f33f_Device=CPU_Config=()
+1126:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4fb0809c5cf2945a097d18f445de6f4f5cd2c124cdb495e6f0a12e9d937e2b80_Device=CPU_Config=()
+1126:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0b4b74693c2ec96e714901b1acc772655accc3b29170cdb64ae934003338b296_Device=CPU_Config=()
+1122:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a2e1d1400763fcb89889255855a5c99dbbb17ee5e390e891c94211308fa2d725_Device=CPU_Config=()
+1117:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=577ff3f9c8d226d1899056073c0223ae2d81dcc940c5fef8b9ce9cf63931e9e2_Device=CPU_Config=()
+1116:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=0a16d9d3d8d553c2c747b2c68b12eee3dcc016e29e597992cad8f83aff0aa759_Device=CPU_Config=()
+1116:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8108f6881c436dfa59a0c27d173054c885f082306ae5af1694cdede13718bde2_Device=CPU_Config=()
+1107:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c6abba035865ece7c6c44b0284ab7c6b8f735bc1ad1f75a9ee3bae6ce26c58fa_Device=CPU_Config=()
+1104:conformance_Relu/ReadIRTest.Inference/Op=Relu.1_Type=f32_Shape=dynamic_IR=43ceadf05184954dd8697d4f737de323ec2ee75f93e0d33d60dab2acc995f3b6_Device=CPU_Config=()
+1100:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2cc5f8b67a407507c1d59a08981887766d377c7368b53cb0a18ec71df291b1f2_Device=CPU_Config=()
+1086:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f8ee6adb1573c51bcffdd8c24455ecd6b6fbf04f171e9aa5de36c5d6f18babe_Device=CPU_Config=()
+1085:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=25ae6295f4d206fa9069e20bc659dbd87c20aaa15c3f149ab25d003641c738c5_Device=CPU_Config=()
+1079:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9f8fca1ab855d5a71d7acabdefda202e270bf16b559fd581f9e663caa301ffd7_Device=CPU_Config=()
+1078:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=1457b3d8c7f130113d74f540dfbd2d4062f869018f7b1afb11c743acc0a007b9_Device=CPU_Config=()
+1073:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d141b35e277394511f5635b2e395039c986ac392e6f49c2415da6a5071bee96a_Device=CPU_Config=()
+1073:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b3cb0ba09807204990d7e1635ef35fc96aa10330de2ffefd95f6483e68dca532_Device=CPU_Config=()
+1071:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=1e9f662cfa263a98c546e69de318268918914f2ddd0ee87cba23c2690a81ec19_Device=CPU_Config=()
+1069:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=93788242c06d787e33afa50ecbef5372898e50024d0c88624056a752535572bf_Device=CPU_Config=()
+1067:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4baf5444c85028a4cfdedc5888a7cd403e2491ab694ab65c820dd3c410f8eafb_Device=CPU_Config=()
+1064:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=269ec3789c76e21789e01e31f13f0f1a4895905b3f131e710e663ed2a0d8f632_Device=CPU_Config=()
+1062:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=62409191ca760efe019eed9d1923c8df9ab545d39f90b1230a58d1747d3143b1_Device=CPU_Config=()
+1061:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=3ade42cfc9d970963d8f162b001075864e6967034198986f408ec09ce4093d18_Device=CPU_Config=()
+1059:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de01a0d560bebb0018927f02409922427ef35b59a96f0aef8f18991ee0d9542a_Device=CPU_Config=()
+1054:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ede6f5f8a1d9bcfd1979965f575c8f267870e0d6a5d3a62d229ea029893525b6_Device=CPU_Config=()
+1051:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e62245706f4242ff86bcb70d4d221bf49aa31db3807698d574125166bff5f8aa_Device=CPU_Config=()
+1051:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=68dc9d01cbbb3546ce77dbc77d705f33a6a48cb6dca9a323f5bcf02b9d589993_Device=CPU_Config=()
+1051:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=165dc8f683138c4d731ee850aa6212a70851b91630cc42e2b4e9d46e0ab15b57_Device=CPU_Config=()
+1046:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=641b1e74512a5cdc87bcd63515a28a409f155a3475fa923e440868e563daaffd_Device=CPU_Config=()
+1045:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=5d522332a7166265867b633721d8bd8ff23a233e7c8bff59a245bbb24d7be234_Device=CPU_Config=()
+1041:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=14108fac0139d5bb39f6b2106857e1ac91c8d44ef9156e4e0873facf9d932316_Device=CPU_Config=()
+1040:conformance_Mish/ReadIRTest.Inference/Op=Mish.4_Type=f32_Shape=static_IR=64374638dfe8bed8e9432c51d92d23b807172fc490c0dfc76428f2c49be92400_Device=CPU_Config=()
+1038:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c90ac17f02f16c647a0a206326f24ac348a0f8a7787037486e52ecc8c091818e_Device=CPU_Config=()
+1036:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=9ec266d6550d7e0c9f4d6114272d7afc80ad822b0bf5078654598b3d623f356b_Device=CPU_Config=()
+1033:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3ebf4d995c8af299693b32b6adabb6a261a3761137ec6c5e68b35bdf0942bd85_Device=CPU_Config=()
+1033:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3c03ae2ab13dfccc85d9909840eafb6a291b978e9bf859f27886b4a0d3e87ffa_Device=CPU_Config=()
+1032:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f18fa21106120cecd81f50d635b1c42cbd641877ffbf78e746ef7375ff546d7d_Device=CPU_Config=()
+1028:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6903ceb67d029d79d90687340dee0204830d5df1f1ea6fbb09f14a6eca234739_Device=CPU_Config=()
+1027:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d242e8ecc8ae0239fc2e7773fe0f8a1d50792a71ae4aaac4fd439174e87e95b1_Device=CPU_Config=()
+1026:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=45959eb5eb391b2bc86455cb1e86aca76799c6b082437e72b15c171037a6206d_Device=CPU_Config=()
+1023:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ca3d0cbccca665493e85a757798ab5e12399ad295466cea744c7a2d278c86c97_Device=CPU_Config=()
+1019:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=dynamic_IR=b7b0a0b3921a1e1434a3fef630e32b124c810e8bd15a3e861fe7da79158947b2_Device=CPU_Config=()
+1006:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2dd63d58c85301d765882b95995de97f4eff14bbb3c933c4e4b8ee5fbc2e9e71_Device=CPU_Config=()
+1005:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f2403b7d119fabadb1609250bbd0959aeef2cd68c62a4036657518ebfbcedf71_Device=CPU_Config=()
+1004:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9ea20be5797b5ab937555c69751a5be584c73a191b3fe3d6fb96a5665e26fcbb_Device=CPU_Config=()
+1001:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5c33d593e408ad72bf438729a423318330c69c69f1504402420635942050ac06_Device=CPU_Config=()
+994:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=997a090766babacae10464bab19af5db238eb28704c6d463cfcba48767a90c8b_Device=CPU_Config=()
+992:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b63e04695c1e6145a3fa9835130a4919df52ff3a420d3c800bddff65af7dd76e_Device=CPU_Config=()
+992:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=622add2dcd72d2e1560e983ef4aad56fd35b48b71964ea8204137026f445d37d_Device=CPU_Config=()
+992:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1891282a9bf460336bad3c354519aa0d87ba6ef40876d4a07592194d2d678e25_Device=CPU_Config=()
+990:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=178677f6c6e3857b2c3aa8765c8e3186bd25b73154ba6463ff33a9e1c911e6bf_Device=CPU_Config=()
+985:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=59eaeea8102953f8ffe85ed1ced2a44ddeed77ec237608b45be0573bb32b1104_Device=CPU_Config=()
+982:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=dynamic_IR=f1e43476084575ad240db6631f433a61ba2076d1ca95e44a0e4471ea9d6f66df_Device=CPU_Config=()
+981:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=()
+981:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f71810b04667907bc88c4a1ecc28b9325fde04026b5e56b5eb0e2d6608f3742_Device=CPU_Config=()
+980:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=2620e86e1e6ce8f0ecb3eebce969f3e7df11f7f86c6f97309aa24993f9036033_Device=CPU_Config=()
+977:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=017d4b1dac18731e05634414942698ecbc750e306eb86e773ffe5007bfa9feee_Device=CPU_Config=()
+974:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e00cea4f2ea99f32c11ea265ecc0483554192192bb99f36438dd38de09820888_Device=CPU_Config=()
+971:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9cd66958dfe8db471d48d6ea35f1b4547a413fcdc6c61c804a456befcbb09d15_Device=CPU_Config=()
+966:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2058548f687014df36b4da1b2644f07fa117d5a1d303a13c4d913a3f979d3ed6_Device=CPU_Config=()
+965:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=4e6db028c1ff414e411bc09accf3b7c20cf81e530c903e14586eaad4c21fa111_Device=CPU_Config=()
+963:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=35525421f74fa15c49098ff1c7faed4fe65763d72ed13add33c6fe8d4dcfb0ed_Device=CPU_Config=()
+963:conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=1f24aeeef6f9f91272546fca89299c1ce448b0008fe43905db434ae3f28a75d0_Device=CPU_Config=()
+958:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5976ce31ff4cf399e87efd691dce3e75dc2de962241a84c09538593c9865b257_Device=CPU_Config=()
+957:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=acaf36c12445c608b306074ac4e2be9cfde2f5550905993d4b5bd1714dc96aaa_Device=CPU_Config=()
+955:conformance_Clamp/ReadIRTest.Inference/Op=Clamp.1_Type=f32_Shape=static_IR=028177a440f430edc5dfd7a7f0f2c0dded422876a98b6da66a647ad9aca10e57_Device=CPU_Config=()
+953:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0dafd9117cb3fba3a335f7cd28aaa3fbd9276878383657b357210e135a93d916_Device=CPU_Config=()
+951:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d77f317dd01a80955f901d0da2930aa1f82531848f4bf22d839c60a84941e6c4_Device=CPU_Config=()
+948:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=caf20ebc8d39cb23a107a03e819e8ee5b2807fbd311fe65453446251e4b6a611_Device=CPU_Config=()
+947:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7373e7e64fbb2fabed337c09be0d6b42c5cfad39b26d92c6dd74810499863448_Device=CPU_Config=()
+944:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=285bcc240dec2c32e171f3866ea33107a109566fb8ef39f0dd84e99664aaf8df_Device=CPU_Config=()
+943:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=b6417017678573faaf72824d1bec40bcccd73ae0007aef24b089dc3743276b14_Device=CPU_Config=()
+939:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ba15b8b85609531d91c7809eb90c3a0079d19d36b83c8767306cb276c9d67ace_Device=CPU_Config=()
+934:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=7ab51e173b82572bfb29cac5dfdc326e3689e466c68cf91590dcbdddf1f530de_Device=CPU_Config=()
+929:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f9e738e5e947a25c9a0d18fe47597f10526e8a74e9d72b35fd848b73f4c80b0f_Device=CPU_Config=()
+927:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=45c9fd0289649c455939587c623f1884a4e675e2f970192d9ac2f60a65e6da9a_Device=CPU_Config=()
+924:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e86061c75b7e9a65644e82de6b8fb2a532ebdfb302f46f378b6ff20af8d1d14b_Device=CPU_Config=()
+924:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9795aaeb71c115680b567eab0877df338c0d8971858b489a2636c4483f3512cb_Device=CPU_Config=()
+923:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6587874c50811a2ca7e27f84cb4381e9a06eb4465e940ea877c76dfaeba02753_Device=CPU_Config=()
+920:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c6b8f476c9b5cf1a102cb33d5e68033bb074a520d01e360ff46b3e479addf407_Device=CPU_Config=()
+920:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e7e10785757d3131ebc375ebfd83c556e2c34a72be20965d9dd3e4f24a5ee2f9_Device=CPU_Config=()
+920:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=58cd9ea3d8db317b6ff7fca55bebcbc6846aebdbe309b1b621f5535b18a70320_Device=CPU_Config=()
+919:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a4ab938f33d0b58425ed98a56789d0ee94beeca13ec7fe3358c9d3751ef136a5_Device=CPU_Config=()
+917:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=574e53e574b1a6e0bc16a7296aadd78785cac535293e956b008b0a2274b7cb36_Device=CPU_Config=()
+916:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b42c98b6313e56a7a012553eeabae92f0672c0bde6f9895d10fb459796448b75_Device=CPU_Config=()
+915:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=bea169459345470ab5d89e5ae9a8b67d6e9401caf7dc35f5060805152e20d6cf_Device=CPU_Config=()
+907:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6624c22e3b5d72c4e8d21df59af6f3759fa4d8fa68f2b5f3f92a98d6a943d0b4_Device=CPU_Config=()
+906:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=776b4b6d6b102654bbc08df901869e4d16af505a5dff7f2d27686874bd20ccc1_Device=CPU_Config=()
+905:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=8832b317ba58dd0efd1e8fa5238d35644d8468a03c9b35809a20ae64098dc986_Device=CPU_Config=()
+905:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=a58fb7847e59bb119656b143af0c6f65e29f8211034fe7aab03666cdb95d7fe1_Device=CPU_Config=()
+903:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=516b04726c16c5c01fbeb1c97f8f9d9376b80e9341d2029c634f7fe4975cc4be_Device=CPU_Config=()
+901:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=750599c4cdfcbe7468328647a8760c7249a9f5dba8bc33ebd00c151d9f3b13f6_Device=CPU_Config=()
+899:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9650ac479045f70fd763f5c95d0c27c3b3cc4d6fc00b43e8ad627d16f817f342_Device=CPU_Config=()
+898:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=43d0f2c259414c3e23105e2f5a13e8faaf322904d9b70ceb8a056bdb51677ef6_Device=CPU_Config=()
+898:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1180dfe50d43ef6b95980bafd3b84816f6d249f8341b03a6f67d20bd8f8ba6a4_Device=CPU_Config=()
+892:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=81954ff76e3fd04ec3b3e3c26e28a79ac259c9b255f90ebe3cc0772fb673874e_Device=CPU_Config=()
+890:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d8574c324ded923f1ea3ab0d8e09c626f3e8a04efe08258b665539c639b7958b_Device=CPU_Config=()
+888:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0973b76264164ca52a9883a69ff5f7df977e28c33a0dbe9095e7e92acd7854bf_Device=CPU_Config=()
+884:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=afc2cb913bcb4e4badd203c9cdf491ea1e6ed4f1cd835e7507889a9bba25b958_Device=CPU_Config=()
+881:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=06069a23d29d9bb6910729ac49ce1466e4fc6185c6ca31fa54fe7dd3289c41f7_Device=CPU_Config=()
+881:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=87523dfccb2a9c8334d6810e33c2a2d3b6bc09db7623e7ae93ba4cea89b66a06_Device=CPU_Config=()
+875:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=27b03da9a0155039856b1bebe424d10d1b8ad768747cbeb851bfc0463edd5cb6_Device=CPU_Config=()
+875:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0ac57f7cc81a683585f810885288fdaa174de2497d00156b85e067653aad3a56_Device=CPU_Config=()
+874:conformance_NormalizeL2/ReadIRTest.Inference/Op=NormalizeL2.1_Type=f32_Shape=static_IR=acdcf37615b571d8a1275b71cfe0c43a6410e56f5f18db8e9d795e46aac73d0c_Device=CPU_Config=()
+870:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d12581f68d14d140f4b982b47b97000f6b666cd115483247d369fed87267556e_Device=CPU_Config=()
+870:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=52ee24baa14f302174ce3b13a119ccb6a54994413daa1f052a75464528b07088_Device=CPU_Config=()
+868:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=43a00b4dc097228af52c00054951dd5b57d8e0086207f11a8996e5ac880c8980_Device=CPU_Config=()
+861:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=20c2030cdd180dbbfad1e5b8a4f865d1757a9d427c3d5ff21651a429369f4341_Device=CPU_Config=()
+859:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2b59c9f67435c46699dc1c66ee7ddbdd333bfa544d0aef7bd1389db2635868c7_Device=CPU_Config=()
+856:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c9352ef8b6aae01025051f9c73f023e7b5a13f8987f81bfff4ce0ff9725c21b5_Device=CPU_Config=()
+854:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b01e9e819c2e610a4fdedcb693f536f99b9dbdeccfcf9b0e70dc37c19c365421_Device=CPU_Config=()
+854:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=31e75a7408a46928e1a3a8babe3da21bccc6d442f87291c0b2bf57b29e18face_Device=CPU_Config=()
+853:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=839faaa50aafa2e3ed38fc682d0759304b694043dac1a242a085e2973aac8091_Device=CPU_Config=()
+852:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=40c74727a381659b1343c4083d7f903ac2519d5297703fd15979a32f820adfcb_Device=CPU_Config=()
+851:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=46282ba6f0eb5aac6acc1e114a2408cc301300a027c6d7a05691928b5e6dd9dd_Device=CPU_Config=()
+850:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=f32_Shape=static_IR=f5807b455d9031305e8225f2b65fd5cc289f61785d762f19a275280085a2e5e8_Device=CPU_Config=()
+850:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1eb25d18fbd1070f2a8ff803d76077d092d493f9e9df80e93e2f58f3621a121f_Device=CPU_Config=()
+849:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ab760f0d90b0fef133a0555cb2a5d40fb525aef88e6568c5387a87d7e82f67f8_Device=CPU_Config=()
+848:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=f3d84b4cb7f301c6b64c64927dd1e8c20e144671419843ed3d20692f0773445c_Device=CPU_Config=()
+847:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ae9604aa3fcfc361f87562022cf6996fb2cdd9c356eed6a6eaddb14e103b6b73_Device=CPU_Config=()
+846:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=429b91023f3ae9a323e40ed372fc29926fcd6aa7a8e77e4ddaaf68fa648c43b7_Device=CPU_Config=()
+845:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=874c0fa19029457645c4cff20769f66ba7aaa1a35ade84c948f83aaa9c1ead19_Device=CPU_Config=()
+845:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=09683cb2a0a44acb804a2791ca93bf004bfc3882c11af94ea67a9fc1eb1e5052_Device=CPU_Config=()
+838:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9e0cfe97e08c7b2974ef224799ccaa3fa777802a5fd320a089e527f00a594dbc_Device=CPU_Config=()
+837:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5288d099588f5876e907c5cd750c9f0b2191d1ea060881e80af1006cfad259ac_Device=CPU_Config=()
+836:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f729a1e882f1894319a357f6c5474552e883ae9322cc3dc399b3a292b13e6de4_Device=CPU_Config=()
+835:conformance_BatchNormInference/ReadIRTest.Inference/Op=BatchNormInference.5_Type=f32_Shape=static_IR=8f1629e9b003409304f12c3e315e8ae8246b3bc80208c3f612d5c5c179082a7b_Device=CPU_Config=()
+835:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=9d26a7c321db2d87b29b93baeca20dd25357e7777261ea6a4cbf968a203969ea_Device=CPU_Config=()
+834:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e044b25aa265a98dcd0a5cf5f7132fdac5f36074068dc2210e04dd4c459aad61_Device=CPU_Config=()
+828:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2d38082f8971dd7614234070dc9cb8c9b6b12fee7dc918503f0e256ab32d2fef_Device=CPU_Config=()
+826:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=6fefc3626ba6ef60433d3635bd5abeb3e7025277a86e2fd9d92234ff099c303e_Device=CPU_Config=()
+825:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c599f8f5de2a73e08727a5e27e2f77989b4c5ce9a5e70e6b98ce4c87e8aa26f5_Device=CPU_Config=()
+820:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a7ad8306fe632a2d0c45a492ad2d21dbe40f2f9ea55074d602beb6f8dde17982_Device=CPU_Config=()
+816:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=12c56cc6ebb22e8e31d97e0ef640fecab5f93e5c5b2810c4dde56b09a7ac7f48_Device=CPU_Config=()
+816:conformance_Tanh/ReadIRTest.Inference/Op=Tanh.1_Type=f32_Shape=static_IR=2b026a0d21a35251b07099e31ec58c459b848602575d2afa67e55830e8f3f411_Device=CPU_Config=()
+816:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=23654f4a28ae697d81f49d72568e7f0657d5c15b82e173fd7381760ebcb61cda_Device=CPU_Config=()
+815:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a50644dc2d133df429ff4aa6a19ca9bafbf41d2948522e584fc5f417ad16d76c_Device=CPU_Config=()
+815:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=7244cd4799e0eab987f823edc7d6038b76afa7585e4663278be826124c5596ed_Device=CPU_Config=()
+814:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0bb9a29f02d37ba32dc29b4284f58e10ce59571799f58381d449c77655c795d6_Device=CPU_Config=()
+813:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4104a7840dc96c214be896cac75911b70baebb902a42a26f12b281bc2cd87318_Device=CPU_Config=()
+812:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=59eaeea8102953f8ffe85ed1ced2a44ddeed77ec237608b45be0573bb32b1104_Device=CPU_Config=()
+808:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c9fa96695ebc82ee5e83b4cde8910e54ce09611f304f24fb6b3faa692a21c60f_Device=CPU_Config=()
+807:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4a80814933ec1c6198745b1caa4d5b7c9171395b6d8a53cd791dcdf64fa6c91b_Device=CPU_Config=()
+806:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=55d83e2240e88295a78084f92162888c9b0beef46ae468cd7ab93a1c0a432835_Device=CPU_Config=()
+801:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=93a9a06d14c3b4d51891ff0e704c74dae5905db9b5de06d31379f33fa685c80c_Device=CPU_Config=()
+801:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=28dbc474828462a812108c43a47aa4e70fa0d2e8e814bef5916092f3e8c7a2fd_Device=CPU_Config=()
+801:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=bbb0129fbafd6d1874ccef37a1bb60379733012c502d58326dae70f413e387f2_Device=CPU_Config=()
+798:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=83d90ef3fac993f7efba4a8ed369781571b1b536af03ceb0267ae979379e1dd9_Device=CPU_Config=()
+797:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b0dea4cb6a0cd2380e8657b0b64caab43819c0f8182ed73b2cb12eec608bfa7d_Device=CPU_Config=()
+796:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=09d4b4ea324f91ba6006bad4c82ca08e723c83c1b862d8075475e986696220da_Device=CPU_Config=()
+795:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b66a71c512cd06f5dc1d1a254ba0128b606c1c41b860f272dc1d2514502c2350_Device=CPU_Config=()
+795:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b3fdb9be3484a0c498bf40f1a102c452eea04caa5b1dd627e8267087df0acc87_Device=CPU_Config=()
+793:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=65afcce29f554c2dfbbb4449ea6e11f1f1b9b96aa5c8bf73a55796de849b58bd_Device=CPU_Config=()
+789:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=f286960ead5b83e3e4015ee5751b114a9d70e90aa788e0fb004ac50b95a8fa2d_Device=CPU_Config=()
+788:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=5f45e938f9e6d58ccc6bf771049731f2d9c4a8b0ed83e2a1942ac69ab76984b3_Device=CPU_Config=()
+788:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=c0eaf7f2465de396f92db5829a30b7d887dc26bc8d49b86f0fd0d688c7129e18_Device=CPU_Config=()
+787:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a52a8e6ef7bbeacbc1435cde72a1a70bdb8a3abf78b5b971c2ecb1135cb4c136_Device=CPU_Config=()
+786:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=1f29402ea664e850ea05d5f2e500f087a6165f1f4c9b3e5102b5509c020f0f6d_Device=CPU_Config=()
+785:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ae0e669fbddc34e8aaaefff248959e3fe53196e68bc1b3a9e66be16a495d7cd2_Device=CPU_Config=()
+785:conformance_Pad/ReadIRTest.Inference/Op=Pad.1_Type=f32_Shape=static_IR=fbb53c04f3cfadff9d6543e2fb4eb88d882c3189b4212e77a6ca6e50bdba6e07_Device=CPU_Config=()
+782:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5bf1e9348ae0ec7106a2231d8940acc74464f5ecf0cbc6a682defc3a9bc5c2c2_Device=CPU_Config=()
+782:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=43c8e8300f01242788a8cfdc37b48779f51f7ee7aef5b28e8de542320ba86e4e_Device=CPU_Config=()
+782:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=073dca39b0ed99c8af202a5e272db241f95de1f64a7a1611e83853b92e7f7f09_Device=CPU_Config=()
+781:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=a65e17fc28c74df4f3b1bad89635ccfc376a857f2d92ba646ca830b03eafab7c_Device=CPU_Config=()
+781:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=bba92f0e1fe2ee647564aec64223ab2c5b32d3defae9bad5daa5a24df76aac48_Device=CPU_Config=()
+780:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c777366b6b37df3f4a3b19b637f66b707fbbb113972a9eff7eb4d793731f8c9b_Device=CPU_Config=()
+780:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=9efd5749a1591709057d6e97334c9b5b89f5864d705c91774e0196d42966d1b9_Device=CPU_Config=()
+779:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=517a5eeb2f1f21304b8a1d5971f89bfc93aa678252180bdb05144657b1a8619f_Device=CPU_Config=()
+776:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a9311932565e68fff052e15c1a0522e1c09270d06521541ca28b67c34184b1c5_Device=CPU_Config=()
+774:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3a17c045930ed967b45d1606b78fdc92e736731b198465e95ed7268d99eed246_Device=CPU_Config=()
+773:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=35525421f74fa15c49098ff1c7faed4fe65763d72ed13add33c6fe8d4dcfb0ed_Device=CPU_Config=()
+770:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=bee11d430236dcbd0fb5efbae712d8d89d84beeb89e0ee60e0ba3ba9512079f8_Device=CPU_Config=()
+770:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=ef6e4b3910cac801199b1f6be74902b42105d23de549d426b1c4bcdd7361f79a_Device=CPU_Config=()
+767:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=df97761393479b4c56cc923a2b89888b7c3fb949f5c3a93f4bba0ac8a44178aa_Device=CPU_Config=()
+763:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0aa7024ee856fc832b1e639fbed60e1382c8e1b84f7cf2d33447f4bbd9ce75ec_Device=CPU_Config=()
+762:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=dc350b3fec164adcb096b8fc922e342cf7b0c6f7a4aa25074bec5566225cff01_Device=CPU_Config=()
+762:conformance_IDFT/ReadIRTest.Inference/Op=IDFT.7_Type=f32_Shape=static_IR=cf47311b142dabf10271ebf5c2e359455d9bcea82d95ad2a1a2d58915c77bb16_Device=CPU_Config=()
+762:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=8b8121ebbd51ee995f98531f595145a01ba70ce026ad0bee588733c33e70272d_Device=CPU_Config=()
+761:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=30eb0edc699f72085fb77a6cc31ad4aa9e62cf97befb64273493d234494fc64c_Device=CPU_Config=()
+761:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=eeeaf32688af20dbc39dd3705dc09fc804c0636d4d5807b003c002eaab1e79dd_Device=CPU_Config=()
+760:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=ade98724a678a73bf789fc539dfa277031242ea3a694227dae29c11b45cdfb9e_Device=CPU_Config=()
+760:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=2027d5da17dab73d23b4984fe88696fb770ba2fa479a194b3531d30ac75dc840_Device=CPU_Config=()
+759:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=c7998d9fa7e16dedd52f8cbe3d0814f2f3b30ee6d728881d64c4743e0ff6fae0_Device=CPU_Config=()
+758:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1c73b4d05053065f5c37954586376ae4e1cf9e220959363b7c2cb381f489bee0_Device=CPU_Config=()
+756:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f5d63cfc40e19fff35078633a3354fe5e3a8b6dbadbc89e20747398d87e02176_Device=CPU_Config=()
+756:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6e9fb2accb692c69349a88158442052e6350143ca7dc28f2525d8e8df29f8c78_Device=CPU_Config=()
+756:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=4946bdb7dec06c2bc8eae33d5903d6fa41bbf3654b13a0cb5cfa4af5a4720426_Device=CPU_Config=()
+755:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=cc2f28d736d3c67fdd13fbea9b8cef7c0b075f06b37034581fc732966421802f_Device=CPU_Config=()
+754:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=15dd996f113d962d9bb21424d1006af0aa28376a2af63d791a80f0ab95a604fb_Device=CPU_Config=() +754:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=876a77d1e2efb758a87bce1dd2fe35cd8e455c6f3dd7cd2bed8e10504c426de4_Device=CPU_Config=() +753:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=14f15558b2c7699f7877a9e04e1e0e7d2a2d7e1307aaca519a98ea5f39afc415_Device=CPU_Config=() +753:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=bfd899e1dd2a03f99d8b55d9fa5ab04c6e4576358c910e9bda97cf497f0418a4_Device=CPU_Config=() +752:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=86decc829c047a5febe7e5d047c689075810441a2f4725088317ef68d6c31239_Device=CPU_Config=() +752:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=150b1e03f5e8abf76f88e68ae56a3afc3cb3ae110fcb12af35192aaf93b20f5b_Device=CPU_Config=() +751:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=879bb4767167c3e9c45eacd08a14fb7e01b072864013784f924d62aad7b37c56_Device=CPU_Config=() +748:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a7b2c196b6ae12252522b2571af40b540eae94513bfbd88e15708fee816869f8_Device=CPU_Config=() +747:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b0376bbdfc6560184c2eb15a9cff7fc6d6b39c47dd22936fb64629d345e227d0_Device=CPU_Config=() +747:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=fe615eeceb735b046b190d844931c56223d45439021da3b6b23227a1f9cb73c7_Device=CPU_Config=() +747:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=87a966d3d3b90cb32db3454c5dfb2f67af86b68a5e45fa1c5f4a75c3b5cb452b_Device=CPU_Config=() +746:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=969c6a651dc204576d68d7d893ad2dbff1f7c74803b1763857d41aabdd19a72a_Device=CPU_Config=() +746:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=d435aa8d2d045d69b2d187147f90c879205f27346ac991765ba97bd47d4fe0f6_Device=CPU_Config=() +745:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=78db1c0e2c0fd4f0d351e66ce9cd31f7a6ee804cd23bc686b8c9081125b7142e_Device=CPU_Config=() +745:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=54a5630072fb0e0127611a4ae63db14b7c0fa0979f4d2be7bfec548b5291a0af_Device=CPU_Config=() +744:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=50c46a070e458a716446dafab20580095bfe902eeb4ad96c39bc2c617964c1d8_Device=CPU_Config=() +744:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=005e1b57ad768f4c8efb3116fe51bc85661c377e6632518b9172e8862d1c3edc_Device=CPU_Config=() +744:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=08fa156c3f25fc8836356fd1a8edb73222f9fe2b3476c0ae32a26636b5870247_Device=CPU_Config=() +743:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b339277c7465442a5163600e784319030de12cab4005f43c0b903bcd0c46e87f_Device=CPU_Config=() +742:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=008176749f0b2cb46830abe910865d8cf1974cd62902ce3e157a03df2b1cf9c3_Device=CPU_Config=() 
+740:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=319f74dd5b7a959d0e5443c76051fa5958463cd18ec11c275ef92b77321bb93c_Device=CPU_Config=() +740:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6964f870fd6bf44d1d5ee5925eee8892230b8928aeee1966db73b6c4fcd5acf8_Device=CPU_Config=() +739:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2fda32f5fe8957d151306845ffd0f877b2efad70f7bd4921fab2fd770d78c2a8_Device=CPU_Config=() +738:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f43df065734a36674b3fdc7a47fddd1cfa5c1b36bf73e7de86a100c645fbc7d3_Device=CPU_Config=() +738:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=b077af9b63e937fc64589d3007372d5fb2e4accc392ea09889a2519e3885413d_Device=CPU_Config=() +736:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=547fea7da34d5e65ad7ea069be003753e9ef281110c80dde11520bc350c4ca14_Device=CPU_Config=() +735:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=120b0e6b0c1f7bda754d62ac7c88e7c8bd9e96ddb85e7e5f29decdaa7c1cde96_Device=CPU_Config=() +734:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0ffc7d7836be264b6d9f26daa71a8c7100ae6bc6fa1af23614a2736226fbdf0f_Device=CPU_Config=() +734:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c4ae9be783990e398b3e8f0af76cab50d72c40c705677a3fe1c5dea592952d1e_Device=CPU_Config=() +732:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=0f3e035b6571da159267ff1f89b5f2b2d3bbd599760dc5d5721a1fb2ab2ea75d_Device=CPU_Config=() +730:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=65a5483c793396983edaf7f2cc2c13898507525bd84a8469e97b2d662b5df782_Device=CPU_Config=() +728:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=68c3856ae6a30752004a5ebfabb93bd0d98446a91ba7fd84e686226f45d326b9_Device=CPU_Config=() +728:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=f645a2275ff33ad614c801a8f2f262ce1ca95417e0ca59e28d4b87cf3289c00b_Device=CPU_Config=() +726:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a5e5b588f6223da1508413c42c21c3945994f492b039511b7ba2e576a052a52a_Device=CPU_Config=() +726:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=dd366f3f5b63fbfce3d9378cf0d8bfa4a909a973bc3e5e97eaa9d346c5cbf1d4_Device=CPU_Config=() +725:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=fa88ad79fad41544d799f0333f83b91322f2bb408689e27e53bd175786ed0979_Device=CPU_Config=() +725:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=246f55d43a6e986a8ba35f711c43dd32cfb1ca097598b0a01690d4765e0d5019_Device=CPU_Config=() +723:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a7eb49934c05ef49a453b19adf40a9d4c2ea9477464e8d42858dc9517c30b88c_Device=CPU_Config=() +722:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=64d7467cf7785e52814a8c25f96c1a5d82c071ced27dea8302b5cd69b464ac65_Device=CPU_Config=() +720:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c0413244803edff103b95dbbcab27b2c714740372ba215264371a9474355a8c4_Device=CPU_Config=() 
+719:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=8978b8e985b54cc12e2cefa8d9097f4a3a03d477129230b6c7e3daf8112e2c0e_Device=CPU_Config=() +719:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=71010d034cbc059af32ae6066fff1f27834db480e76042d1ef7bd1e7bc426a08_Device=CPU_Config=() +719:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=d575b00d2b6e155319fe7120133d8e0c3dcb5c79bda710b0650fa48543dc5c84_Device=CPU_Config=() +718:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=1a9779319a9cc5f21b6005ebb9b4517e0bb1f868ef8e568453a58c44474c40bf_Device=CPU_Config=() +718:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=deec30214c79ceb43a503bf521937a2bd554588775195d0e6302c521cd2b55ab_Device=CPU_Config=() +718:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=45d612bd5bc0895879f727cffcc13c978977a0aa10dfc726d00d6450faeff068_Device=CPU_Config=() +717:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=bd99ad9e1d756435cca9c6309caf45043f34c6c3c844f60e17deb8dfef4234f4_Device=CPU_Config=() +715:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=acc81187b83e3de7c3d0903f40daadcadff63455905c00ff2f98498f21bd68ea_Device=CPU_Config=() +715:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0ce1ec496e5d71728fc5daaba87809c5922406a65e85823913381de0d2112e01_Device=CPU_Config=() +714:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b83a85737c23e279f8878f6795581dc2b003c55e4eb8baadfbfd73fb0e98758f_Device=CPU_Config=() +713:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=dda009a1f3191e35286b7515f5741905e303f27287041248e2ce15f6954af810_Device=CPU_Config=() +712:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0e78ae14fcef33de9637ac99e87f672b3247ea32c221a4b555b2e5bbdff88788_Device=CPU_Config=() +711:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9ba199e71a3ff06e6bd330e453a1e1103599902893fc267c60da9ae47575a8a0_Device=CPU_Config=() +711:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=2e586703f4f8e9285249881d509a2a0b96d4758be5f97d75e7ee4f78951c58e9_Device=CPU_Config=() +711:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=176c218db11ea18f367fdf98a3de14e9a9c65152bbcc39783c38772b37f6e9c2_Device=CPU_Config=() +710:conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_Shape=static_IR=e178ca7afdd75b09f1ee18e50afd30eed0740497637863c3397b5a75c0f8bfd5_Device=CPU_Config=() +710:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=7754523e2d3739481e051eb21a4347f2d157e94db3c37d47f0006ecd8d77d512_Device=CPU_Config=() +709:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7d3a099a5040e70c73014df347c478d0976123d68b6fcab6bf767f90bbdf8e6a_Device=CPU_Config=() +709:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=b574ee57274a9f27f6d0908cef2645c458983225e3cb82c455148e83337ee3ef_Device=CPU_Config=() +709:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=c1c38223834d99f4481cb74db2bc302710629de5807b4f08381fd01655b9d44a_Device=CPU_Config=() 
+708:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7d706b614d2b5d59c5e152bbb61a8fd558686bb3b8e9fda199c499ca49f03042_Device=CPU_Config=() +708:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=49b05f6b6a636d84beca451fdc1fc81e3411a100ea105fbcd49ef72ef1fa0934_Device=CPU_Config=() +708:conformance_HSwish/ReadIRTest.Inference/Op=HSwish.4_Type=f32_Shape=static_IR=ce108d9befa5ee87b0161e969c5ac986c176e468ecae9f66895cdc4fc6bad940_Device=CPU_Config=() +707:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=6e6c053ee1974a5d036c6d549508f6d43586d501c72db05df9930639ad745bc4_Device=CPU_Config=() +705:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=88e65a668c1bbccdf69927ed3926a7c273c97f72a7059d1d748ba6b0da8492e7_Device=CPU_Config=() +705:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=69c68c20edefc8789e62a7cc8a0f8fe7e649f884649ac30833fb5a2ce43c4098_Device=CPU_Config=() +704:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=45e4a607b0964915174f6a14de049a61a5740f258a4a71817e5aae1b93be5ae7_Device=CPU_Config=() +703:conformance_Sigmoid/ReadIRTest.Inference/Op=Sigmoid.1_Type=f32_Shape=static_IR=697bdfc59094203ea1616203d64759a40193f1a23a4a51f11340a7912e355cd1_Device=CPU_Config=() +702:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=21a3318957d920b39d8b3d84c76cfd2a5ad98515824f88145326deead0961486_Device=CPU_Config=() +702:conformance_GroupNormalization/ReadIRTest.Inference/Op=GroupNormalization.12_Type=f32_Shape=static_IR=139730a541ba475f22b71d8bbe850f280751594db3560e15590939b2f017fc02_Device=CPU_Config=() +701:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=e5092af5c0f683044b1df5a45f211f4a692436d1112181a5d613bbf335941684_Device=CPU_Config=() +700:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4c794e0e6b27bbef5d21922537d8b23d0d2b5955622c1f5ee724a4d8faf2c86b_Device=CPU_Config=() +698:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a917525b3e5a37fc2be5f35fd5a3d50b57627cd9b985333e082b169c29f848f3_Device=CPU_Config=() +698:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0495648ac153ca7bb07160aed49b620b855a89b368d363a22fb45ff3428349eb_Device=CPU_Config=() +697:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=e7895756d4bbd8fc1d5f9794410daea2a42c1df95f57627cbad46e6787e6aa5b_Device=CPU_Config=() +696:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=cb7ad9dd22a7bccd73ade4d4aa78f9a25cc2bb7f0c08a01064491200089b3718_Device=CPU_Config=() +696:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=789949951bc3afd20fdff943ca2a706f79eb4f95be60086ddf632b43c3e401e6_Device=CPU_Config=() +696:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4694d5512c7f6b97213ae6c93eb6f547e57922279edf34b94a8e45b7f6a9a980_Device=CPU_Config=() +695:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=29bb3b751638e157d0ba7114cc0e156a4b792a9dbb2bafa3ca124516595f01a2_Device=CPU_Config=() +695:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=static_IR=5be0b1c69be525cbddd7996b695c1a4a9f380173d03f291e8570df76c050678b_Device=CPU_Config=() 
+694:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=bee11d430236dcbd0fb5efbae712d8d89d84beeb89e0ee60e0ba3ba9512079f8_Device=CPU_Config=() +692:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=cd2470c72fa7d2238d2eca4d067e49a02340ad187681be2fa7e0bac6eab3500b_Device=CPU_Config=() +692:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=717ea579a24839ee9c5ba7c59a07af667fea4fd44ee18bf60e8970264852bde7_Device=CPU_Config=() +692:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=13e9472dcdeb5e6ce2928191ed13dde08b6cdd62c82c94e77469d8a3ed94e39b_Device=CPU_Config=() +691:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=be720054cd6d960249271114344ef2f4f36e2a2208376df70d4395a82386dd01_Device=CPU_Config=() +690:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d9db827de158568b8a10347c13216e92b37ec20d8eac92c38aabd86690114805_Device=CPU_Config=() +689:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=662ca1fd253f0a0c29b89eb1310ea5c7c87895533130ca1a8b76f791ef1ad99b_Device=CPU_Config=() +685:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=a3370e3b46f385ea6e46137d49d5f1b4158fe08d0a3e9feb47a162f6b3640951_Device=CPU_Config=() +684:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=cdc57df56ccf890a00f886c3b83f504d24ea9d4ed5f0ef05f1189879172777f8_Device=CPU_Config=() +684:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=65afcce29f554c2dfbbb4449ea6e11f1f1b9b96aa5c8bf73a55796de849b58bd_Device=CPU_Config=() +683:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f01152d615a3092ffd4ad1059779ea183d7a62c1ab5b970d940f3f537e6f12db_Device=CPU_Config=() +683:conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=f36a3f626860d7088b33d97a5a6ce009c89609c142158b256aeb6b5e6dac02d0_Device=CPU_Config=() +681:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=0d413b2d40036984ce2b85933c4b5ffda416e8779a20b602095d2654db296d58_Device=CPU_Config=() +680:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=e7e985d4d02762d236131e74fd867acff1828bcd4c4eb32e190de20eadb831fb_Device=CPU_Config=() +679:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e2d2eef3e776af9379eb35540d8f4c888491082d8333aeb70f58822aa5cee878_Device=CPU_Config=() +678:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=47423c3e9443249e3dbbf58ee0f5b69b15e677f84de44ddb9d2851d1341dae96_Device=CPU_Config=() +677:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=76ef553ce6e6b782a200e030fcb744ed737623fc3a8c9c8faeb0e05691c5a55c_Device=CPU_Config=() +676:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=ff96b044b0064dcc13dc7c1d80f2b2cddde0ead8c4501d5d741034833079d47b_Device=CPU_Config=() +675:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6289210c93bab9199850c9aef5ac3144ad0a900007dbca3e889a9f875318e9b5_Device=CPU_Config=() +673:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9e21c0af425c90066d92577a0b8aadb6e9fdee50c197b15eea040b89eb715a6a_Device=CPU_Config=() 
+672:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=3209c1cce78c7b856203c0a5676f6fad4d098a3146c7305ee3c0471b3be2e3d5_Device=CPU_Config=() +671:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bf4d5291899ea4eccf6584f62d4ecdfb39de79edd102e509f840664838f59d19_Device=CPU_Config=() +670:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1e56a3e2379d29d81af93174e56ef91408af41dfc085d4851ff58dbec781b8fa_Device=CPU_Config=() +669:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=97a94ab826d2992013df32a4f93f6adbc38ad17a26503005046f68904adf53d1_Device=CPU_Config=() +667:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0d40552a1b6c1945765ada16284a0c03f5c1454fb12f226a34dee8a07b14f17f_Device=CPU_Config=() +667:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=db85fabcfcf049a7225468036e29c949eb779253ba145485205596e72cb8cc7e_Device=CPU_Config=() +666:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6a05cd292e71af9d96e456cbc515097d5224a9e41cd9c3d48cc73f1a4e6e2164_Device=CPU_Config=() +664:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=a6b95dd49e84f2860b57f1f1ab6fe2baa265bb757112e53def3004a360053aa8_Device=CPU_Config=() +664:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b83a85737c23e279f8878f6795581dc2b003c55e4eb8baadfbfd73fb0e98758f_Device=CPU_Config=() +663:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1be99c00176df777bd8cdbd9f74ff064237f55053dc7490050d692274182182d_Device=CPU_Config=() +662:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=851aa3cf931a01e0188758055b866fd14280bc344f548da6166e4a57ca7c9254_Device=CPU_Config=() +661:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=562ad06104aa1fed1781e5e3438d71855e1ee7e0126457f2d8d8d415f9c30c03_Device=CPU_Config=() +657:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1ceb1c4ba1a45cbb5cabe7cb4b416cbfeb93f24533c8123e4c2315cc7e9f40a5_Device=CPU_Config=() +656:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=36783f31e83ed0f978f00a1cdd87a25b4b881c251fe059e5d2829be3d0b45c5c_Device=CPU_Config=() +655:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=e2d1f4fde3dc1889d4f86004173ea34a9d9836f645730727f5cdf90bc0738361_Device=CPU_Config=() +654:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=910dee337e395f94d7673f664a3e58647ead8bcedf50ea1439250bdfe8da25dc_Device=CPU_Config=() +652:conformance_Clamp/ReadIRTest.Inference/Op=Clamp.1_Type=f32_Shape=static_IR=cc989fde083332a75d3066112105028a711bdac4fc44463d098022774da752b7_Device=CPU_Config=() +651:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=8978b8e985b54cc12e2cefa8d9097f4a3a03d477129230b6c7e3daf8112e2c0e_Device=CPU_Config=() +651:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=965ded994c427ec62353194906203c202a52dfc0467196d5f1143759fed94b07_Device=CPU_Config=() +649:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2acd53645519bc460dcc71923563fd462ed997366cc7ae08cb5a30245302a859_Device=CPU_Config=() 
+649:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=3b4dbc6facc24173723b52757e4ee60953d7a639e1fcb6e70236918d6a40b3a5_Device=CPU_Config=() +649:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d4b1dbc565a45f6c9f60cd4a73bb15c0f9e05baadfd3acdcd5e133d782c54cbb_Device=CPU_Config=() +649:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=970f3f48203f3bd46dcd6ca55ad20f5ff8ad2426c3f6f74377759fdddaaf93cc_Device=CPU_Config=() +647:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f06ff28476f886d4298a83d39f88aff34399d5cd589e0a6d6395e00b0ad96876_Device=CPU_Config=() +646:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=5f43b4d027388fff204c9c64df9f62bd2a72034143bd655e45121ca886c5d15a_Device=CPU_Config=() +646:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6513dbb80f00e325d6dfc953d1208c5834199f75a60430fc85925ed6eb0d9bb5_Device=CPU_Config=() +645:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=83d90ef3fac993f7efba4a8ed369781571b1b536af03ceb0267ae979379e1dd9_Device=CPU_Config=() +644:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f208ab78a0ef0497856952f499578a17818269d066f4281183ef92ac2f9ce449_Device=CPU_Config=() +644:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=e8c2981885674129fedb6fc6a376f3fd3db7bf6f9867ee8a3f4e5aede63ee168_Device=CPU_Config=() +644:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a3d6337c1ea3e8b67256696ea4231da4fc0e9d9f8bea169607a1287233086b3f_Device=CPU_Config=() +643:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c08b3d30c1b4f1b5456e4791d4d7fab1d21f743dff0dac1ae5d09abc6764fca8_Device=CPU_Config=() +643:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9beef927f57c512d381a87a35982fe4ca7a00b9a9d50ede54f7baecc5ec7fa0c_Device=CPU_Config=() +643:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i32_Shape=dynamic_IR=60bd170e816e0c2345a1658fd88459775fe8b7cce5de31a16e4e6cdea199f264_Device=CPU_Config=() +642:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cdd7ce044f231ae39fc0f7460a55473c0de6934124cd263444a5912b8cbbc0ce_Device=CPU_Config=() +642:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=a3d8e1343e43c666358304b530278c73bc7c52a0d7fff38977154b6f7c456731_Device=CPU_Config=() +641:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=327d5120022c693531fe0f1f42429b1ad78f36cd5e414f1c8bab7d0c2ced62f7_Device=CPU_Config=() +639:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6adce7c66c1630295ec8938bcb429f20b628b0ceed938bf81ac0fca8580f8d34_Device=CPU_Config=() +639:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=eabe482de99e120ef1260cc91a746df95f8db04fa1cf6832dc45b3ee1b38f9c5_Device=CPU_Config=() +638:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=d8441d8bc521ac390fb58cb882a952618ebf5892d40e8768a9051f852a9dcfc6_Device=CPU_Config=() +638:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=adabeb9321a0770cb065345aca787cbf7d1adef68132cc5c7d8df885ea64ab2c_Device=CPU_Config=() 
+637:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=f0ae8e6b136d1db7e5e7748c03eeaed6907460d3d3941fcb1a6651cff61be113_Device=CPU_Config=() +636:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1891282a9bf460336bad3c354519aa0d87ba6ef40876d4a07592194d2d678e25_Device=CPU_Config=() +635:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=b6e76f65817017d4cbe3504634568430a419a30e418a5febf75b89b566ca3631_Device=CPU_Config=() +634:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=be4634976e408d799217398ce693fe430fd46cdba6c78e01e9b824c208856128_Device=CPU_Config=() +634:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6e73ec183893b70ec42a4393f3b1b7c55767a14f630eaab0c3e3b6d22c6b8e26_Device=CPU_Config=() +634:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=i32_Shape=static_IR=6650e462a4f0086329d8576eb6352979e89825517f48e264fe719c7c5ca276fc_Device=CPU_Config=() +633:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d1b4dff28b71e41d8106d3730f2705e537487aafe0dd53ae7dfba9ec21724287_Device=CPU_Config=() +633:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=d7ce9fd9d99a7ce9ebb5fdadb4db39f4ea66f74788704b2b9f96660c7403c031_Device=CPU_Config=() +632:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=aecc8a062c16343ac138f351d774858b523e42d5a09ab67b1b61e64fe62e73ff_Device=CPU_Config=() +631:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d9937a6c3eb62ad6328d7367f15e45758ce5f2ebc0488931855a5b1925574d36_Device=CPU_Config=() +631:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=dd575df40c907e85f7561296f2b1b5bb9786bf44bc27f26e33f235ba57391e26_Device=CPU_Config=() +630:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=64358a022d0c072ff89427a2f3acd3a3afb49b8f76e57353eb95962fd2572ca9_Device=CPU_Config=() +629:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i64_Shape=dynamic_IR=c117722add2db4a6eee4dc2fbfb99174911d54eb3896c65097d31d656fdee639_Device=CPU_Config=() +628:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=243d5b44a22822e90c2f6b7c2817f8110bd6a060331e686c1fde1869f3392db1_Device=CPU_Config=() +628:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=1b46ce72aadab0dcf92991f242e971bbb36689e1bcafecc68d646aace43291ed_Device=CPU_Config=() +628:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cf02be750ce25545f7bfd694603192667eb3fdb07a186eaa7f3ecf5767547651_Device=CPU_Config=() +628:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1180dfe50d43ef6b95980bafd3b84816f6d249f8341b03a6f67d20bd8f8ba6a4_Device=CPU_Config=() +625:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=8c43b49d99c64bec883205ca15c7b2d9dbb47b9fe5140fedaeb8eb7220a36f6c_Device=CPU_Config=() +622:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c6e38c3297ab303b166e2a613203a1f09f4ba5a15659c8d2b233febd8fd09d9d_Device=CPU_Config=() +622:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=79a6d2a402cdd74cf1277a57ff95b71d61384da394ad2a4d9ebcf422eb5c3258_Device=CPU_Config=() 
+622:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=c5f54dc9ad0b693c13c07d44fe5572bd91852b0edd57f8f06314df3e71f3659b_Device=CPU_Config=() +621:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=d04bc06efa76ef2937aa1539893ec9c79ac61c765cb50cd4a26dbf5586bfc904_Device=CPU_Config=() +620:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=89ed1d3c7fa6e15c01df3b792a183ade5b90edbb87886e1d58db075566b60a92_Device=CPU_Config=() +620:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=83e2d01e24eebe910418ed24fb506852c37576ce70c18d27de197f675f49c9d2_Device=CPU_Config=() +620:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=362638bf065f1917d2b4dac3008a8f46f8f8d64a80d2442c1ad98f4fb943cff9_Device=CPU_Config=() +619:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=42062545b0991e415aad8d29d47de2a278e5791996ea55974411694aa821b54c_Device=CPU_Config=() +618:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=37f1a0a9bb9b948ed78217a65a5a2de7f0234b1e000fe5ee11ede68767240f1b_Device=CPU_Config=() +618:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=6cf01dbf95872b3fc0c914e73415ed8e4dd52cb355031002a65e3e974559d6d6_Device=CPU_Config=() +618:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=8f731757a7c32fa8e4d602d7197af81a1a82ea228ec05f4baeae7c59eba11f2b_Device=CPU_Config=() +617:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=61f6b4fbde686888b82614a5d24cac53e835377c4cfa791ace3f3cd3f8ac2dd8_Device=CPU_Config=() +617:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=4d2e12e00779d116e2192ca77f2be233d76bdd5ce366ddabcf436cc205a9f811_Device=CPU_Config=() +616:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e58cf21c9c62dd427747021dcf9544157638e0773329eecfb8755a71b24f65a8_Device=CPU_Config=() +616:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=d98330d8f9f03556036d103fb4ca3f8436be42fa4f0b21b185aaad3abb2fb53c_Device=CPU_Config=() +616:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=cda3b9bda63d065b5c27e6bce5ffe20968024d77efe5e174a9f4395db56a30c0_Device=CPU_Config=() +616:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7f30f8f46d999a18110b8f8f9235b3534249be45e55f1aacb419126ed1eb5851_Device=CPU_Config=() +616:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=15d323a190bbeb1834cfa08a3afc633a2c203e44e2660bff4e98453c02ea4cfc_Device=CPU_Config=() +614:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=431db89311a543581d104e2a2c498fe021da2e4026323817834670bf5bee67a2_Device=CPU_Config=() +614:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=131fa1ed3ff9df038bbed73979ab906c3d84fea9dd2cf5dedc82b3222d511b1d_Device=CPU_Config=() +614:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=3e4364d93433ea741efe178b0c83cfb13c46259888aec468f59f77cd3f1bb39f_Device=CPU_Config=() +613:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a29be1e2e5f78c12657221f33e5309470a7a4dbb9061a8100d7c454215198f7c_Device=CPU_Config=() 
+613:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1e04d36f6e56abacf8388fad66368b15355eed9d216d5771b650b0b058db3a76_Device=CPU_Config=() +613:conformance_CTCGreedyDecoderSeqLen/ReadIRTest.QueryModel/Op=CTCGreedyDecoderSeqLen.6_Type=i64_Shape=static_IR=117fa486a51d9715d9ba1ad90cb5d6741e762cb36ea55a91129f1947b4886649_Device=CPU_Config=() +612:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e5249d5630503351688090f1a9d0143b02e750045924aee8f9003072446583f4_Device=CPU_Config=() +612:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0b7d6fb137555d6fde92f0c9b3e6278715adaeb38cf760236070b17bafb5babc_Device=CPU_Config=() +612:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=8bc8753f4d26c5d1f2ea481937dcce0f5b78971f18f5ebb258f49d4a0d86a333_Device=CPU_Config=() +612:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.1_Type=i64_Shape=static_IR=26d97c755f660ed8ee08a0de8d6ab88598391cc79b239bfaf0a102722ffc4bf7_Device=CPU_Config=() +612:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=41ea59b807081adea7869609c65776a42f88079ec22180807905d5c2e8ca0777_Device=CPU_Config=() +612:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=27d1a1cfdbadd9a8c2d0269f6177d6aabd55320aafe9a0047e90681dcad1cbe9_Device=CPU_Config=() +610:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=eaac9340f5625cd59856645684fd84a5f1f0703dd3748eb85fdff2eedd8ee64a_Device=CPU_Config=() +609:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6289210c93bab9199850c9aef5ac3144ad0a900007dbca3e889a9f875318e9b5_Device=CPU_Config=() +609:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=4b00183255fde45d5c3b815b552e5a4279284bfe1ceb31389560260ad5546c14_Device=CPU_Config=() +609:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b15fd62115a849e0b5226ebe9162cda9371ad2783637a518f2a8724d24710253_Device=CPU_Config=() +608:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=351e48db45e09ca6c4bc54a271eda4cb2ddd69ba43f361b9915a6588913768b0_Device=CPU_Config=() +608:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=2ebbd25d315f10aa32cd8251ced4e269c1688202ee64b4fb5245e4ab53cba16b_Device=CPU_Config=() +607:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b3e45847dae7906b7f320b6a751727593b35ad8659ee80a11caf445f44f392df_Device=CPU_Config=() +606:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i32_Shape=dynamic_IR=1af860b153ea667f413c7de4c98752d48ed8ac1fc7f90889009a2916e2ab1026_Device=CPU_Config=() +605:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=dd575df40c907e85f7561296f2b1b5bb9786bf44bc27f26e33f235ba57391e26_Device=CPU_Config=() +605:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=ec60ac68ad3b748ccd56a7c91b3a2461510f05d66e4b64e12a2069483d8243ae_Device=CPU_Config=() +605:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=ce2bcc21fba106cc8be4846179a73cb30f650e7ec48d443fed591f6b479fa9d1_Device=CPU_Config=() +603:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=dynamic_IR=6b70264ed3eb3831e0e034230813ce1a1e71c157a302822b56335e587bd200b3_Device=CPU_Config=() 
+602:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0534fdfa97228a6aacf4ed196a9ace8e09d8e4decdcce058176b0312500b6c07_Device=CPU_Config=() +601:conformance_ReverseSequence/ReadIRTest.Inference/Op=ReverseSequence.1_Type=f32_Shape=static_IR=a5cc0793d73f7f76fc02b5ae04ef2a29bf212ce5c59f9bbef91e0aa5ee17785c_Device=CPU_Config=() +601:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=25f55a7cb5f72689bff67eb95af15c64b31c2d29bcde97611e74917fa6724ff3_Device=CPU_Config=() +600:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=356e2a728749d3970a85939d23344315d0ff533567c35a559caa3bef173b76f7_Device=CPU_Config=() +600:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d46034925bf5b01e31b5a57911fe30f5dd09a8712432312fb1efd844e69913bf_Device=CPU_Config=() +600:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a4772901ff77541ae624f89db89901c7d5a502a0dc5d1e0dc21eb8e08c599525_Device=CPU_Config=() +600:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=64d3761db7bdfd0de19878c66fa4465d084f7462c332fd978de458e328f97875_Device=CPU_Config=() +600:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=99b432aa5821136994e06b4e3c690a4e298bc5a496740ea2c5fe6aa300edacf8_Device=CPU_Config=() +599:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0b4b74693c2ec96e714901b1acc772655accc3b29170cdb64ae934003338b296_Device=CPU_Config=() +598:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8c773c776396a2ff506691f857349efa9a4a580f1e046d1f17ff2ab49c73553d_Device=CPU_Config=() +597:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4d569fc3e7d2fa1724c99fec62e4f31fb000a6f5c306273c404e2b449761feba_Device=CPU_Config=() +597:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=80bc3dff7b0901463ccc52bd8e4a8e7522b1e9768421de45e63bdf8db601b9d6_Device=CPU_Config=() +597:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1e04d36f6e56abacf8388fad66368b15355eed9d216d5771b650b0b058db3a76_Device=CPU_Config=() +597:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=059046ce67f6b09ef45aaad5724e28fdaaf40afb92613740fd058c974a120d3e_Device=CPU_Config=() +597:conformance_Abs/ReadIRTest.Inference/Op=Abs.1_Type=f32_Shape=static_IR=083771171646a2eadcbb3384bd457e04d74ce8ea771813cdf67c56f7bbf20c69_Device=CPU_Config=() +596:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=fe70e0ee3f24f0bfe4391da7797647a01f66fcb109b481ca859c9f8f7dc7b411_Device=CPU_Config=() +596:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=fb83c1c4a2ce0a8860479916f23f3961a5c20481e62de79390573dd7859c09f0_Device=CPU_Config=() +596:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=29d8ef1a41f51b6fed0300f97d17a3795a97e4ffb3ef3abda37f790f5f53b389_Device=CPU_Config=() +595:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b2ca18b9d9f9e7c05f66a1f197b65ef9ca1d59319ed5f30d4eadf6f8befcd9bf_Device=CPU_Config=() +595:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=dynamic_IR=a3add607f5e37633f3298794f8e32e409e3403666af3c0fc57c7d4427b714eca_Device=CPU_Config=() 
+594:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=13c78a6d628bed4392d2577f132f924d9e17a7e29a2171dafebc0a596d2ade04_Device=CPU_Config=() +592:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=75bf24e3b7a4c4374c5c92331d9e48423d734d35b5cafb951222e39ea4c29613_Device=CPU_Config=() +591:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=eaac9340f5625cd59856645684fd84a5f1f0703dd3748eb85fdff2eedd8ee64a_Device=CPU_Config=() +591:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e4b374c3afdeb45605c3ac745c03fc9eb938cf3f3828c119917ca92a6e9135f0_Device=CPU_Config=() +591:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=c202ffc0e1805a36e48ee4b06d06b68a9f179eef00dc353a092a13818e8ebbe9_Device=CPU_Config=() +590:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=dc350b3fec164adcb096b8fc922e342cf7b0c6f7a4aa25074bec5566225cff01_Device=CPU_Config=() +590:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2a819b46a29c8bd965ec330a28b5c163dd0a06fa2717d71bd16493ad460e8dad_Device=CPU_Config=() +589:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=f8662769a2f3a5fb20582ccbb1931b7e3fa73ec7713eca30362b0e7c0baf829a_Device=CPU_Config=() +588:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=962d8a421369e4dac96b6d89d05053f63c9e5fc8b7b82a60c922432125da80c0_Device=CPU_Config=() +588:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b0dea4cb6a0cd2380e8657b0b64caab43819c0f8182ed73b2cb12eec608bfa7d_Device=CPU_Config=() +588:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a7eb49934c05ef49a453b19adf40a9d4c2ea9477464e8d42858dc9517c30b88c_Device=CPU_Config=() +587:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=cc3619fbe03f9b98ff07babc5c11f9bd9f26927c8d793abc7188595145bd1371_Device=CPU_Config=() +587:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=81973bc511c12f7470f620b3484f6f7c82077975f916e080091dcd4757268b17_Device=CPU_Config=() +586:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c1ffd0690c9370725a30028d2915ec798aff173f86a1864f3dc92a4defefef85_Device=CPU_Config=() +586:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=dynamic_IR=e46ec3487f18188d1da4c029a2981033018c1f8f273f60d3f7d1bcbdae18c2c5_Device=CPU_Config=() +585:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9360fbacf32f2208bd7f241535752ccaf434551d16bd8fd46d0422cd1cafc3c6_Device=CPU_Config=() +585:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=839faaa50aafa2e3ed38fc682d0759304b694043dac1a242a085e2973aac8091_Device=CPU_Config=() +585:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=30466048a7da9db59d20a210af1979341f7b9552362e64a89357d650102a213e_Device=CPU_Config=() +584:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c990afda81456723598f8f4085cb476376b1789d7f755e340e1d5498bcf02080_Device=CPU_Config=() +584:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dd181ad2875cd08679b8554d2a85ea0fd15d7f09f733a8290f677fed6c757_Device=CPU_Config=() 
+583:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=4541365c567e68739f0733edba54e889f231026025e6866f805446392c575960_Device=CPU_Config=() +582:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3e1e1cd684c1bcfcf06febedcb4eb0f4f62b5c0920098fa0715c828e9a9761a7_Device=CPU_Config=() +582:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=deada5d69a05cf27af659254f89b4e53e6685c517fdc2bb8a250cb5d4ba0a3dc_Device=CPU_Config=() +582:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=a815b68b6a8d36546d3ac0112c60283bd69ae1059e8deeb98b21f538c8089beb_Device=CPU_Config=() +579:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=030fa97d19aab57ae9eb898fe101353fdc76bbc034d4574971c68ef254006c85_Device=CPU_Config=() +578:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=ba4f511cc4a0870c64cc5027fa39b2bf91a6e7f39ea36cd43a693eb59de6d836_Device=CPU_Config=() +578:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=12e7ea655764a32069a93a3f7ab147983bceeacc8a2bc88fbb2def005a1596b3_Device=CPU_Config=() +577:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=77e1c522d9ea4975c3071869b7b485038bb4035c9aae6f5d44291f60ae253a0e_Device=CPU_Config=() +577:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f66bbeb796e4da5d462ef573e38fe52db5bdaf2367b2a07aeedae6ce33c6704f_Device=CPU_Config=() +577:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=4479acfb061c41832bd1f2ff0de0141dde3a3c496ee4471523fac0a37451311d_Device=CPU_Config=() +577:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=41bcf70f8013164bdfeb7e348c05e6d43d9a1afc49087c49745679bc3aaf1e10_Device=CPU_Config=() +576:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=fcab2b4b3bf1a04070e3fd3490e6317f2d6870335d302d96c768f40da8565c8d_Device=CPU_Config=() +575:conformance_Negative/ReadIRTest.Inference/Op=Negative.1_Type=f32_Shape=static_IR=c29451ffff103b5e965a1bbea7994ef6da6394060855ee071b9e7a3a4702141f_Device=CPU_Config=() +575:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=df62dbfb022ab001a9df6b16311f57e744e8674fa8751b4e3a7ffee872fecc20_Device=CPU_Config=() +574:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=fe5cbe273830f6a09e3f18eaf8e9410f9f7f1083af508a9dcaf5f0f22aa3ac1f_Device=CPU_Config=() +574:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=() +573:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=2ce56cfc77884dfc61f7e9fab9a0ce04a4b016f9b3d13465cde1576b9374a2a6_Device=CPU_Config=() +572:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9575e384c23faea27b9011de8c0093099fbe0ee6462baaebaceb075529664665_Device=CPU_Config=() +572:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1269afc1a9f9a4f71ca2167cc59274b7a3bead8cca474162919619b810eb9c1a_Device=CPU_Config=() +572:conformance_LSTMSequence/ReadIRTest.QueryModel/Op=LSTMSequence.5_Type=f32_Shape=static_IR=981b213f0fd1305e70515849fd08553471da63e6bf64827a47cc475fd4ed9561_Device=CPU_Config=() 
+572:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=00709ceadeb9692263607310765b0957f34a8af1ebd17a13cc28d9587d360465_Device=CPU_Config=()
+571:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=259cf71b937e6d184948130afa5684d7539769988cee7a74b06138ad4d09c689_Device=CPU_Config=()
+571:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=a9d3d025df92369ee1f1a81fe676bb00d7d6cc488868e04d0e713fb9e42451a9_Device=CPU_Config=()
+570:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=7dcfe3f43645f6b9f3290b524024a1a3d48efa3ce346eacc2330be7e27a046fd_Device=CPU_Config=()
+570:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ea604e7edf80c14a14bf7fcb042125f4d666d0d69ce3c0209c2f9dce26d406fa_Device=CPU_Config=()
+570:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=739517c4c613063fc5ef734443f0a599400dec31cd5a56686735f3165b2dc2d0_Device=CPU_Config=()
+569:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0f623457008d91f7fcaead549e4a3f90a5ca77dd7c52fba19906f559c34b333b_Device=CPU_Config=()
+569:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ad4c3d2d3f258a4be14846d9d26203008e01b2832ff004bb8a23ff05c72747b5_Device=CPU_Config=()
+569:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=94ad9524c175a0e0d2fe22bceeac82b0dc66006caa0942d343d551268e03afec_Device=CPU_Config=()
+568:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=244310d1092f478729162ea9a4da5660b066ad7ca70a65d8a205cb03787eb73b_Device=CPU_Config=()
+568:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=09683cb2a0a44acb804a2791ca93bf004bfc3882c11af94ea67a9fc1eb1e5052_Device=CPU_Config=()
+567:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=60ab42bb613fe785777ed45bc99044f41dae00316065ed5e5f07e69f5c861fc4_Device=CPU_Config=()
+566:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=static_IR=35ab7a27cb56964d974f5e1b55c1ed76d7f9443f97da0b977370ca9fc414e093_Device=CPU_Config=()
+565:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=40876e66f31053b621aea004baaba7607b9131d4fff8e8b00ed7e1e58204988c_Device=CPU_Config=()
+564:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=4d9f16ede014da56824607d45502439f71b57275c332fbf15c6ba2ec1496466f_Device=CPU_Config=()
+564:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=214e4e8f7de64e9cc8c77c67d214172905cfb4b9fde65e2ef3d32bb7b4ed93f1_Device=CPU_Config=()
+564:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=117fd22d36b97216edb2112c043ba97872b9b7915d7909dfc395406e8ad91e4d_Device=CPU_Config=()
+564:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=d296b02cead8f38f8a2c9fa73ab8103d3050549c92fb807b040dd6e3bbd7e2ff_Device=CPU_Config=()
+563:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=65493d18baa6530c757031b74c5fbd51757e2b04bb79149d3acbf6c40bac11c1_Device=CPU_Config=()
+563:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=21a343909133e844b3d88a967b2f6c948e4c9c9eb96030b936f9517dd9bec865_Device=CPU_Config=()
+563:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6587874c50811a2ca7e27f84cb4381e9a06eb4465e940ea877c76dfaeba02753_Device=CPU_Config=()
+563:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=180e9c4ce23626df8010b5b79a28ecc4c6c75b65dea91938fa99025a65411239_Device=CPU_Config=()
+561:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=984e628a0090ff9d04bf8f41b795f0682dd3083fb78b71397a51cc2efacee247_Device=CPU_Config=()
+560:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f8795aaaf3fb96028b8cdcc963cbdff4c3362d78c4801af4172a73a3cd843edc_Device=CPU_Config=()
+560:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b53fa2c9b93d3750c17dfb8ef75e51c43881ee79fddc863d6c1c2adfeaeaba2e_Device=CPU_Config=()
+560:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=45959eb5eb391b2bc86455cb1e86aca76799c6b082437e72b15c171037a6206d_Device=CPU_Config=()
+560:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=b8e37f2c8e2574b3f3554578b72e9df771c290c1bb47238fc4de9754c6e6f126_Device=CPU_Config=()
+560:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=03e7b025285b1369ca39bcf887783a843fe06ea29f7f394efc8201d1b7ad3a09_Device=CPU_Config=()
+560:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=769e7bb56fd0d0fa75fed14765279f68841e300b1450909cdcc802d347446b52_Device=CPU_Config=()
+559:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e1f0357795d5676c5e4a38b6639cc90c924880ab961eb73e407b5ad0142ac0b4_Device=CPU_Config=()
+559:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6a7aa747b98a21c0469c7edf7ef78a050e1279d891b0c69ddc071befafd42c76_Device=CPU_Config=()
+558:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5d7273e7772d3578b3c8dcefcce25913c8e843b7a1045722f80f9feed4770ba1_Device=CPU_Config=()
+558:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=1f429cd9b96a0ae8b336e874e911d2cdb79820b76030c61de8a1c057a0c33168_Device=CPU_Config=()
+558:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=0f670e49f962b0a7abc6b4f1fbf9592db592a6a78eb3e083dd4027b9f9607430_Device=CPU_Config=()
+557:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f13dcb47235a9516298088a0c45ff56fdb7f95144da257a3dfa1c618c7373ce9_Device=CPU_Config=()
+557:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=75bf24e3b7a4c4374c5c92331d9e48423d734d35b5cafb951222e39ea4c29613_Device=CPU_Config=()
+556:conformance_ROIPooling/ReadIRTest.Inference/Op=ROIPooling.2_Type=f32_Shape=static_IR=baa256d53878b528f6bdba95bf1837cc570dd83b577220f95d9c24cb26d37c35_Device=CPU_Config=()
+556:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bba92f0e1fe2ee647564aec64223ab2c5b32d3defae9bad5daa5a24df76aac48_Device=CPU_Config=()
+556:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9fbf4ccaa68a81191afe2432a2212ee1a559df380d602459ebd2d0266053d82d_Device=CPU_Config=()
+556:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=d294c71f3796d2e2b88f819f6512ed03942eab440681a5bc5b092e5a34192107_Device=CPU_Config=()
+555:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=1f7f2d40b938416773b13282d8ac09d81a50e4d5d7548f42fc5fd575f84e1385_Device=CPU_Config=()
+555:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=0c491c1a451ad85717879e05678f7d1c85cc35d95b108087dd241b9b24b39ddc_Device=CPU_Config=()
+554:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=43d871d4b2b3346c08f8582b892ba0c0017d77688e16fd6d69f83f8101e12a69_Device=CPU_Config=()
+554:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=43aed1509066aa7c839a82c9865228ce3ebdfbe519061649807875ec6e86d715_Device=CPU_Config=()
+553:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=4555fb7029260c7e46403e1fbc99a3815a94373b7b08d2408277976173facc37_Device=CPU_Config=()
+550:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f2293320b7533e95bf000229d2458244fb9af573cd737ca0088a00674df1ac52_Device=CPU_Config=()
+550:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7a3cae38e357ee1e5b0400c7e1256cc8a2d78da81911fbbb3ae6d9e510d78aac_Device=CPU_Config=()
+550:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f069cbce6f4c3276869b6d9c4a6c843d7a1e1c9d299e8680218636b04339a9dc_Device=CPU_Config=()
+548:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=023f3573ef77fb592345c68ee5e6a79191b120f9cb68f81194381da2cf68f21a_Device=CPU_Config=()
+548:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9650ac479045f70fd763f5c95d0c27c3b3cc4d6fc00b43e8ad627d16f817f342_Device=CPU_Config=()
+548:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=8f3e3716e8a1e8647454d124d7538ac1faacdc1b95873ccc1a760e09d48c30d3_Device=CPU_Config=()
+548:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=315fa20f952b6c7678cc93dbfd340097847826fea7928eabcec46d7ccacdb224_Device=CPU_Config=()
+547:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=f61b45eec10f28e255a0f82842384e1c947830dc5d5618bf00c6385cecbab8d5_Device=CPU_Config=()
+546:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=899cf50d8feefa9c5e02f6fe88b79e66b59c4a53478755d51b3e82570683613b_Device=CPU_Config=()
+545:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a917525b3e5a37fc2be5f35fd5a3d50b57627cd9b985333e082b169c29f848f3_Device=CPU_Config=()
+545:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=94ad9524c175a0e0d2fe22bceeac82b0dc66006caa0942d343d551268e03afec_Device=CPU_Config=()
+544:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=53108cff3836c47360380f3898c5de245a566a5d98040820d78befd46e56955b_Device=CPU_Config=()
+544:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=7f30f8f46d999a18110b8f8f9235b3534249be45e55f1aacb419126ed1eb5851_Device=CPU_Config=()
+544:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=593116ea16692c8f5a8994c0562c47e1c627f9088c519b752a635a7d91973085_Device=CPU_Config=()
+541:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f1f52703006b7d81ccadfa1c54db42d8b19ac7b8beb3ee88f2d7252170358d90_Device=CPU_Config=()
+540:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=576ef0e9eaf8fefade547928d4592bc2b341ff1100c3de5104f0a63b2fbeeca0_Device=CPU_Config=()
+540:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=43e58b152a871421132d25894025e9f4e2b5294f4b22923ca549bb0f2b8ab50d_Device=CPU_Config=()
+540:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=508a961c358d40ddb6906694a24f87dc24f74cb4643aab58ee1d6fa28f099e6b_Device=CPU_Config=()
+540:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1e56a3e2379d29d81af93174e56ef91408af41dfc085d4851ff58dbec781b8fa_Device=CPU_Config=()
+540:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=83cdc5670c74aa0db5a1c14e70c45552cdba1c9e1f4d55c83398ce51abf80393_Device=CPU_Config=()
+540:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=243bd2256612673dd04651521ed8d3fa4087c90af7b85e1a4aa381c074bacd47_Device=CPU_Config=()
+539:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f1ffa9874732c1aa88e04fd55fbc864c9c6986877d3d52045fa6ae7f18dba62b_Device=CPU_Config=()
+539:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d9b3427efacda497c4fb86cebe89023b322722167d0c32de8a2602a80b23580b_Device=CPU_Config=()
+537:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8c3b386463bd59945d3c15512b26409dee7da9b1940f153e3ff62d47d6f79d2d_Device=CPU_Config=()
+536:conformance_ReduceProd/ReadIRTest.Inference/Op=ReduceProd.1_Type=i32_Shape=static_IR=e34207bf06e51dbf322bc0db76f3a9828ae018b02dba2b1826ed97004bee8125_Device=CPU_Config=()
+536:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=dd366f3f5b63fbfce3d9378cf0d8bfa4a909a973bc3e5e97eaa9d346c5cbf1d4_Device=CPU_Config=()
+536:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=b99ba096eea2f3725fa98eabc2a941fa895c0a58bcd7a8ea68d2a245ce913113_Device=CPU_Config=()
+535:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=952a43f4c368721e0c69418b71fe89982ef1eb2be0671653cb1200e34cb4bda3_Device=CPU_Config=()
+535:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=i64_Shape=static_IR=def60f5f3fb7a0d22cb3d23253e7c8e502aa9dd2d3756c54dd4343b66c2682ca_Device=CPU_Config=()
+535:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8596cca11141e34e75c884b1be9a75be19663caf4c0b1b4275f6035a73d62e_Device=CPU_Config=()
+535:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=651e5fbc222577151cf14e9c8e9bdf9e155f1e0d277206887160d65b532caf53_Device=CPU_Config=()
+535:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0f61e4837d11be2b01f69947cd0b424a45d2e548d9c70ae53b07c43fa1237cd0_Device=CPU_Config=()
+534:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=180e9c4ce23626df8010b5b79a28ecc4c6c75b65dea91938fa99025a65411239_Device=CPU_Config=()
+534:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=dd9fee8f7cd289b97050e22cb465637c6439230d0d3ebcb20452eb544b40617e_Device=CPU_Config=()
+534:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=0bc2bfc4481de7733f5503750d21376d00de6bfa699ecff3ee0c4333d9515db8_Device=CPU_Config=()
+533:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=fd10c1c5d33aef77d3428fb5c9789f3c2c2463ab9f6cb51184ad37951578320a_Device=CPU_Config=()
+533:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=f645a2275ff33ad614c801a8f2f262ce1ca95417e0ca59e28d4b87cf3289c00b_Device=CPU_Config=()
+533:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=81973bc511c12f7470f620b3484f6f7c82077975f916e080091dcd4757268b17_Device=CPU_Config=()
+532:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8710c3eaa10d25119059f4e15970d8a6381f978cd905fc8eb1b4d43a36d1d5f6_Device=CPU_Config=()
+532:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0d40552a1b6c1945765ada16284a0c03f5c1454fb12f226a34dee8a07b14f17f_Device=CPU_Config=()
+532:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=59c0e972ae75900cd8c802aa7be9b6c13c96cb10417ff417eb1aafbc49b891ea_Device=CPU_Config=()
+532:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=cccecd6fd3e8f3d84fb98f219b212cd2b55ae0e4e34c099a25a1028e9e2f83e7_Device=CPU_Config=()
+531:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e77468c2881ce0c38c14038151d560ccadc7dcbd5eb5f21b68b8e227c89813a7_Device=CPU_Config=()
+531:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a52a8e6ef7bbeacbc1435cde72a1a70bdb8a3abf78b5b971c2ecb1135cb4c136_Device=CPU_Config=()
+530:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3500be960a489d618c1ff6345c1d6788d17c43786c10a7e7b630586920bce356_Device=CPU_Config=()
+530:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6903ceb67d029d79d90687340dee0204830d5df1f1ea6fbb09f14a6eca234739_Device=CPU_Config=()
+530:conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_Shape=static_IR=38bcc7d745ee21a7c6858a161e269f0281d3f41d62d65d10fde9b0a9b80992c4_Device=CPU_Config=()
+529:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=34784838e98e93a6b024109ef3a8a5d4e1fc7f89b98ca23c81cf085f19acc663_Device=CPU_Config=()
+529:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=30eb0edc699f72085fb77a6cc31ad4aa9e62cf97befb64273493d234494fc64c_Device=CPU_Config=()
+529:conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=f6f85e9282e58756d40411508d6edaacc75c0f4e64d4e25021ade07ba17bd8ce_Device=CPU_Config=()
+529:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6b87ee29001d1d3b17ec72a66638e954796b7d6ec1d6f6be86890c7d5a3bcceb_Device=CPU_Config=()
+528:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8484c5c087ca8317588ef676a0cafb63ded379be5bad862e4d0504f43bc6fb45_Device=CPU_Config=()
+528:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=19d36388bdf9535fef89243d6dfce670fc91377062ed4b3095ea55b88e4f296a_Device=CPU_Config=()
+528:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=df62dbfb022ab001a9df6b16311f57e744e8674fa8751b4e3a7ffee872fecc20_Device=CPU_Config=()
+527:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=e4388b1379e224ea4849e6052827ef17b490cab3718159195ea2b2986719bb4a_Device=CPU_Config=()
+526:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=fad6766f10f7a0ffee665be437521766f5dd56b673293920d8b469bdcef8e7f8_Device=CPU_Config=()
+526:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=46a3135a1078cd8732e84754fa66872648997791d16caa379a179e1a90960608_Device=CPU_Config=()
+525:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9403397dde8b4f6a240bdc928d0f080dfb42f6442f281d6b3fe8b6e348ccacfd_Device=CPU_Config=()
+525:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c219261f655fdb1bcfbcc367ca8f6c4bdf0dc1fbeb7413343a3f0bdd74a70857_Device=CPU_Config=()
+525:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=a895a5053f72560fa5e36ce8b68a8de0cde25ddc1152cb1f647211f1b570d172_Device=CPU_Config=()
+525:conformance_HSwish/ReadIRTest.Inference/Op=HSwish.4_Type=f32_Shape=static_IR=1c38a17a13c5c03cfc1eeb147ca2474debea05ae1d6f2357ce40ce23552286fa_Device=CPU_Config=()
+524:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e0f4f91a6470af49c5e2497ae8fa917051879c18dd1e39cae18d159b697e8fec_Device=CPU_Config=()
+524:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b63e04695c1e6145a3fa9835130a4919df52ff3a420d3c800bddff65af7dd76e_Device=CPU_Config=()
+524:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=b5a1e5c47a0597ee9c9d0c0aca9909c596cbe71ebb069254460c2e97acfc1c0c_Device=CPU_Config=()
+524:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=0c491c1a451ad85717879e05678f7d1c85cc35d95b108087dd241b9b24b39ddc_Device=CPU_Config=()
+524:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=static_IR=d296b02cead8f38f8a2c9fa73ab8103d3050549c92fb807b040dd6e3bbd7e2ff_Device=CPU_Config=()
+523:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c14aca93b401d9d2325a5396c1489e1fa29aaa57f592cd2b4e6792ba5af90a90_Device=CPU_Config=()
+523:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0e9ccd2a8aded784ff21758802648777721176f1d112ff60aaf3f150d6292156_Device=CPU_Config=()
+522:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=06069a23d29d9bb6910729ac49ce1466e4fc6185c6ca31fa54fe7dd3289c41f7_Device=CPU_Config=()
+522:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=f5d3b4ec51e032e4df5dae00ecba1a3198c29cba96c72b8c89126c4638b715d3_Device=CPU_Config=()
+521:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c90b6f528b750f144ddd29be0059c202d46b3bac799c0d70893f2f4f9f05f64c_Device=CPU_Config=()
+521:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=2c2cec03b3ec1da29ad4d5fbb3530ee7343a436e27be923ee1f9dd97d29731a3_Device=CPU_Config=()
+520:conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_Shape=static_IR=7dba7222be56b8fcef943fc63ab00cfb3c7e0fb4467aeac96fd43aa4421cba86_Device=CPU_Config=()
+520:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f13ce39b60cc25991465a0c02e27edcb35af0523cd28004adf6fd9acd8a5fcb8_Device=CPU_Config=()
+520:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=adabeb9321a0770cb065345aca787cbf7d1adef68132cc5c7d8df885ea64ab2c_Device=CPU_Config=()
+519:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=64551d094520cf00d40fe514f573f5f37f61416bd456474f4b0a21788c4ffd3a_Device=CPU_Config=()
+519:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=2f7925a034999529ce07a5c8bed2b2c7aeeb7936f74730d9c8ca5a5086dea4cd_Device=CPU_Config=()
+517:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=bf235091db192c86756347e70771b4b00a6ac2c8852b93079749ba718d57d022_Device=CPU_Config=()
+517:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0fe2c94f8e2ed43edc0deb92ffe044a089c6920f886dcf6985ee910e7a4ffaed_Device=CPU_Config=()
+517:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=b93daedfdba7331025c12a5eb4b881bd7df445d80bd4fac34833087fe6d65bf5_Device=CPU_Config=()
+516:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=e05af92d21ebd869cf6e9554a4aa0bfc60c8b0c64baebee798f0be5a0a01019e_Device=CPU_Config=()
+515:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9730f247ba4a13fb03274850f295de500156107d33db957188846fe49c2f4566_Device=CPU_Config=()
+515:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=95ea118f8053f6bd18c8f34bbc475c00921bab5dc3af177492829d5cba16aa39_Device=CPU_Config=()
+515:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5adf6fcb72c0d6086a95fbbc5744e7d02dfb32490e0f42c62b57bc98489b801c_Device=CPU_Config=()
+514:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3dcf824c36c868d06d715e3fe24587c31eb7cad18ae9f9e044c7f6abfd261651_Device=CPU_Config=()
+514:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=68dc9d01cbbb3546ce77dbc77d705f33a6a48cb6dca9a323f5bcf02b9d589993_Device=CPU_Config=()
+514:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2bb16e2257294e3f7d905f66a483a8210f392ea822836e4edcf8910a7fbb4277_Device=CPU_Config=()
+514:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=04db488d856ff6cf4f04ad155967df95830796ad733e589f42c3862224acd874_Device=CPU_Config=()
+514:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_Shape=dynamic_IR=1af860b153ea667f413c7de4c98752d48ed8ac1fc7f90889009a2916e2ab1026_Device=CPU_Config=()
+513:conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_Shape=static_IR=If-8_707_Device=CPU_Config=()
+513:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=e15d2825807b2c7fda150b7b7b4e2c6914fab2d4af4313e959abaff56dffe6d2_Device=CPU_Config=()
+513:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=f8662769a2f3a5fb20582ccbb1931b7e3fa73ec7713eca30362b0e7c0baf829a_Device=CPU_Config=()
+513:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=2ce56cfc77884dfc61f7e9fab9a0ce04a4b016f9b3d13465cde1576b9374a2a6_Device=CPU_Config=()
+512:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d99c03088bad009d9be7f29ec5bad7e3b6c7534fe2649f9670b6f713bf017e7e_Device=CPU_Config=()
+512:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2d38082f8971dd7614234070dc9cb8c9b6b12fee7dc918503f0e256ab32d2fef_Device=CPU_Config=()
+510:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f8a096633b64512b865ea5e4a57529cbf621afedcb873285bd5e24cdb199a46_Device=CPU_Config=()
+510:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=dynamic_IR=346617ba1990b67ca1fec8ec219645b16aafa6c94a4a0f752c2f3633b85df679_Device=CPU_Config=()
+509:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=32537f045cce3d13cb28dd292a0ebe06e13002877d9ed2e5b25d3ebdf5afcb58_Device=CPU_Config=()
+508:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=af1f864a9f4bc94bdb713b0fed3f4c39dbd290cf7464f3cee8f1aded11981d4d_Device=CPU_Config=()
+508:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=69c87ccfa0080f65ed28b9a088343db5ceef524ae917b8e259b1865a017df22f_Device=CPU_Config=()
+507:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=9bae5a53011ecba6327961e6496f3312134c81e148523434968c3c56b5e0c491_Device=CPU_Config=()
+507:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d8fc538fc2b9ca150eb22763e4c7416c002b5c7fa6481314201540178e940a78_Device=CPU_Config=()
+507:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=b0e3e542180f521cfd4651ae18d3a58962751d3c6de9265240be6d4fe9745bf0_Device=CPU_Config=()
+507:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=83cdc5670c74aa0db5a1c14e70c45552cdba1c9e1f4d55c83398ce51abf80393_Device=CPU_Config=()
+507:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=718d6ff3b19f498cf4edeb9f7f4a7528fef578dd6fc7edb0796d476505472e46_Device=CPU_Config=()
+506:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=bc1a7618e707ddd2c4773d1a2234e6dfb39954ad872abdf38a18d653ec35b26f_Device=CPU_Config=()
+505:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=eb33c5485ec10ae4f1268ab19db6a4ef86812d4c92680b43791274bb055e2220_Device=CPU_Config=()
+505:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=2606bb36cbc87d845c9f678ac84e47c0893f0b86a3b675e70018d1e535234875_Device=CPU_Config=()
+505:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5953b8e79f64e33e67dd330999ff8e3d8391c8f3fa7eae519b117b1273c8c19f_Device=CPU_Config=()
+504:conformance_ReduceProd/ReadIRTest.Inference/Op=ReduceProd.1_Type=i64_Shape=static_IR=7dba7222be56b8fcef943fc63ab00cfb3c7e0fb4467aeac96fd43aa4421cba86_Device=CPU_Config=()
+504:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=81c2956d325aab4a7bfd931d94151e1285083a15326e0890f861b97017a24bb9_Device=CPU_Config=()
+504:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2bdfd42ec67d330dec8ea2817499b4c2d32a3d91deccede902acba057b050c49_Device=CPU_Config=()
+504:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=4a64918e1c0c648268ad4a1c2147889b2578b4513693737ec2ea1c7ff81dbc52_Device=CPU_Config=()
+503:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9ce6a2f4787ef120c486a68cc02bacb95d6cb1c4cdb5e2054275cde409a39803_Device=CPU_Config=()
+503:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9c66c6a6d93c10149920c3e034d9a0765afbef45dab66083fd5e3d796a57e406_Device=CPU_Config=()
+503:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=8c3b386463bd59945d3c15512b26409dee7da9b1940f153e3ff62d47d6f79d2d_Device=CPU_Config=()
+503:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5176d95c14be776a4247f25a469708ba7976378b7aa8860a115a28a8bf2c2902_Device=CPU_Config=()
+502:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e6aa73efa73e8b557d46457037aea3d6ba037b67ac1b52437354c2823abf2be8_Device=CPU_Config=()
+502:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=01d609bdfca9f2a499a564f66ab9dd71b394310593d27b8739283b19980e2dc2_Device=CPU_Config=()
+502:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=ad5c5df7cea37955709ef71d9967828ce3f0011e68aa1c6085984f1422944058_Device=CPU_Config=()
+502:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=11477a6d571446d4e895d1cc6b0155c36606963d5c4a3a0a516802063a60906f_Device=CPU_Config=()
+502:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=8f3e3716e8a1e8647454d124d7538ac1faacdc1b95873ccc1a760e09d48c30d3_Device=CPU_Config=()
+502:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=68ae288409f3972b9f52f4ea76573a81d764758059915949e76dc5f20e6952bf_Device=CPU_Config=()
+501:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=a1862e486a20c8de71dd94c12a157098ac5f222ba8ba3e1d3edaf9362331e185_Device=CPU_Config=()
+501:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=00d8728cd472d05b1eebf4b4d0ffa4a5d7c7dd34b3a99055b0f8ff5b0173af53_Device=CPU_Config=()
+500:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e4523b73661dc593224b91713f8f20f1c87513a62e3b8ee8265e1136eb74f9ed_Device=CPU_Config=()
+500:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e4be028a5a300682b79da2f015dd1c1b13381b38b19bb76951e1f26439173212_Device=CPU_Config=()
+500:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=7c8594e723d769f8817c58fc16146033afb91d821bc941dff944223796029f8b_Device=CPU_Config=()
+500:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a9fdcbd778622e442a42d8d2a1a12a1be0cf7e9d79c4d7ad56d5802c7a84d337_Device=CPU_Config=()
+500:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=boolean_Shape=static_IR=0702c04c1d16f65b7d552044e66732886a0b389702aa43f4c845e2460ddff1c4_Device=CPU_Config=()
+500:conformance_Clamp/ReadIRTest.Inference/Op=Clamp.1_Type=f32_Shape=static_IR=4d14510ef37733d7ca3d69697626c173feb05638f5036c49b060f6a80aea9ada_Device=CPU_Config=()
+499:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=afc2cb913bcb4e4badd203c9cdf491ea1e6ed4f1cd835e7507889a9bba25b958_Device=CPU_Config=()
+499:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=6dae5ccb2325826167ff4ec57e51280b4e125801e6405a33f4d95fd9ab9f3fc5_Device=CPU_Config=()
+499:conformance_BatchNormInference/ReadIRTest.Inference/Op=BatchNormInference.5_Type=f32_Shape=dynamic_IR=694ab408745deafb90f8515e002a393e790a8b1f83e58519081b983067d76967_Device=CPU_Config=()
+498:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=582f7347a93cb2c9e51ade6c405ff25b23d009bdcd3d7a3c49902e627a041252_Device=CPU_Config=()
+498:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=aa2c77112641e46dd617356a9cae765813b93353cd8a0f0508b915e0b03eede4_Device=CPU_Config=()
+498:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fbd54c37e1db9cd3cd3fc7c571117f65c26d9f5ff0674711a326e02ebd3f9d57_Device=CPU_Config=()
+498:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a848753a720bf9791ee4c239cf08712d714b877bfb6df23805590ad690ceaff7_Device=CPU_Config=()
+498:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i32_Shape=dynamic_IR=60bd170e816e0c2345a1658fd88459775fe8b7cce5de31a16e4e6cdea199f264_Device=CPU_Config=()
+498:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=bc52d884c8eb9ffc1a5c6af9467b8f285933b715def03c4e5cadf426ba186c3a_Device=CPU_Config=()
+497:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=f9f031e1fb61fcf87468eb1f4b2005e7cecc5f073eca95c161fe62fbbfc983f4_Device=CPU_Config=()
+497:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5d522332a7166265867b633721d8bd8ff23a233e7c8bff59a245bbb24d7be234_Device=CPU_Config=()
+496:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=df8ed5b481f6b03ca63572f2059d20911d3a7757f4c032455bef9933f2c1dc35_Device=CPU_Config=()
+496:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=8b8121ebbd51ee995f98531f595145a01ba70ce026ad0bee588733c33e70272d_Device=CPU_Config=()
+496:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=9ec0516350dc25e0dff22b12b65f761cd4e2744881c1f356f9ab50680eee1a69_Device=CPU_Config=()
+496:conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=static_IR=8f1629e9b003409304f12c3e315e8ae8246b3bc80208c3f612d5c5c179082a7b_Device=CPU_Config=()
+495:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=315fa20f952b6c7678cc93dbfd340097847826fea7928eabcec46d7ccacdb224_Device=CPU_Config=()
+495:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=55d83e2240e88295a78084f92162888c9b0beef46ae468cd7ab93a1c0a432835_Device=CPU_Config=()
+495:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=be4d557c62a3a43e7f309d2276cd7549bf1976ca8593bf2be752e60c42237a19_Device=CPU_Config=()
+495:conformance/OpImplCheckTest.checkPluginImplementation/Function=Divide_opset1_Device=CPU_Config=()
+494:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=98932a2171e1c93b2bec3991892faaac027e1c319e91b9008ef0d0f469bcb0e7_Device=CPU_Config=()
+494:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=e7e985d4d02762d236131e74fd867acff1828bcd4c4eb32e190de20eadb831fb_Device=CPU_Config=()
+494:conformance_Pad/ReadIRTest.Inference/Op=Pad.1_Type=f32_Shape=static_IR=f735a44db0a337a22f5ebed052a5718168765287ff4e0eca961c3f9fd68586c0_Device=CPU_Config=()
+494:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2e586703f4f8e9285249881d509a2a0b96d4758be5f97d75e7ee4f78951c58e9_Device=CPU_Config=()
+494:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=073dca39b0ed99c8af202a5e272db241f95de1f64a7a1611e83853b92e7f7f09_Device=CPU_Config=()
+492:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=525ed9b2af76610bf0ee3d11cb1dcfd46059335968359c143d0da7465736ac2e_Device=CPU_Config=()
+492:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b53fa2c9b93d3750c17dfb8ef75e51c43881ee79fddc863d6c1c2adfeaeaba2e_Device=CPU_Config=()
+492:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=622add2dcd72d2e1560e983ef4aad56fd35b48b71964ea8204137026f445d37d_Device=CPU_Config=()
+492:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=1f29402ea664e850ea05d5f2e500f087a6165f1f4c9b3e5102b5509c020f0f6d_Device=CPU_Config=()
+491:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=37a75b89894d8a024fe6d1808e0674b4fb59534cd319f4bcd07c6d9caaaf97a5_Device=CPU_Config=()
+491:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2b59c9f67435c46699dc1c66ee7ddbdd333bfa544d0aef7bd1389db2635868c7_Device=CPU_Config=()
+491:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=e4baf41ae9a77441993eb0f95c3d7335e9a719e5eac8b1ffaf60d8f515f769a1_Device=CPU_Config=()
+491:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=df085870336c57084e22afa8b52ece7149abc21b5d1784965a7d36d5ada91e8b_Device=CPU_Config=()
+491:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=de3245d77d2e004bea85af29c91e1668ae1b6905fe2cdabb92711adbde6406a9_Device=CPU_Config=()
+491:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=be59de0f93d8a22736d98d0aab618839905eb9a04f79c8d88d7ef08c7267f4ec_Device=CPU_Config=()
+490:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a2006e1eaa808a3e78550535058de54c5cd83e9a32a52e488fef1f7883c321a3_Device=CPU_Config=()
+490:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=78db1c0e2c0fd4f0d351e66ce9cd31f7a6ee804cd23bc686b8c9081125b7142e_Device=CPU_Config=()
+490:conformance_LRN/ReadIRTest.Inference/Op=LRN.1_Type=f32_Shape=static_IR=c1a0f6661ad306b82e66063988835c1a17072608792f3423bb058fe38c4b14d1_Device=CPU_Config=()
+490:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=6faa91bd8e7037c9233825cde9313cfd2afafa21ff423a00544eaa36d734332e_Device=CPU_Config=()
+489:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6453d2955ad3344d5e021f97d71691ddd7c27ffc0d9044b724c9a6b5c20cb427_Device=CPU_Config=()
+489:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=8de274a65748ff76511a53b614cfb33651d2b51720714851a16976fc1ee2b6ea_Device=CPU_Config=()
+489:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=017d4b1dac18731e05634414942698ecbc750e306eb86e773ffe5007bfa9feee_Device=CPU_Config=()
+489:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0182ad6b02d77803accd2ebe55d87b679950570d1dcfef2940adcbb5fb9f1a24_Device=CPU_Config=()
+489:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=14598e053d7dee616de43f2b160e780b4bb53decaea53b31db58341464b82e48_Device=CPU_Config=()
+488:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f5a74749f6c90dccecbb5e4a7d0fee72cca6247f0084487b5ca7d94d098c9b9b_Device=CPU_Config=()
+488:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=445a2c47e85b116d03e5f6fe43863a39778b78ca5175fba1bb0eec669f7610cf_Device=CPU_Config=()
+488:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=00d8728cd472d05b1eebf4b4d0ffa4a5d7c7dd34b3a99055b0f8ff5b0173af53_Device=CPU_Config=()
+488:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=1d8577d7a316c5a2726f3be79b4f8b22d6dccdd5491a4c7896a7c9de37330e77_Device=CPU_Config=()
+487:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=95bbf8a23b19badbde31e9ae7f016aa436d50d797f59bd736e220030f645bd9b_Device=CPU_Config=()
+487:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=3374f930d0ffd26ccd7cb542638f2386ae5f803b5bdce4d848ba1e93b4a173a8_Device=CPU_Config=()
+487:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=d661093ec9006177e5d47e7f666d7c98353f9c3d5290ba6284145f60822f2573_Device=CPU_Config=()
+486:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ef702f626a20bec33a58f2596e4e6e15f105860ebfff1d6f42116a514d853c4a_Device=CPU_Config=()
+486:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8198512c1849e0efe931509147ac4dfed4ddc7ea8d0736a7defb4fce81e2ea28_Device=CPU_Config=()
+486:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=089d73af2221696ce3755a9f33124c9af87fd3e860a1d4f229995eb01ff46703_Device=CPU_Config=()
+486:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bb610d59221e7c5d8e96f971519b7ef27bda7bbb9be329b873a901a1e749b9cc_Device=CPU_Config=()
+486:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b3cb0ba09807204990d7e1635ef35fc96aa10330de2ffefd95f6483e68dca532_Device=CPU_Config=()
+485:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e27641fd09143d736ece2166cc3156e80c190d5def706b86358f49fe980cf9b7_Device=CPU_Config=()
+485:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=45b3506bf3dbe053fcb290dd1040a9d125c56086b37223e8480647bdd9b9372d_Device=CPU_Config=()
+485:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=4e6db028c1ff414e411bc09accf3b7c20cf81e530c903e14586eaad4c21fa111_Device=CPU_Config=()
+485:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b12c40f6d576482396a94e28e0814488b87eb6844583bc87384ed385d45bd6e0_Device=CPU_Config=()
+484:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b42c98b6313e56a7a012553eeabae92f0672c0bde6f9895d10fb459796448b75_Device=CPU_Config=()
+484:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4722375e4770c972f87bc89a8ca16871aa57251a9c01ab2a14adc11f885cac91_Device=CPU_Config=()
+484:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1ab723c2a389a999b3b01158b82719358d802c6d62767d6dcd91b5d7fe5531fe_Device=CPU_Config=()
+484:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=f03721e9c346ede7ba78d0a2466e38cec6d1e08b3395b38c8f47ebcbfba35d3e_Device=CPU_Config=()
+484:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i32_Shape=static_IR=7aacf3576c3d114915bc3aa48c8ee4ac9e94bc00928709d86461877a8d2d84fa_Device=CPU_Config=()
+484:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=30680a7972de02e47d59c768730b8a64a06b011dc8b5be4fd25f190662cf1c1d_Device=CPU_Config=()
+483:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=df8ed5b481f6b03ca63572f2059d20911d3a7757f4c032455bef9933f2c1dc35_Device=CPU_Config=()
+483:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=37337436d0d481c689caabec3bbc8f21ecec65560c70de4dd1f5b0ed9e444bf9_Device=CPU_Config=()
+483:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c2539b8a06e5dd0e01933c6861e366f8ed565e5956b8b2546647b55e966e7755_Device=CPU_Config=()
+483:conformance_ROIAlign/ReadIRTest.Inference/Op=ROIAlign.9_Type=f32_Shape=dynamic_IR=7260d5fcecb95f9632da5784702239161bdcab6bee60e0c1296a46e5120d5ca0_Device=CPU_Config=()
+481:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=349d64660bcbb9269f88067431a4b8fc31fcfd09ffb1afa9f3ecf4bc37e8c4ca_Device=CPU_Config=()
+481:conformance_Clamp/ReadIRTest.Inference/Op=Clamp.1_Type=f32_Shape=static_IR=785551399ba4bb8eb76271bf698b3ca795b8388338f110843d5c78c03009625d_Device=CPU_Config=()
+481:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=a3032224f3e9c096102921fd8571966d23c21cba931b9d5e31ba41e9698d07b6_Device=CPU_Config=()
+480:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=81954ff76e3fd04ec3b3e3c26e28a79ac259c9b255f90ebe3cc0772fb673874e_Device=CPU_Config=()
+480:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3f830d5ee243ca3f56d027f95929bbadd427e4954e286e6c890ddd60f9c5c2d0_Device=CPU_Config=()
+480:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=152333527a542f3e2228bac5d0fd4ed288dde9205632a318b9b22b64e43be329_Device=CPU_Config=()
+480:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fb5525d36d14f54eebc5670c06232ca4e32cf920d309b5777e37d3377d386433_Device=CPU_Config=()
+480:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=23ad83652d315aa08ee781b0fc81c0eb737265280c85a86a4f08cad71b33e74a_Device=CPU_Config=()
+480:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=488c8e933df63c1368e021869a92fd48929ac252863ed4c2acfab7174b449581_Device=CPU_Config=()
+479:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=033c6bc337d14053ae097dcbee99ef5de7cb7728b589cc8d64783467505a8ba7_Device=CPU_Config=()
+479:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=f110ef35c9642ecd941cd85a67a12b616353d4a8cd33f9770d532759e2846255_Device=CPU_Config=()
+479:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a093f44f22a851366eec46b6ed80fcecd2a4a96ca797c2caf288922a2fae1fd1_Device=CPU_Config=()
+479:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=2aa586a55098e1960c204572ca9704bb3b8b9a3baab5fcf08200594261f7bef7_Device=CPU_Config=()
+479:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=0da39d97a2f46fcbdf524727d0283243d3bf0c3fab75f76f529b6480c84f67c1_Device=CPU_Config=()
+478:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a956d2fb1fd17e2d864b3eaa8915cc0c4f9a768e35fdf5bf20cf6bc7f41aa130_Device=CPU_Config=()
+478:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8198512c1849e0efe931509147ac4dfed4ddc7ea8d0736a7defb4fce81e2ea28_Device=CPU_Config=()
+478:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f01152d615a3092ffd4ad1059779ea183d7a62c1ab5b970d940f3f537e6f12db_Device=CPU_Config=()
+478:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=49ed5fbacb5510d9cb3970dee136271e98ad5322b95217c6dc41026e583f3bcc_Device=CPU_Config=()
+478:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=53108cff3836c47360380f3898c5de245a566a5d98040820d78befd46e56955b_Device=CPU_Config=()
+478:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=cd5756749d3d73dc7b666f7f41dc292c73230e5d31ddbbd43aae77210b86220a_Device=CPU_Config=()
+477:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=62409191ca760efe019eed9d1923c8df9ab545d39f90b1230a58d1747d3143b1_Device=CPU_Config=()
+477:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=90f882a97d637e527900edfb1b7c277b65544832793d08efdf8454be21a2f496_Device=CPU_Config=()
+477:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=355bfa53a1f9e712db4df6642a51970e96e3612583b2ec90e7a8170e45b1625c_Device=CPU_Config=()
+477:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=8b9cabc6a44ece744453092791ef63b8d6ca4d83af7e8635f2f4ad78186e5184_Device=CPU_Config=()
+477:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=df62dbfb022ab001a9df6b16311f57e744e8674fa8751b4e3a7ffee872fecc20_Device=CPU_Config=()
+476:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f0145ffb8d2846d866b1a89c8217d54209830e6d3d0d10913e75af42f2510c74_Device=CPU_Config=()
+476:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=8a5bf21112b4a458a3323e615dfce41a8627c89ac692e1d568786634667849ab_Device=CPU_Config=()
+476:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=d9231cf5e3e491e318f16514e771cfdee4b781b42fc9d45088da850ab48079cc_Device=CPU_Config=()
+476:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c5c5d09465cec7f1477d5e02f3f1c4cf593c71aa090532c4e43451fedde7c2c5_Device=CPU_Config=()
+476:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=074ab76268ab5d03283f03f4e228a7cf73ab5a18fc0e7366778cf8c45286f18a_Device=CPU_Config=()
+476:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=dynamic_IR=e46ec3487f18188d1da4c029a2981033018c1f8f273f60d3f7d1bcbdae18c2c5_Device=CPU_Config=()
+476:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=c7998d9fa7e16dedd52f8cbe3d0814f2f3b30ee6d728881d64c4743e0ff6fae0_Device=CPU_Config=()
+476:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=dynamic_IR=ca0d551f3da549b28475d996906bfa5202402be286f59f9bf53ac809c9fceb49_Device=CPU_Config=()
+476:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=68ae288409f3972b9f52f4ea76573a81d764758059915949e76dc5f20e6952bf_Device=CPU_Config=()
+476:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=dynamic_IR=edf223c654667e60869d97d2fb6a2bdf356db8d7e997b4b9a66e56445bc24f30_Device=CPU_Config=()
+475:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=53828d433bfa231cac709949db0e4ff72010e5cf9df167ecda7ac72bd5a69e10_Device=CPU_Config=()
+475:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=a110c620d27325938e9febcd9d757a5525c421bc29450fea960403fbca3507f4_Device=CPU_Config=()
+475:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=dynamic_IR=9337e101d74f6d35bf81e9be895ffba9e972cdab9d79b2802f1c1ec0f4d34a83_Device=CPU_Config=()
+475:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b7983ae70a4e7868ccbf4b25a5d8e795620182c29817ad1151d89f2e932d770b_Device=CPU_Config=()
+475:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9cd66958dfe8db471d48d6ea35f1b4547a413fcdc6c61c804a456befcbb09d15_Device=CPU_Config=()
+475:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=225aaa01462e6e43c0c12cff65f96e7d9c07d368a820ff3c1b2939fefe86d492_Device=CPU_Config=()
+474:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5ba879b46e93286e4c880a726e28d6956a1c8415508733b5349079f899462679_Device=CPU_Config=()
+474:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=abefab3b34ee5f7da347f3c86a1a0b7b17617de416051dc18c3aee80862c3000_Device=CPU_Config=()
+474:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=4d2c49ebbc46b60233510b63e280442319496782da33185f7c2d6003611f937e_Device=CPU_Config=()
+474:conformance_ShuffleChannels/ReadIRTest.Inference/Op=ShuffleChannels.1_Type=f32_Shape=static_IR=46e851dee1f7bead1a6e2459157df33266c45559375a1caff90a2732cacaf881_Device=CPU_Config=()
+474:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=static_IR=26d97c755f660ed8ee08a0de8d6ab88598391cc79b239bfaf0a102722ffc4bf7_Device=CPU_Config=()
+474:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=98526403db7eb1f67a41aed2c34fea684d99d8cb8225313136e55be7d326aaaa_Device=CPU_Config=()
+474:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=23654f4a28ae697d81f49d72568e7f0657d5c15b82e173fd7381760ebcb61cda_Device=CPU_Config=()
+474:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=adabeb9321a0770cb065345aca787cbf7d1adef68132cc5c7d8df885ea64ab2c_Device=CPU_Config=()
+473:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=f89eecd15ff45d6929f82696f96a68adfd694043ec3f859952d80080bd140627_Device=CPU_Config=()
+473:conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=i32_Shape=dynamic_IR=91f59d10b16e7305a651b8ee9480a0068225d6cd56026139e35ba69b9f84b00f_Device=CPU_Config=()
+473:conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=dynamic_IR=694ab408745deafb90f8515e002a393e790a8b1f83e58519081b983067d76967_Device=CPU_Config=()
+472:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=eb33c5485ec10ae4f1268ab19db6a4ef86812d4c92680b43791274bb055e2220_Device=CPU_Config=()
+472:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6eeea9355df867c7fc97af81dae6d02799239ec1e480dc2c975a60761fc5f7be_Device=CPU_Config=()
+472:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=59d132b45e2ac60a670eb43efafa43f065bb43d492208ac670fc8234b4f618c9_Device=CPU_Config=()
+472:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=0d62db1843ef7e470a613f9f4d4999ce0e6c94365bd667b78c283cb9406e915d_Device=CPU_Config=()
+472:conformance_Erf/ReadIRTest.Inference/Op=Erf.1_Type=f32_Shape=static_IR=906676850a62f56935dbd13792be1013db602488f29eb757a546b411699ccdd5_Device=CPU_Config=()
+472:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=7b8d235013affb9589d57a8f99b36858d739258b787cffc7cec85d1dca567261_Device=CPU_Config=()
+472:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=b5a1e5c47a0597ee9c9d0c0aca9909c596cbe71ebb069254460c2e97acfc1c0c_Device=CPU_Config=()
+472:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=29624e785b9377dbf03b9aae46e7d0049e93a94655059ec37a0fe308ff7cb9a3_Device=CPU_Config=()
+471:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=05690f7225eecae70805d45641cd02c02c46bc61f9fa4cf91d3ec7ce94f6fd3f_Device=CPU_Config=()
+471:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2cc5f8b67a407507c1d59a08981887766d377c7368b53cb0a18ec71df291b1f2_Device=CPU_Config=()
+471:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=dynamic_IR=7562536120d473cca837bb2ad1e3969484868111954ac0b168a5c2805264a689_Device=CPU_Config=()
+471:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=9b915f1788d3d4768839d2cefe4fbba2f8b2d8aa4c22f9ad574335c22d0db1a2_Device=CPU_Config=()
+471:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=f64585bfa3951a93f76c18fbc795f3ef82176e270c9f37161bdfe48e094c1d39_Device=CPU_Config=()
+470:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=51d309244d7c90039cf86929d62320f5e5c5df8b1390c6b1241d8389eb6914e2_Device=CPU_Config=()
+469:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=37b1b14a23dbc309d75fbd98158648e1a7fd246684b96e1ebb10a75c3f5b03b6_Device=CPU_Config=()
+469:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d69304b651805edf18138147ec5a4c16e883ad5e5d9828db849a35249c28b263_Device=CPU_Config=()
+469:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b66a71c512cd06f5dc1d1a254ba0128b606c1c41b860f272dc1d2514502c2350_Device=CPU_Config=()
+469:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=5a82d5761e70d13623af2cc6a6eab20a7a0657ac28f38223e34b63d6cbc1224b_Device=CPU_Config=()
+469:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=94de295ab12bd6b03bc5de22f9e9c46d5875d111eb942d3ba35f8e2456ece1cd_Device=CPU_Config=()
+469:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=9b915f1788d3d4768839d2cefe4fbba2f8b2d8aa4c22f9ad574335c22d0db1a2_Device=CPU_Config=()
+467:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e27f0bcb3118a7cdb488f4685707bec982ae54ff8bf7e97aff9ea6ecedd66714_Device=CPU_Config=()
+467:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=ca0d551f3da549b28475d996906bfa5202402be286f59f9bf53ac809c9fceb49_Device=CPU_Config=()
+466:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=060423427a9100b6a38aad12a83043441f8af436c1d2502350ae867f45bd721f_Device=CPU_Config=()
+466:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0fb6a7848271e000d49d4966647edf55e65f181523883089f43147c14cfb9871_Device=CPU_Config=()
+466:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0edbc14a5d5ac1265a4b880131348aa16e284012547556ddedb36b185d833284_Device=CPU_Config=()
+465:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=28bb0064e4cb56c497227ec69899b08dc09cccbf7d390555416aff617a393f81_Device=CPU_Config=()
+465:conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_Shape=static_IR=44e0e688ecb44d7a9e83f7c9e1639fae49b2883dfc1b1ed588c98c5bd1f614fe_Device=CPU_Config=()
+465:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=075342290aa43542c81f7ed4e804c905f110edc23440452c6d0c0f0c312b65c1_Device=CPU_Config=()
+465:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b4c737aec2f47947d1afbe26d9d8cd124c6fdd24e30cab1f563d91310d1b62c7_Device=CPU_Config=()
+465:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f0853773e26eae3d051504ed8db7f182c0e90ef7b45625a1a72ac51a73e2208a_Device=CPU_Config=()
+465:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=02f589480d24784ece323ba30be856c7cc718151d3588f683ef4825a407749ac_Device=CPU_Config=()
+464:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=87baad85c649084e386ca502375581e9dc47c68c076bacae5e5ac1ddbaaa7830_Device=CPU_Config=()
+464:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=2ad5b63ed56c3966570062970125d1cac16629595e9ac34c6613cf00d6dec0aa_Device=CPU_Config=()
+464:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=4946bdb7dec06c2bc8eae33d5903d6fa41bbf3654b13a0cb5cfa4af5a4720426_Device=CPU_Config=()
+464:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0ac57f7cc81a683585f810885288fdaa174de2497d00156b85e067653aad3a56_Device=CPU_Config=()
+464:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=bd99ad9e1d756435cca9c6309caf45043f34c6c3c844f60e17deb8dfef4234f4_Device=CPU_Config=()
+464:conformance/OpImplCheckTest.checkPluginImplementation/Function=GreaterEqual_opset1_Device=CPU_Config=()
+463:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0a2b1efb810d1dcf7897c3671f1eef0c36bcdca679e24b8e86f078128b381833_Device=CPU_Config=()
+463:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=427900d25144ee6b8cd4b35cd53c6e9335375018f6328dd01ae4db304846d991_Device=CPU_Config=()
+463:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=dynamic_IR=0f5965e2daa2a1f6b050813850956d9a4bbd771cb234ec814617099e1541ea0c_Device=CPU_Config=()
+463:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b9bab6ef11eb6ae637924a902a40dff310a45916d50c8f0a4ec667c8d6bde6a6_Device=CPU_Config=()
+463:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=8411c0432159fb60adefa760384515552240bc6220800a736556d7461765eb60_Device=CPU_Config=()
+463:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=boolean_Shape=static_IR=0702c04c1d16f65b7d552044e66732886a0b389702aa43f4c845e2460ddff1c4_Device=CPU_Config=()
+463:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=ef6e4b3910cac801199b1f6be74902b42105d23de549d426b1c4bcdd7361f79a_Device=CPU_Config=()
+462:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=516ad610612780fdaf83c5dc151316e83772eda4700882f934c97b2a2bd86dac_Device=CPU_Config=()
+462:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=d683b96d525bc074d4f8c15934a5082a3fba1068b591f67e4b05d605fe5e6aa7_Device=CPU_Config=()
+462:conformance_ReduceProd/ReadIRTest.Inference/Op=ReduceProd.1_Type=i64_Shape=static_IR=44e0e688ecb44d7a9e83f7c9e1639fae49b2883dfc1b1ed588c98c5bd1f614fe_Device=CPU_Config=()
+462:conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=f6f85e9282e58756d40411508d6edaacc75c0f4e64d4e25021ade07ba17bd8ce_Device=CPU_Config=()
+462:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=29a544bbefe85bdabe1d5d36d83d8ee1d80c71f8b98ff6e898e1062671daa8ad_Device=CPU_Config=()
+462:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=dynamic_IR=70c260fea7c5ff6d2d1e9580ecf6c6a8a26c0e688b4f8dc4540888526bc13e76_Device=CPU_Config=()
+461:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=fffd24bb56af50d2e56fb2abdc6c0c96fceb21f00a9a1556b3890bdc50840352_Device=CPU_Config=()
+461:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=6289232b1cbbafc963ac3cd787330c81a9cd02def9fefb83d6f6cced249de92f_Device=CPU_Config=()
+461:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=99866ef63c9a2e7e2d9b7f00d11a4c177775bef9cfdf074e83f56318c143e6a3_Device=CPU_Config=()
+461:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=57d49137431cc7fe4364cc2fef13111fb9f7a5a908b2d7b6f5663100ba5d636c_Device=CPU_Config=()
+461:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_Shape=static_IR=7aacf3576c3d114915bc3aa48c8ee4ac9e94bc00928709d86461877a8d2d84fa_Device=CPU_Config=()
+461:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=d38ac6654882078aafe169f6d1280279fa81e646529f6f2bd621338a756046a0_Device=CPU_Config=()
+460:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3fb25dbf33700d0b8ebc3c53fe328f2ee9f45c5a090240eec120b954998d17ce_Device=CPU_Config=()
+460:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=952ad9af4561d61157cc5e73bbc5608bf8cbea1473c52a566ad1ae7252bcb35f_Device=CPU_Config=()
+460:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=a72b942dc1915ccee8af871c00b16647db7c8935100b012f91ebd799bbe8d416_Device=CPU_Config=()
+460:conformance_GroupNormalization/ReadIRTest.QueryModel/Op=GroupNormalization.12_Type=f32_Shape=static_IR=3e0fb4df6ea780921a8ef21a06bd602e97f91baa201096d438de60e9114acfb1_Device=CPU_Config=()
+460:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b9bab6ef11eb6ae637924a902a40dff310a45916d50c8f0a4ec667c8d6bde6a6_Device=CPU_Config=()
+460:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=14598e053d7dee616de43f2b160e780b4bb53decaea53b31db58341464b82e48_Device=CPU_Config=()
+460:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=d46d4fc3e7b3b2cea07f7ba710f77f7d99b4799e7fb0d3127ea6862f3f731ae9_Device=CPU_Config=()
+460:conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=static_IR=c602b01c85ee95a1d7deb1498c5f0494a5ee727ce8874d5beded8bf33631d0b4_Device=CPU_Config=()
+459:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=23f7f775455e615175f3122ce422ee96de019ca40fe603b5a4605d51f28210b1_Device=CPU_Config=()
+459:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=7147d6ead151efc24a53c196b63fc441e240c34b41ad2226a535580eb2a3f3d2_Device=CPU_Config=()
+458:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a2ca34430931dd41f08f2b3cb8163ea5c1889a23b53d0f3b7d26b7a8af1acef3_Device=CPU_Config=()
+458:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=5df86bdcc255998a0b5b18e64e3059afb2c80e37b5695208d04a6fc0f1410b50_Device=CPU_Config=()
+458:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=30680a7972de02e47d59c768730b8a64a06b011dc8b5be4fd25f190662cf1c1d_Device=CPU_Config=()
+457:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7cfdc0a97fd79a5d272b29850c24dad4a0a8f147ea89b7683c98fa203a448c52_Device=CPU_Config=()
+457:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3888863c4f725445490846561b2aef4a5498ef1583903b365fb864e387eb9641_Device=CPU_Config=()
+457:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=static_IR=592176a8c97f4d759a0c6b3ef56c3610df4a0df4743f3be7ba3ed2ffb5dcfaed_Device=CPU_Config=()
+457:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=ac87d885a27bfd348d3f9fad5a03680b73b7198fad17dfdf08675e6e3d239ca3_Device=CPU_Config=()
+457:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=81973bc511c12f7470f620b3484f6f7c82077975f916e080091dcd4757268b17_Device=CPU_Config=()
+457:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=c202ffc0e1805a36e48ee4b06d06b68a9f179eef00dc353a092a13818e8ebbe9_Device=CPU_Config=()
+456:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f0af28fe49c157f5f62f72f0ab209c50aa07d97c65477217fde6e3a3d0dc98ef_Device=CPU_Config=()
+456:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=0c491c1a451ad85717879e05678f7d1c85cc35d95b108087dd241b9b24b39ddc_Device=CPU_Config=()
+456:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=8411c0432159fb60adefa760384515552240bc6220800a736556d7461765eb60_Device=CPU_Config=()
+456:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=a0336bba08291ea34d6271c83816fb349d163fc5989171b07fe1bce50a2f3ea9_Device=CPU_Config=()
+456:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=1c63f30ce7cb977ac945ee25eb97f3c472a81b999eacbcdd4b3bfd253f25cb51_Device=CPU_Config=()
+455:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=8042d30c9796e8eca03cb2e3651f84b5167204aaf186ad08ad5f74a9b0a26b9d_Device=CPU_Config=()
+455:conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=f5ad439e65ed1e090d3d5744e9e5bcd9b8fed6ac6a191735cbb1cdd9af8bccf4_Device=CPU_Config=()
+455:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=81eb5381e1d4d3dc7cf0d83a9cd787813d3267c99b31cc9a3cb0cf9b01727c0e_Device=CPU_Config=()
+455:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=5bfbbb826bcb2c9e7b5364fcc5da23e737953150029c2ea7455ad4b09caaf01d_Device=CPU_Config=()
+455:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=12b6ad1cd462f676c9add533f2fb2a5d98698e72fc5d0e6dc984abb27f54475d_Device=CPU_Config=()
+455:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bfd899e1dd2a03f99d8b55d9fa5ab04c6e4576358c910e9bda97cf497f0418a4_Device=CPU_Config=()
+455:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=64efb6dd46c36bec02b92148d178bc032417c8c2d999ff7b0a24ba08af365f91_Device=CPU_Config=()
+454:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=16b3235d5271e534a1bc725f80e2bfcb837a1c6f144bcfe8211a3e5359644441_Device=CPU_Config=()
+454:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=bca72a16df5bcf81d10dfbbb0e53aceb2a8a70ec94d4247d47333679de7214c5_Device=CPU_Config=()
+454:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=6d5907929d59d1f99e85183238e29d6602c84721d099284dcb8900ae5fc3c45f_Device=CPU_Config=()
+454:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=8ef34b5ce0dd0100a8efad53b3b71e87f76ed69496cb6f030e76478d7daddf69_Device=CPU_Config=()
+453:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=87f3815fd73265960ef5910a3b03580b13e96d02784e159a0bf0ebc30bc911d5_Device=CPU_Config=()
+453:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=73623637f6155bde0a4735dcd904e5b491d7d459bef5f8d3f66f02f9558937a1_Device=CPU_Config=()
+453:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=4a64918e1c0c648268ad4a1c2147889b2578b4513693737ec2ea1c7ff81dbc52_Device=CPU_Config=()
+453:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=a937747c04b70351d3632aab91189200e2c0a69b6467ed856b7075885c54d83a_Device=CPU_Config=()
+452:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f89a1dfd0ef8b50a998962d5a4f4b54451ea4c533476a2e3d42c04e9e645afaa_Device=CPU_Config=()
+452:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a0f8789f0f95beb6f28efc829bdf2f99d34a3e9397ad1a80d7831aaaf125b5eb_Device=CPU_Config=()
+452:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0aa7024ee856fc832b1e639fbed60e1382c8e1b84f7cf2d33447f4bbd9ce75ec_Device=CPU_Config=()
+452:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=644274eaea5cff1fa9976380a2c024a8510f88826d0c1a6036aea3b18e3ecd8e_Device=CPU_Config=()
+452:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9c651eeba5c3e7b07a8cd0d4ba479fe8c5aaa2c4df9b18ab022e775ea01dd867_Device=CPU_Config=()
+452:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_Shape=dynamic_IR=6a0218ea2e7eb0329e4915f2f6a7c215742d2469e868a4a8e43c683c2dddc01d_Device=CPU_Config=()
+452:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=6ddb35aeda2a6cb63282d2fcf6503aa02135ad60e23c752280ef82aaf6a31191_Device=CPU_Config=()
+451:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=7efae3e9c1a0419670b3967f8b2dda53fb0200f946a3d529b8da235ee14690ff_Device=CPU_Config=()
+451:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=1c9d69e1a85d03b8599961a8a1b90af7b3b2d43bc5c4f4a6b8d5da3c22166abd_Device=CPU_Config=()
+450:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9aac77567d944de6632688fd3de80c0b3da1ee741da639897c2104d3121d690b_Device=CPU_Config=()
+450:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=23f7f775455e615175f3122ce422ee96de019ca40fe603b5a4605d51f28210b1_Device=CPU_Config=()
+450:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=28675c37d06426cf6895e7ffc15d6c212ef8be1b278fd199d1bfbd0678f825fa_Device=CPU_Config=()
+450:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=120cc30794fe9c9d59dc9df6fadbb9791f3a6b99e4b9fdc06c5e01f494b05780_Device=CPU_Config=()
+450:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=704412b992d55bf9ff00d823458e5d3b3a369e47b3eca3429fed94b87c8da554_Device=CPU_Config=()
+450:conformance_Log/ReadIRTest.Inference/Op=Log.1_Type=f32_Shape=static_IR=038bd1e152575a3b8ca28bfe18fdcc9cbf19c9489e7bb831b9d5f56f7499cb7c_Device=CPU_Config=()
+450:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=753b524e2aad8fde7e7206fa8c3e7ca15c52c49f22f41d48cfb6b4d814cb40af_Device=CPU_Config=()
+449:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=67ed6a8048424f4e44f40c542faf7a2a2d2419e81aa982fe32a054af05caf309_Device=CPU_Config=()
+449:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=6e53e1fedd57631f3ec70d6825d8d1029ac95905b82b6bef7fd44ba87373e9c6_Device=CPU_Config=()
+449:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=d93633fac99f9472435ede6fcdb9c72475b68bf1352d58b33e8cbdf9ca74ac50_Device=CPU_Config=()
+449:conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=556c6863ca3b12d255c4c81d92b4573203f02c5588e064fb22dd4aa23c8283c6_Device=CPU_Config=()
+449:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=e1a894c49294c6930cb8f8c857ec745fa2c6d18cc3607389c89af4d13df4e411_Device=CPU_Config=()
+449:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0a2311ddc09b949cceb73fd0e09bbdcc2932c2635fee3a2c411bec27a30e9439_Device=CPU_Config=()
+448:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=4df4ab698c70278594efe8b4349a4c99c8b2ab7c4ee0182c5a4b7673da922ad6_Device=CPU_Config=()
+448:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=97f8a2367c5590d5fe7e405d32ec48e5318a6cb3c0862f2b0e8705a7842e8105_Device=CPU_Config=()
+448:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=0e58762b5cd9926391cba6f63db3c7db49285b900ad0abc93b4d05d4baec800c_Device=CPU_Config=()
+448:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a7ad8306fe632a2d0c45a492ad2d21dbe40f2f9ea55074d602beb6f8dde17982_Device=CPU_Config=()
+448:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=7b2199d0ea56102a7c6737be2334b9717ee292c13cdb692d07fddfd173ea5b82_Device=CPU_Config=()
+447:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=b08690e29e0249d5a6a30f2ad886ec714067df994bc4d8cbd82d0d02af6335bf_Device=CPU_Config=()
+447:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f99caac2fbfafe61a686cc29c0df0779eae1a0a1826f5bcb820048ec3c148207_Device=CPU_Config=() +447:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=7ad5da9c461223f21afd023e08220eaed788598f50e144e45fcdf3466c0810a3_Device=CPU_Config=() +447:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f3b3afbedffce0d70b40d78f882a0061ba05e26e385c37cf902aec88ea43a649_Device=CPU_Config=() +447:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=79a6d2a402cdd74cf1277a57ff95b71d61384da394ad2a4d9ebcf422eb5c3258_Device=CPU_Config=() +447:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=593116ea16692c8f5a8994c0562c47e1c627f9088c519b752a635a7d91973085_Device=CPU_Config=() +447:conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseXor_opset13_Device=CPU_Config=() +446:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=f2685b40efb789012e69252fa0fe30803c68be724a52dbcda9b2cb796138ea57_Device=CPU_Config=() +446:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=b61800abac107b248c29df7ba04a73c91d490782b1da46164c1b7d2f8cec3cdf_Device=CPU_Config=() +446:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f43df065734a36674b3fdc7a47fddd1cfa5c1b36bf73e7de86a100c645fbc7d3_Device=CPU_Config=() +446:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=dynamic_IR=6b70264ed3eb3831e0e034230813ce1a1e71c157a302822b56335e587bd200b3_Device=CPU_Config=() +445:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=c4d1a1fdd0a336620be37a8ce7578ca0dd0c74f89fdb32ee86e7004792aa8445_Device=CPU_Config=() +445:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=d294c71f3796d2e2b88f819f6512ed03942eab440681a5bc5b092e5a34192107_Device=CPU_Config=() +444:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e86061c75b7e9a65644e82de6b8fb2a532ebdfb302f46f378b6ff20af8d1d14b_Device=CPU_Config=() +444:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=f32_Shape=static_IR=6ab37e1d52328b5ce1204cfe13977b06dcfabeb4acff9821d65ffc91bd3cf09d_Device=CPU_Config=() +444:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=5aa10dbbcee8d7434796180d5fbe8f0a954b772c441c8d6046439c615d3b9011_Device=CPU_Config=() +444:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=99183013393075553f5cd30818ccd603ff5d3e9e71dd8f42ced0df2377280729_Device=CPU_Config=() +444:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=21f786ad25c17eff66f16501d160439b96636a7d5d8512c1bd3db5fb5d5e6987_Device=CPU_Config=() +444:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fb83c1c4a2ce0a8860479916f23f3961a5c20481e62de79390573dd7859c09f0_Device=CPU_Config=() +444:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7ab51e173b82572bfb29cac5dfdc326e3689e466c68cf91590dcbdddf1f530de_Device=CPU_Config=() +444:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=8337ad383956ad96ca95f4aeb967e05c694fe586b4ed6e46547e3ffa0217c59b_Device=CPU_Config=() 
+443:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=abefab3b34ee5f7da347f3c86a1a0b7b17617de416051dc18c3aee80862c3000_Device=CPU_Config=() +443:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=3d37166487c5c52af657343f8fa10903efc7d580d5b370a519a0ccfbf6fc56bf_Device=CPU_Config=() +443:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=4e6db028c1ff414e411bc09accf3b7c20cf81e530c903e14586eaad4c21fa111_Device=CPU_Config=() +443:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6f2159bf315f062962fe87512c15ed5cacf09f898397a92b690c32caf147e50e_Device=CPU_Config=() +442:conformance_Round/ReadIRTest.Inference/Op=Round.5_Type=f32_Shape=static_IR=f4cc9554ddbd189f18575e3a80afe6e8f8bce613dc8852a48d4171ab6916e087_Device=CPU_Config=() +442:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=8c5831a53b504e86ce404e5a521921ef86bf4e130e79819c1abdb0e88a6543c5_Device=CPU_Config=() +442:conformance_NonMaxSuppression/ReadIRTest.Inference/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=802164adc9e651b0a3ec0b5f96341fc3cbd098042412236b65e0c8f77b5153f2_Device=CPU_Config=() +442:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6964f870fd6bf44d1d5ee5925eee8892230b8928aeee1966db73b6c4fcd5acf8_Device=CPU_Config=() +442:conformance_Clamp/ReadIRTest.Inference/Op=Clamp.1_Type=f32_Shape=static_IR=0662f4c4f222a79755532ac9eed43118b2ebd0faf0fbb9b400f9047ca1071b5f_Device=CPU_Config=() +441:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4ef9d8687805658001fa7650e660620d74bab09868b356603c268bc8cdf7a5c7_Device=CPU_Config=() +441:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=77316b4db1518258304408714d1b57a023070cefb5c1b141b6721028258f5744_Device=CPU_Config=() +441:conformance_IDFT/ReadIRTest.QueryModel/Op=IDFT.7_Type=f32_Shape=static_IR=cf47311b142dabf10271ebf5c2e359455d9bcea82d95ad2a1a2d58915c77bb16_Device=CPU_Config=() +441:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=dynamic_IR=50ebc9636f3321fe9bc87cbfe301c8ca3ea27f56cf429c983ceed6ae63bb3885_Device=CPU_Config=() +441:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=d76cd25e23d34af8e58f6447a49a50d66cc28592d3432577c240e75e00d5a765_Device=CPU_Config=() +441:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=c0c33bc628fffda062b4f013c7d41d0f9080f14f41e084ac547099384a9b3d20_Device=CPU_Config=() +440:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1d52baa348f922bf85866fbfaa488c1ca33e01f0b79bd6a25fb430e8b7fc8b06_Device=CPU_Config=() +440:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=90f981f067c23b4fd3d2df838af8e6d11ae1c5e9465b566501628c7f3d63674d_Device=CPU_Config=() +440:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=19d36388bdf9535fef89243d6dfce670fc91377062ed4b3095ea55b88e4f296a_Device=CPU_Config=() +440:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=c56cf3dc39ed0072f3e5a8cadd1502fef904b32de3b7760ee4c6964c0e505ac9_Device=CPU_Config=() +439:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=5a82d5761e70d13623af2cc6a6eab20a7a0657ac28f38223e34b63d6cbc1224b_Device=CPU_Config=() 
+439:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=5f8b64ad8dd9ccd202ae8d5080ce166fe9f47b909e803da49546dbffdfb4ab3d_Device=CPU_Config=() +439:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=4e14d87b7667a7900d4427ec46c72eb3c7bfd2e3d86e5bdf92eb2485059b4951_Device=CPU_Config=() +439:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=48a273073ced3efa39d01e5ce40c30b2901e8a3dff0b414911282b8fdfc0b09f_Device=CPU_Config=() +438:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.1_Type=i64_Shape=dynamic_IR=45a9a897d75b175e3d805e74ec09322789564e0c0e8d9535724f262a9f534572_Device=CPU_Config=() +438:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=d81ef130a76622c79592b0b42acf5cd6dd357ccec28958dec6eb02a654beb9ab_Device=CPU_Config=() +437:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f166c58732107cb0c82859af62b8fc0d3d144468ab66ff4615a1eb4bd325d3c4_Device=CPU_Config=() +437:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=b6e3f37ddee609d492f47b36b8fe937ee401d01e6d43d7e0b7c06d1a1781b501_Device=CPU_Config=() +437:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=e020cc29b6ec76cfac0e0b52ed3024458fbeb567c4fe9932eb5257e3ade79b95_Device=CPU_Config=() +437:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=7b2199d0ea56102a7c6737be2334b9717ee292c13cdb692d07fddfd173ea5b82_Device=CPU_Config=() +436:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6ea8e16cab0d6f60ef13562706c941f5ba3c90d3a65447ab3844e100cec5a0ad_Device=CPU_Config=() +436:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3421ca968a9f4061cea0492ac3920fe1a29fb35093314cbb56a78bbb136d8fc7_Device=CPU_Config=() +436:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=952ad9af4561d61157cc5e73bbc5608bf8cbea1473c52a566ad1ae7252bcb35f_Device=CPU_Config=() +436:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f9b090cbcb19663630a1490fe18357b752e430ad793c0e3aaabedcb74ab64934_Device=CPU_Config=() +436:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2386bb6412e51aa72e9426e12f9f2b2646e7074413b33fff8d95dde141ee12fc_Device=CPU_Config=() +436:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=9c6d5cdaf19c92d1f994e4ae6cfdecf5a9ff04e47a2e0e68f3a08ec8f6e74479_Device=CPU_Config=() +436:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a3d8e1343e43c666358304b530278c73bc7c52a0d7fff38977154b6f7c456731_Device=CPU_Config=() +435:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=21534d0488c3f7c8bd40bc81476832e866000c97ee6892359826c7877905d733_Device=CPU_Config=() +435:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=f8b14e90b051624d56678dbe68f15e6db94e22878b22914d0be241047d1a3783_Device=CPU_Config=() +435:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=b81d993247e604272e6df01b8c4ba016be7f60263c892e8469deef67a8a6afba_Device=CPU_Config=() +435:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=17be9a027c25bbfbc08cf4dd106ee25d649680b30d16c74580fb3f8fcab54baa_Device=CPU_Config=() 
+435:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8fdd77d8381b78b82c04360bc3f05a358bd690bd8204e2cdaa2c0a65bff61a41_Device=CPU_Config=() +435:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=31e75a7408a46928e1a3a8babe3da21bccc6d442f87291c0b2bf57b29e18face_Device=CPU_Config=() +435:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1b59316585dcbdfdbef9fd71e2681207498cc867a2285eff20d125c4fca0502c_Device=CPU_Config=() +435:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=efeea353bf41d0aac1f5400e451346d6cb407610566018f368726328cafca221_Device=CPU_Config=() +434:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=86a9c310cdd99d5c2fc0b7239080c9cff89efd37662cb38da28bc9e2a1471d7a_Device=CPU_Config=() +434:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=cbfc33348aff4daf15fb7926884243c7ffe38aa29e60eceda90fa9b8aadad5b1_Device=CPU_Config=() +434:conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMSequence_opset1_Device=CPU_Config=() +433:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=41bcf70f8013164bdfeb7e348c05e6d43d9a1afc49087c49745679bc3aaf1e10_Device=CPU_Config=() +433:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=9ec0516350dc25e0dff22b12b65f761cd4e2744881c1f356f9ab50680eee1a69_Device=CPU_Config=() +432:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6e9fb2accb692c69349a88158442052e6350143ca7dc28f2525d8e8df29f8c78_Device=CPU_Config=() +432:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5b6503944921be5fa3feb0b7647c6715465af16702c645dec4e2f2556d8d679c_Device=CPU_Config=() +432:conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=ce108d9befa5ee87b0161e969c5ac986c176e468ecae9f66895cdc4fc6bad940_Device=CPU_Config=() +432:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c1852c534b8b95bf1a9aa2771decf2368fa095c5f5688d38ab9ce0bd86152a19_Device=CPU_Config=() +432:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7754523e2d3739481e051eb21a4347f2d157e94db3c37d47f0006ecd8d77d512_Device=CPU_Config=() +432:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i32_Shape=dynamic_IR=1af860b153ea667f413c7de4c98752d48ed8ac1fc7f90889009a2916e2ab1026_Device=CPU_Config=() +432:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=86a9c310cdd99d5c2fc0b7239080c9cff89efd37662cb38da28bc9e2a1471d7a_Device=CPU_Config=() +432:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=80a8c850ad3eec3e8fd00d2ac09695a0f87a10e4b80b9022f49ddcd9805eb2d1_Device=CPU_Config=() +432:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=f2995592ad35fbaf52873e0180081397916db8165b9596166e8d449e44b57169_Device=CPU_Config=() +431:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=7df296e0e156bb36cb643a292802f9db374c77035c6a05ee4a865fbe2c6ef92b_Device=CPU_Config=() +431:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a46f51b7498c921515a53b67480ec4d413ed43ff809e1fa6a4deb7365f4a0460_Device=CPU_Config=() 
+430:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=392b855febfc39fd1b2a9fa43270f58bae53e0d210525e8700edc15a10d28d33_Device=CPU_Config=() +430:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=57c57d85bad2b76d3d65d88baf2b3677dca6e5d534121e87efd618efbe5b1547_Device=CPU_Config=() +430:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=0832e6deae4ceb25b92cdfa532fb5d5fadfe7fd7a00b79f630ddb5bc011986ab_Device=CPU_Config=() +430:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=72373e9c2bc4cdf2f0aa0a5d14e30ed1a5e0545d9a96f4ab675f3b9dc69d8cf4_Device=CPU_Config=() +430:conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseOr_opset13_Device=CPU_Config=() +429:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=526afcc4dff58aaa019466b0440b94dbd2d5f14c060d47b8ec40183deafecd83_Device=CPU_Config=() +429:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=8216637727ccef527454bfdea7ab22ccd4e5e29709494bf96dde5af3b4a7eaaf_Device=CPU_Config=() +429:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=fced0ff647e4ea9a4b1673016b017f68ed75cdc778cad156dbd6cc379bb815f9_Device=CPU_Config=() +428:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=a7b79789ba2466daa67ce8610753fbd89a2ca372d65e2326802c24cce03f795f_Device=CPU_Config=() +428:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=876a77d1e2efb758a87bce1dd2fe35cd8e455c6f3dd7cd2bed8e10504c426de4_Device=CPU_Config=() +428:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c161ff64d4c506fdbe44d0ee76042f958f5dfce778833653628a026de01a3f9f_Device=CPU_Config=() +428:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b78ffc69401084763d529e2aee12f9b9793bc92be3eca3df2a97730b9a252ce3_Device=CPU_Config=() +428:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=d76cd25e23d34af8e58f6447a49a50d66cc28592d3432577c240e75e00d5a765_Device=CPU_Config=() +428:conformance/OpImplCheckTest.checkPluginImplementation/Function=Assign_opset6_Device=CPU_Config=() +427:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=a94e0bbcae35d7cb33efba2c6df3275f7bca8520ddb37eeeab81829906fc8964_Device=CPU_Config=() +427:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=a3de81c04a0e7d5cab275045415ab4c294ed3270588c2ef704ab6db5514ed0dc_Device=CPU_Config=() +427:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b04f836c4ed5b0403f4b7fdf9c5cb8d11ff9f65105ab9bde39f80191a65f7f17_Device=CPU_Config=() +427:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=46282ba6f0eb5aac6acc1e114a2408cc301300a027c6d7a05691928b5e6dd9dd_Device=CPU_Config=() +427:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=99fbf009fb26eae6bfc372a5b3d9bef89d6f82e5fa45c62cc5ece995bcc71079_Device=CPU_Config=() +426:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=53da49d7aaa81cbb7c3a3dbc8ea938bbffabda14bd106fa6c2b6abe244ba5cda_Device=CPU_Config=() 
+426:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=b729ddf6b689006067cfce88ec7d9e89268dd6cd904e4596717016541632b13b_Device=CPU_Config=() +426:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9cb8bb36dacdb562fddf77e93890fba560c6cdf038921e057e21f3e5e458c88e_Device=CPU_Config=() +426:conformance/OpImplCheckTest.checkPluginImplementation/Function=SpaceToDepth_opset1_Device=CPU_Config=() +425:conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=ace54c326bc8255cd741eec12762e4d8f645fe93d50c037effce893745f8fdb5_Device=CPU_Config=() +425:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=933c6450f6856b32e879034662cf60eca53970c10106f8a11eb925e5621042e9_Device=CPU_Config=() +425:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=5c05bbc013fc857a8f2b340df778f3ad5bdbc1b7273cf41b23d6da410205c612_Device=CPU_Config=() +425:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=b0a418fb8ec50f25147079b3aef1b13095ea626a9e52a643600c39972982ff9c_Device=CPU_Config=() +424:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=63de0838ea26e3575f49700f73fffb0d3415ab68b29b1a1da690b84f7a034822_Device=CPU_Config=() +424:conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=b5f5ffd783aa251498c2011f19a63c1d68991e426384ef9728bc0b46587faa2f_Device=CPU_Config=() +424:conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_Shape=static_IR=590a910a27283b92d7a4650bba546a3bec08a6ded604bbe8523ab3c6d734c70b_Device=CPU_Config=() +424:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_Shape=static_IR=f735a44db0a337a22f5ebed052a5718168765287ff4e0eca961c3f9fd68586c0_Device=CPU_Config=() +424:conformance_Negative/ReadIRTest.QueryModel/Op=Negative.1_Type=f32_Shape=static_IR=c29451ffff103b5e965a1bbea7994ef6da6394060855ee071b9e7a3a4702141f_Device=CPU_Config=() +424:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=14f550bd7e83223ffbf501918141376e6a144484865f03c9768fe9da49a9f06f_Device=CPU_Config=() +424:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0c5ac67592b69e8c2b7acbae7a0f877cfed184c572d2fae09eb8fa629e86eeb1_Device=CPU_Config=() +423:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=45ce409a7078c7e732a092633cee36d6a0aa80fa9249cc98dce44e5b4bfc1693_Device=CPU_Config=() +423:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=004b6fd9b060324a42aad296dcb21f5b7eb7586c082f98d23f25a6d882f70c14_Device=CPU_Config=() +423:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=b050ebcbd31acbbc43d657d87a54415e0e52d3e91fa95b57aa1dd0451a5bf50f_Device=CPU_Config=() +423:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=i64_Shape=static_IR=b35fe21330bf6e76f55ad27b71fb0422a737d0c400255fd6cf2cdb3252d3617f_Device=CPU_Config=() +422:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=486cda3fac543c53e385e5b26f0932be2c2c67d937dce02e9376ba2956321e5f_Device=CPU_Config=() +422:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c1923c409aa2da9da8daf339b8b26be9ec6a106e65098182015c21881b0b5379_Device=CPU_Config=() 
+422:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1bae1a851b5bf95921ad7666e48803dae416315a20a3ddbcc1c81243cb5bdede_Device=CPU_Config=() +422:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=5de1e1eb337f4eff857dccbc075ec7079425a50de3096d4f81d25f0118acc6fd_Device=CPU_Config=() +422:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=223a34e46344a0dff7f35a637c9bd08e2a76a552ca87e5bf0134c9fc6d6be41d_Device=CPU_Config=() +422:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=8457db0b4ea6829aad99afe4c31b7004b57daef4cd0ae02ca00090cbe5feb72d_Device=CPU_Config=() +422:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b91f26a0b7b56224c507de772631016119cd0bc3fd49527013f571e2db477402_Device=CPU_Config=() +422:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=b4fecfa9b5d565a02a9f0d0ed19a11127ea9c8c4e70a0e5f7b920701e0665d51_Device=CPU_Config=() +422:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=1d8577d7a316c5a2726f3be79b4f8b22d6dccdd5491a4c7896a7c9de37330e77_Device=CPU_Config=() +421:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=f32_Shape=static_IR=6e67522f2df32ac8e237fd4de148d082f3c55e6c31ace80cffeaef784dfe75a0_Device=CPU_Config=() +421:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=81eb5381e1d4d3dc7cf0d83a9cd787813d3267c99b31cc9a3cb0cf9b01727c0e_Device=CPU_Config=() +421:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=287a7562757ef0295cc38442e3d775cff0fb1ea9b27e6897bd456f01ce82d455_Device=CPU_Config=() +421:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4bedf955c6ec574258a05f59e5397225e1360ba68ea49d4fe105d6a62ccb3e97_Device=CPU_Config=() +421:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=1f29402ea664e850ea05d5f2e500f087a6165f1f4c9b3e5102b5509c020f0f6d_Device=CPU_Config=() +420:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=86d8d42c30e423e801b5d4d832f87cd6837bf9feb3c546f5bf87e04f842a04f1_Device=CPU_Config=() +420:conformance_Minimum/ReadIRTest.Inference/Op=Minimum.1_Type=f32_Shape=static_IR=c307ba8fc5f5d81037e40e46cb8ce1057d0bab7433138943596e5b21bb84221e_Device=CPU_Config=() +420:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=6119edc6e1f969159ce54e6ff4451d96db51485b54fae625a972035414c704ef_Device=CPU_Config=() +420:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=95884fb8d74cae609a67146ef94a84eadda8f3bd6369a9cb465bc413264a1d0a_Device=CPU_Config=() +420:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c0413244803edff103b95dbbcab27b2c714740372ba215264371a9474355a8c4_Device=CPU_Config=() +420:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8f731757a7c32fa8e4d602d7197af81a1a82ea228ec05f4baeae7c59eba11f2b_Device=CPU_Config=() +420:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=4ccf5cecf790d27400fb95526a993f8a1a28cd4f3120b897cf45bbe78f087ab2_Device=CPU_Config=() +420:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=5e7e2adae49fae3a376e9a5a971513a9b23b5fe4008ce51814e0fa1fd91f1f22_Device=CPU_Config=() 
+420:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=c8bb865a43a3782b3b85e05c3e86388fac07473697ed45a7b04f60010555a3c9_Device=CPU_Config=() +419:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d77f317dd01a80955f901d0da2930aa1f82531848f4bf22d839c60a84941e6c4_Device=CPU_Config=() +419:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=08b46b9b2881764fde87811d2462a361d75c30fcec74f631f116f010953daced_Device=CPU_Config=() +419:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=24e44f75d91fe4e7e28db6c93870a47d536abeb87240841ff5b7e74b40189e42_Device=CPU_Config=() +419:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=ba4f511cc4a0870c64cc5027fa39b2bf91a6e7f39ea36cd43a693eb59de6d836_Device=CPU_Config=() +419:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2ef3273b8c144dedd6cc2d2b8c2d2921d999fa286b10d90aa796fa188dc52cef_Device=CPU_Config=() +418:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a9c40d7a1ada834400ffbdff779b9970c83bd576891dfa7f637182cadf9e9681_Device=CPU_Config=() +418:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=28cabba0fd0acde452552a362925344e8cd8c5af033419d83041bf26b1d14d69_Device=CPU_Config=() +418:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=6aff57130da7904e5d2300c4962f104d31c704872d5c33bbda4bb38efc34d563_Device=CPU_Config=() +418:conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=() +418:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=3a3886301663fd20cf2c8c0f74c11d80dfe8b74ac39e41652f0eac1ec9bfa2df_Device=CPU_Config=() +418:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=11477a6d571446d4e895d1cc6b0155c36606963d5c4a3a0a516802063a60906f_Device=CPU_Config=() +417:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=776b4b6d6b102654bbc08df901869e4d16af505a5dff7f2d27686874bd20ccc1_Device=CPU_Config=() +417:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5f18fb02adfd683f379dd5a15d38f01cf744e6940754f6a40e2646a1d9c97be8_Device=CPU_Config=() +417:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=d1d0510ce6d862a5512bf4c5c588f84548f1aed0226eca6850b5e2d470a5ee84_Device=CPU_Config=() +417:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=461bf15d226b7ee3cbdcbc8cf1806e98267c5f14f0aef49dfb9de094f56347b7_Device=CPU_Config=() +417:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=00d924b3557896a41b0be32897f7b7293fcc44d79a285e91695a5fd2f29f3b8c_Device=CPU_Config=() +417:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=08393711dca608a5beec54493fa162068673eb746a6223b6dab2640d411570c0_Device=CPU_Config=() +417:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=d50dd7c19583071293118e8b98f2bc749ef3e34ab8eb0149138e6b9fe49a153c_Device=CPU_Config=() +417:conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=b8e32896d2ab304fb4fdca3924e0110852da92be25307f30709cd7d897c2f038_Device=CPU_Config=() 
+417:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=03e7b025285b1369ca39bcf887783a843fe06ea29f7f394efc8201d1b7ad3a09_Device=CPU_Config=() +417:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=586094b4ff6617c08c87a53c7be1ca26aae40657c8d964d81eda731dbb27e848_Device=CPU_Config=() +416:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c5637c5151109c002830514b8b1450092dc52df14146ecee467dc54469a77718_Device=CPU_Config=() +416:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=e8df0b3ab9e127c1d37881f4c250ca0fd0dd2ec822cd24bf95e7860484fe9b8a_Device=CPU_Config=() +416:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=9f4964a8b6440cdec94781121b408df16f0ef2283b0355583eb934b3cd2bcb66_Device=CPU_Config=() +416:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=a449aa561efb222cad1a414ee87443f9fec0e5c2f6220f6a57b6705c9ef26cd6_Device=CPU_Config=() +416:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=7f37f31081476643f5c279fddc3d25eae22d909730b4aca0211aa70fdd572843_Device=CPU_Config=() +416:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=41c94561e79611e27aaf339205962d4967188b385d68c169b2bf4557173005d7_Device=CPU_Config=() +416:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=ac87d885a27bfd348d3f9fad5a03680b73b7198fad17dfdf08675e6e3d239ca3_Device=CPU_Config=() +416:conformance_GroupNormalization/ReadIRTest.ImportExport/Op=GroupNormalization.12_Type=f32_Shape=static_IR=3e0fb4df6ea780921a8ef21a06bd602e97f91baa201096d438de60e9114acfb1_Device=CPU_Config=() +416:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b38f11a07d752c83a5e4fc709d5b78fe9a40ef3394f4b617a30df29c21640338_Device=CPU_Config=() +416:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=d76cd25e23d34af8e58f6447a49a50d66cc28592d3432577c240e75e00d5a765_Device=CPU_Config=() +415:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f2293320b7533e95bf000229d2458244fb9af573cd737ca0088a00674df1ac52_Device=CPU_Config=() +415:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=aa6c3816ce7ce49f40be5edbe957468e80910a8eb5a3956f54d89fdf7c264b44_Device=CPU_Config=() +415:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fb9febc1b0984c7d6887460d058a75a9444bd1ade793c5b945c9b79ad2c63e46_Device=CPU_Config=() +415:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e22e40a4f300567612f963b17707be4de09093cb9a248aed62af594e7986f7dc_Device=CPU_Config=() +415:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=76ef553ce6e6b782a200e030fcb744ed737623fc3a8c9c8faeb0e05691c5a55c_Device=CPU_Config=() +415:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=13c78a6d628bed4392d2577f132f924d9e17a7e29a2171dafebc0a596d2ade04_Device=CPU_Config=() +415:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=5b466c4e4b53a5ea739df517da47f0764f9e31197b7d30fd9dabf17d1b33a489_Device=CPU_Config=() +414:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4104a7840dc96c214be896cac75911b70baebb902a42a26f12b281bc2cd87318_Device=CPU_Config=() 
+414:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3e1e1cd684c1bcfcf06febedcb4eb0f4f62b5c0920098fa0715c828e9a9761a7_Device=CPU_Config=() +414:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3147f462ceda9b383de633ac08d6014a7779e74b169d3745990fa2b2799b1dbd_Device=CPU_Config=() +414:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=a5dc3f8dd6385eb7f6d4052af82e27b7af7e8a58bdcb6092ec79ea3087f141c6_Device=CPU_Config=() +414:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=8973f2f4c2be5d0ed57c94e1aed24bf809e51854c03c2abd73ea37ef7221d328_Device=CPU_Config=() +414:conformance_ROIAlign/ReadIRTest.ImportExport/Op=ROIAlign.9_Type=f32_Shape=dynamic_IR=7260d5fcecb95f9632da5784702239161bdcab6bee60e0c1296a46e5120d5ca0_Device=CPU_Config=() +414:conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=() +414:conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=1c38a17a13c5c03cfc1eeb147ca2474debea05ae1d6f2357ce40ce23552286fa_Device=CPU_Config=() +414:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9033954b258cdfa9fa858317ee4588b8c92cc946d7eb305bf130d3ca8ee0f1fe_Device=CPU_Config=() +414:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=e8a26a33d6dbe0bb560820295fb6b8aafc3da0d2b78e29199d2f09e952722efe_Device=CPU_Config=() +414:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=5df86bdcc255998a0b5b18e64e3059afb2c80e37b5695208d04a6fc0f1410b50_Device=CPU_Config=() +413:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=910dee337e395f94d7673f664a3e58647ead8bcedf50ea1439250bdfe8da25dc_Device=CPU_Config=() +413:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=15995a372d69998eb6a001f53486201fa9bbc89fb608c7d2a447203a404713ea_Device=CPU_Config=() +413:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d13d862f7b8306948676388381950639ef433dcc4e38f5a6fa8d50575d1aa814_Device=CPU_Config=() +413:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a7242174afe3f7c2e95d31cd14d56ceb0a566e2e8d65ba97e07d004200f4f517_Device=CPU_Config=() +413:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fe70e0ee3f24f0bfe4391da7797647a01f66fcb109b481ca859c9f8f7dc7b411_Device=CPU_Config=() +413:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=42f3f3a5b34aacb93147f9c77ad5709cf7436ae8cad9318434a9b6ff6852982d_Device=CPU_Config=() +413:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=234277ecce31161bea52cf4aa2a37aa8cd43f1bbeed281a79a6aa1d07368872c_Device=CPU_Config=() +413:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=51d309244d7c90039cf86929d62320f5e5c5df8b1390c6b1241d8389eb6914e2_Device=CPU_Config=() +412:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9257d329b4cc9eff8545270d1693734adac9ac4ee44dcbaa21c774287e84aadd_Device=CPU_Config=() +412:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b339277c7465442a5163600e784319030de12cab4005f43c0b903bcd0c46e87f_Device=CPU_Config=() 
+412:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=1e5127a9c21ad1ccabe67dd1f1e28a3730c09ba294ef1f9fc001c6dcd723ec62_Device=CPU_Config=() +412:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=6b2c79edda9cc9cce61c98552d6a0d3a3555c9ccac3a56c6692f536a0abdb61e_Device=CPU_Config=() +412:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0fe2c94f8e2ed43edc0deb92ffe044a089c6920f886dcf6985ee910e7a4ffaed_Device=CPU_Config=() +412:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=b35fe21330bf6e76f55ad27b71fb0422a737d0c400255fd6cf2cdb3252d3617f_Device=CPU_Config=() +412:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=899cf50d8feefa9c5e02f6fe88b79e66b59c4a53478755d51b3e82570683613b_Device=CPU_Config=() +412:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=b2dd13c363e41fef66b0dcc3e21e77b9a97e413c1c89f8c8a53179b05f01c2cd_Device=CPU_Config=() +411:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ed872c2ef0d35af97e7f9be84d83eee6d42f2fb279b71f4feaa1aecefb450a28_Device=CPU_Config=() +411:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=d04bc06efa76ef2937aa1539893ec9c79ac61c765cb50cd4a26dbf5586bfc904_Device=CPU_Config=() +411:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=3801fd5b86bf772977c131734d8356c8dfa41b9056091937473be600e332fbee_Device=CPU_Config=() +411:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=335e78458fe959fc5a9669069890bcc67c1f1eabf21dbfb6011cc80b8322e9c0_Device=CPU_Config=() +411:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=b6417017678573faaf72824d1bec40bcccd73ae0007aef24b089dc3743276b14_Device=CPU_Config=() +411:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0256d48640841a9233553afa85e34dca797e6b5eedbd772f606c1a0e6f8e91a1_Device=CPU_Config=() +411:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=d435aa8d2d045d69b2d187147f90c879205f27346ac991765ba97bd47d4fe0f6_Device=CPU_Config=() +410:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=ddacee38f2bf3dd45ddd36ba236440ae28b9737487e0fb186c2b9777c0b557e9_Device=CPU_Config=() +410:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=02f589480d24784ece323ba30be856c7cc718151d3588f683ef4825a407749ac_Device=CPU_Config=() +409:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d27e8ca8280dc9219f4b76a2c8f47cf526b32a58710126c7549e2c04026944de_Device=CPU_Config=() +409:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=992d8967c619d96c75985952485fcd79b943ac5e71c40457eafad4b71bf56a4a_Device=CPU_Config=() +409:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=i64_Shape=static_IR=d106f0cba8d8311b75f6074c099f45e10400c0829fdd1826292b1310471076cb_Device=CPU_Config=() +409:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=77b3b21d35d3742f7abc1097b99d510453f42ebe921681685fbc457d2fa9912a_Device=CPU_Config=() +409:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=424814fbe4a3ba7a49c506f11509c035212fbdf4ef44fb2bc708c5f201e4e1ec_Device=CPU_Config=() 
+409:conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=4c3b0cda20bf6b3c574eaefbce21b9b2b0ed92fa1b37c32af252b111b6466d0e_Device=CPU_Config=() +409:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6624c22e3b5d72c4e8d21df59af6f3759fa4d8fa68f2b5f3f92a98d6a943d0b4_Device=CPU_Config=() +409:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=577ff3f9c8d226d1899056073c0223ae2d81dcc940c5fef8b9ce9cf63931e9e2_Device=CPU_Config=() +409:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=150b1e03f5e8abf76f88e68ae56a3afc3cb3ae110fcb12af35192aaf93b20f5b_Device=CPU_Config=() +409:conformance/OpImplCheckTest.checkPluginImplementation/Function=PSROIPooling_opset1_Device=CPU_Config=() +408:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=bf802641cd9b20a23b73202f401f4b32903ac7083d0ac7026098cfb4311b35c5_Device=CPU_Config=() +408:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=28e31d83986a1435f11ba6355b98472025fcf2c3c6e090103283d9486356b5de_Device=CPU_Config=() +408:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=8c5831a53b504e86ce404e5a521921ef86bf4e130e79819c1abdb0e88a6543c5_Device=CPU_Config=() +408:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=c55846f7a08af86fb1c914c925433852fd4bc735f671c87e965a6db9b6971708_Device=CPU_Config=() +408:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=cb67c5d0b8712ebac00fe4169f0cad2e0a8c71d7f9603d5d2ce6ff6dd6bc055e_Device=CPU_Config=() +408:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9c63b760d92c46d2ba731cb9edc4cf19a96848e4f3c354797f10a7a1bb9edf8c_Device=CPU_Config=() +408:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=87a966d3d3b90cb32db3454c5dfb2f67af86b68a5e45fa1c5f4a75c3b5cb452b_Device=CPU_Config=() +408:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=fda1f84f5e911136f8daaf4fcebfb989f3216c066ddc1cae578882a41ca0f5bf_Device=CPU_Config=() +408:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=e365913541918ae265939740fd9930d4f5d919773ce47a4e896a264bd8f86460_Device=CPU_Config=() +408:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=68ae288409f3972b9f52f4ea76573a81d764758059915949e76dc5f20e6952bf_Device=CPU_Config=() +408:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=ea860537d420b0d1afe0ec9a10192912ec59d8f4ba01b27add362ce50fd6b380_Device=CPU_Config=() +407:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=8a5bf21112b4a458a3323e615dfce41a8627c89ac692e1d568786634667849ab_Device=CPU_Config=() +407:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=990cce0ce92df99ae74ad8840f7b89d1c48c0044deb9cb71619b44a565eed911_Device=CPU_Config=() +407:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8596cca11141e34e75c884b1be9a75be19663caf4c0b1b4275f6035a73d62e_Device=CPU_Config=() +407:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=46b077d7466eecbadbb7ceba5ed90724db3d9e216d22171f5dee02e44b9a5377_Device=CPU_Config=() 
+407:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=c18d3d2fd8001cb07daaa5000258b36352807e3e81999d2d80a668e4d6add085_Device=CPU_Config=() +407:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i64_Shape=static_IR=f9377788ac0fd1ad0a7f51d16543722cb5acb69640745df17d9f41f5d1d0b544_Device=CPU_Config=() +407:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=f3d84b4cb7f301c6b64c64927dd1e8c20e144671419843ed3d20692f0773445c_Device=CPU_Config=() +406:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=1837f66989053233e19b617ab462b5c608981c0be175b57a2366fd41ca1a9fdb_Device=CPU_Config=() +406:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8dfd99ad2ffed2573598829ff34a62deccbd70f5337c1fec4c2962cef1992595_Device=CPU_Config=() +406:conformance_ScatterElementsUpdate/ReadIRTest.ImportExport/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=3df69301c7a4d857a546a30a0d76674c52e3abd819d644ec036636eb7cb92fc1_Device=CPU_Config=() +406:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=3bfc4cff938f4386af23d87ce10f8680a62a25ce1fa9178874f212edf45ee045_Device=CPU_Config=() +406:conformance_If/ReadIRTest.QueryModel/Op=If.8_Type=f32_Shape=static_IR=If-8_707_Device=CPU_Config=() +406:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f6f3c6d199a224ee983f6905aa4f72ea4138e6076d7307c72588dda0cc9c6ed1_Device=CPU_Config=() +406:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=45c9fd0289649c455939587c623f1884a4e675e2f970192d9ac2f60a65e6da9a_Device=CPU_Config=() +406:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=c985b086d155654f9db8470da3af5245c4fbb0139015d049b8b3b20f393c2545_Device=CPU_Config=() +405:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=36783f31e83ed0f978f00a1cdd87a25b4b881c251fe059e5d2829be3d0b45c5c_Device=CPU_Config=() +405:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=i64_Shape=dynamic_IR=2c47f1ee19359a486a72bdafc2614159d48fffc80ddabe0f897212a454a75b18_Device=CPU_Config=() +405:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i64_Shape=static_IR=8834a8881c2da907f6ae38d4c45100dde754e653f3e4994cf9add141c217c781_Device=CPU_Config=() +405:conformance_If/ReadIRTest.QueryModel/Op=If.8_Type=f32_Shape=static_IR=e178ca7afdd75b09f1ee18e50afd30eed0740497637863c3397b5a75c0f8bfd5_Device=CPU_Config=() +405:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=de3245d77d2e004bea85af29c91e1668ae1b6905fe2cdabb92711adbde6406a9_Device=CPU_Config=() +405:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9795aaeb71c115680b567eab0877df338c0d8971858b489a2636c4483f3512cb_Device=CPU_Config=() +405:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0bc2bfc4481de7733f5503750d21376d00de6bfa699ecff3ee0c4333d9515db8_Device=CPU_Config=() +405:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i32_Shape=static_IR=1c63f30ce7cb977ac945ee25eb97f3c472a81b999eacbcdd4b3bfd253f25cb51_Device=CPU_Config=() +404:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=d51bc4204bb6079e79da8d0cf95ab8a3454c90a040aee0fc6fedb00f0795c577_Device=CPU_Config=() 
+404:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=17472505b59f6bcf4f5570eb83b2225b056a403bf2d165562edabb8501fad1e7_Device=CPU_Config=()
+404:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=3638f7714d7627d7536ec02891656e512fee1ec55d59bb4f68c7409ad82f3879_Device=CPU_Config=()
+404:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=2d6f6b955cd431e0f4786aae35f5a1f7f69a6b627e88c42643ded0477f1cfef7_Device=CPU_Config=()
+404:conformance_ReduceMin/ReadIRTest.ImportExport/Op=ReduceMin.1_Type=i32_Shape=static_IR=a2b9f0b4c044e23f536d137b6e157d1357df657d1af119cb8f71294d7dc098cd_Device=CPU_Config=()
+404:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=dynamic_IR=33e67497d576ce6af4a214d55862646d034effd328ef5beed8d7b0f380b6b689_Device=CPU_Config=()
+404:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b15fd62115a849e0b5226ebe9162cda9371ad2783637a518f2a8724d24710253_Device=CPU_Config=()
+404:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2dd63d58c85301d765882b95995de97f4eff14bbb3c933c4e4b8ee5fbc2e9e71_Device=CPU_Config=()
+404:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0973b76264164ca52a9883a69ff5f7df977e28c33a0dbe9095e7e92acd7854bf_Device=CPU_Config=()
+404:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=33297e2649e2f0c53b0bfb5e349d83ede580471764202480855e3f1efc8017a5_Device=CPU_Config=()
+403:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=12e571ef61251520c35bd8c0429b1ee71277033ae88101f08dd769a300d86c5c_Device=CPU_Config=()
+403:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de46537615051a46fea66871c5fc6ef3417b577ce42bd1f7e239d821e1ed5c51_Device=CPU_Config=()
+402:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e5249d5630503351688090f1a9d0143b02e750045924aee8f9003072446583f4_Device=CPU_Config=()
+402:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=aa14d6e18f8580015dd7d32b167fba6ee137133b87fd617eab4599f407a51b69_Device=CPU_Config=()
+402:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=8ea778d7d98fd08efe4b2efa501ef3599df00ca9bd036980ce86e0d6dc454b96_Device=CPU_Config=()
+402:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=ae817dcac1ed2395cc4098f67bf6d2bcbecd8b7e91ef7592622d1ee75ed4a3cc_Device=CPU_Config=()
+402:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=19a94fc5cfe3ab1b4e169b342ec8d9f0fdc4ef19484c8c34d6ab938c6e7bf5fd_Device=CPU_Config=()
+402:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=f645a2275ff33ad614c801a8f2f262ce1ca95417e0ca59e28d4b87cf3289c00b_Device=CPU_Config=()
+402:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=eaac8b3d6a4920fa2ac101965805d140502fb409e230821d5c2a370aec15eed8_Device=CPU_Config=()
+402:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=6d7cce19ff10d7690177fe1e3200d872ef5d8827b7ff49e6c9994e597a15dab2_Device=CPU_Config=()
+402:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=8ef34b5ce0dd0100a8efad53b3b71e87f76ed69496cb6f030e76478d7daddf69_Device=CPU_Config=()
+402:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=30dd450fadb8a1081c1315cd0e5234728862b4de39b097a5a3248d551369b60a_Device=CPU_Config=()
+402:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=73623637f6155bde0a4735dcd904e5b491d7d459bef5f8d3f66f02f9558937a1_Device=CPU_Config=()
+401:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=f32_Shape=dynamic_IR=fc75aba0dd172d6628de0b473569c672b52f070ac3c446cc3342cb1184ef076a_Device=CPU_Config=()
+401:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=bff490cc95cf384b15409e96ee7d0995aa91640e23409cda381b85b2fef69e01_Device=CPU_Config=()
+401:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=e48a363cfdabe0b62509e21641bb1cc88edaaa7d2eb82bf3ce747cab8355ff3b_Device=CPU_Config=()
+401:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=2e3f53e7b949e1dd0ab38890b0c9fc9e770dfb68569e37fa5cdd4e3ef03d6eb0_Device=CPU_Config=()
+401:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6017d3f7ee3d7e667e8e7e4881f9aae335d47c8617c92b18ec370aa0770314d9_Device=CPU_Config=()
+401:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=bd3ed1b35506cb92c8e587acb102c70abbe02bdaa75f76e5792d48d8e1f2f33f_Device=CPU_Config=()
+401:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=7581193e4db43b0e50c6a1a52f8b348d88587040bf38d1b780ac660781e3d3a4_Device=CPU_Config=()
+401:conformance/OpImplCheckTest.checkPluginImplementation/Function=GatherElements_opset6_Device=CPU_Config=()
+400:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0d74ee98934e32799620ac90fd3ae8335bca026b9225782458949c64139d89c3_Device=CPU_Config=()
+400:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a4ab938f33d0b58425ed98a56789d0ee94beeca13ec7fe3358c9d3751ef136a5_Device=CPU_Config=()
+400:conformance_Sqrt/ReadIRTest.Inference/Op=Sqrt.1_Type=f32_Shape=dynamic_IR=8b79cf070ed44bdefd5afbe86a81199e189fa486c42190795419dbfc7cc26d6b_Device=CPU_Config=()
+400:conformance_Sigmoid/ReadIRTest.Inference/Op=Sigmoid.1_Type=f32_Shape=static_IR=b6a75c5d2a686eae53cc25c6b107630b31a8a4d8c6514980ed1a97754f33bdcd_Device=CPU_Config=()
+400:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=i64_Shape=static_IR=d106f0cba8d8311b75f6074c099f45e10400c0829fdd1826292b1310471076cb_Device=CPU_Config=()
+400:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=a6722b8718b7c028e1bbde4462945c096dfc551775af27bcc7d00967d7d73919_Device=CPU_Config=()
+400:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=57921f181e48af2b294b923633e457650e5ab2a9ac7f5d4d07930974ad5e03e1_Device=CPU_Config=()
+400:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=6838901bafb44e26f73134e2c0eb2be8f1f777ab794ae340d61b62d891ff3d59_Device=CPU_Config=()
+400:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=439308ddb64edf02f96ade09e7888cf89f422fbdb8c8242521ecc3f93e61bdd7_Device=CPU_Config=()
+400:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=596d0b6cfe8b39e0ceaa665f1fa82aeeeff78d09315fca7cef031b6dc210a1f3_Device=CPU_Config=()
+400:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=dynamic_IR=7581193e4db43b0e50c6a1a52f8b348d88587040bf38d1b780ac660781e3d3a4_Device=CPU_Config=()
+400:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=80a8c850ad3eec3e8fd00d2ac09695a0f87a10e4b80b9022f49ddcd9805eb2d1_Device=CPU_Config=()
+399:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=9bae5a53011ecba6327961e6496f3312134c81e148523434968c3c56b5e0c491_Device=CPU_Config=()
+399:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=30466048a7da9db59d20a210af1979341f7b9552362e64a89357d650102a213e_Device=CPU_Config=()
+399:conformance/OpImplCheckTest.checkPluginImplementation/Function=GenerateProposals_opset9_Device=CPU_Config=()
+398:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d6d8f4f28ac34b734cc984f83e8f5f6598c063a6955d00ef4c08252d5d05c276_Device=CPU_Config=()
+398:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=()
+398:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=37337436d0d481c689caabec3bbc8f21ecec65560c70de4dd1f5b0ed9e444bf9_Device=CPU_Config=()
+398:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=780fe1f9a82f728f88511b2d8194c4f425144ffb5ae4aaeb1ce90c6fdea3362a_Device=CPU_Config=()
+398:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i64_Shape=static_IR=9fa81cf001e6c48dfcf4e75aa77f95b3dce4e8d48b6ec3cfc896dcc08006c62e_Device=CPU_Config=()
+398:conformance_ScatterNDUpdate/ReadIRTest.QueryModel/Op=ScatterNDUpdate.4_Type=i32_Shape=dynamic_IR=91f59d10b16e7305a651b8ee9480a0068225d6cd56026139e35ba69b9f84b00f_Device=CPU_Config=()
+398:conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=dynamic_IR=43ceadf05184954dd8697d4f737de323ec2ee75f93e0d33d60dab2acc995f3b6_Device=CPU_Config=()
+398:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=662ca1fd253f0a0c29b89eb1310ea5c7c87895533130ca1a8b76f791ef1ad99b_Device=CPU_Config=()
+398:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=27a43bf8c20a81f1e244ace4c53f7cd9343a2603ba2c8b50bb041a4046ae6ecd_Device=CPU_Config=()
+398:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2058548f687014df36b4da1b2644f07fa117d5a1d303a13c4d913a3f979d3ed6_Device=CPU_Config=()
+397:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5c33d593e408ad72bf438729a423318330c69c69f1504402420635942050ac06_Device=CPU_Config=()
+397:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=e9539332df9388555564db1da36679acc7b505b8c1fa687731f2052999bfe1fd_Device=CPU_Config=()
+397:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=c7ce41820be10f17c8d48c005703d536d18e4f49b1d2022ac58f77b7b9afadec_Device=CPU_Config=()
+397:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=efbe921ab3c27a093f20ff704fd02e5c610e7507d94a2d2092379c5a99743380_Device=CPU_Config=()
+397:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=ba1b92833f2c8734c5178762b6cd8c847c23027ecf79ebeba295c39b667162a1_Device=CPU_Config=()
+397:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=755b95b2e9c5cb5da4d4cd2c46ced327e10dbfc67a0d934667177b5fab73d431_Device=CPU_Config=()
+397:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=319c7b312e9074a43819b034ce82eddf1c8f9e51d4eba3fbc7a112cb6393debf_Device=CPU_Config=()
+397:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=d38ac6654882078aafe169f6d1280279fa81e646529f6f2bd621338a756046a0_Device=CPU_Config=()
+397:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=f2995592ad35fbaf52873e0180081397916db8165b9596166e8d449e44b57169_Device=CPU_Config=()
+397:conformance/OpImplCheckTest.checkPluginImplementation/Function=LogicalNot_opset1_Device=CPU_Config=()
+396:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=3801fd5b86bf772977c131734d8356c8dfa41b9056091937473be600e332fbee_Device=CPU_Config=()
+396:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=474e4bfe52232239280bbe4e2d2aed15cf69c7ec8db86b010084c6e68a8d0e1d_Device=CPU_Config=()
+396:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=43a00b4dc097228af52c00054951dd5b57d8e0086207f11a8996e5ac880c8980_Device=CPU_Config=()
+396:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=789949951bc3afd20fdff943ca2a706f79eb4f95be60086ddf632b43c3e401e6_Device=CPU_Config=()
+396:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a0336bba08291ea34d6271c83816fb349d163fc5989171b07fe1bce50a2f3ea9_Device=CPU_Config=()
+396:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=1a0639f04a426db13dd7cfac918ec6e2254e1cb8f18e0853e3bd597cdf090421_Device=CPU_Config=()
+396:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=1da672197f2c962a6cdfb059e9d09c10a03c3b082838f53d2faf6a761fee0637_Device=CPU_Config=()
+396:conformance/OpImplCheckTest.checkPluginImplementation/Function=ShapeOf_opset1_Device=CPU_Config=()
+395:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d9771ac46751569172412bbd4495eccdbac435f78a97f8fdfffa9215faa74544_Device=CPU_Config=()
+395:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=efbe921ab3c27a093f20ff704fd02e5c610e7507d94a2d2092379c5a99743380_Device=CPU_Config=()
+395:conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=85df90c3ae7b84d89ec4eae30556ebf4af996c318afa45d90dbb219f73033f31_Device=CPU_Config=()
+395:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3d73edb68da4aee1c052b79ffce030b368f204c04bffd9a9dc01a9b54de932e7_Device=CPU_Config=()
+395:conformance_FakeQuantize/ReadIRTest.Inference/Op=FakeQuantize.1_Type=f32_Shape=static_IR=66f4344fac8e5e5484f5762b1bfea68ed08bcbc378a8b10f53d0a8e053524749_Device=CPU_Config=()
+395:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=fe80b80ced0033aef6f7f97abd22de1271430f700d7dc9aad9a2a819f91e11a5_Device=CPU_Config=()
+395:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=b93daedfdba7331025c12a5eb4b881bd7df445d80bd4fac34833087fe6d65bf5_Device=CPU_Config=()
+395:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=335e78458fe959fc5a9669069890bcc67c1f1eabf21dbfb6011cc80b8322e9c0_Device=CPU_Config=()
+394:conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=dynamic_IR=8c78da5f8bf9c1a4cd7f89cde9d61eb6500fa10ea0454e36a585466ed97fb12d_Device=CPU_Config=()
+394:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=469a63c5aee73bdefc9abdf8abd8413713c0b68cc098d16c193399a11c7093c5_Device=CPU_Config=()
+394:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=01b095b8763565527be0de9edff565070949485db907493e99e95c2cddf6abaf_Device=CPU_Config=()
+394:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=dynamic_IR=791be312b2af6da6abd2eadadc6185c7052271efbcf314bb678828313fc58414_Device=CPU_Config=()
+394:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=99fbf009fb26eae6bfc372a5b3d9bef89d6f82e5fa45c62cc5ece995bcc71079_Device=CPU_Config=()
+394:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=486675b6412030beffb4209c326672af07d343d5e1bbca31b9bfeed3cc339e3d_Device=CPU_Config=()
+394:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=4520f02da2bc674bf781c84ea3cca92375a1eeaa77f4f4f7e4cfc3ef75fb2964_Device=CPU_Config=()
+393:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=610a8f8c44b0e133d4b5684c37017859d06bb2251482eca0cdece0a1c216b936_Device=CPU_Config=()
+393:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=dynamic_IR=2af646407076eafcc1ed2d628158fc32eac4ef2fb34fb967962c06f81376d61c_Device=CPU_Config=()
+393:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=fe80951a0a44625457a6106d8613c9813c9c0b8fe3606fa5ac1c064217c8a0e6_Device=CPU_Config=()
+393:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=745c0804609863998b4bcc6956b1e78fc221e0e4f1535ab09b89a9c966a16995_Device=CPU_Config=()
+393:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=fda1f84f5e911136f8daaf4fcebfb989f3216c066ddc1cae578882a41ca0f5bf_Device=CPU_Config=()
+393:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=e365913541918ae265939740fd9930d4f5d919773ce47a4e896a264bd8f86460_Device=CPU_Config=()
+392:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=51bb427ac8abf618a72159cde1ee840e08518016a09e995f503cd888941f5039_Device=CPU_Config=()
+392:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=d5f5f2d39bfe4ccc6f12f76e5eca8e2e40ac7ac6c5f38a7cac21970df213d4cc_Device=CPU_Config=()
+392:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=f32_Shape=static_IR=e7b766e89f08e80fd96ba40dac738561546ca7210c4566b727ca8cb49528c823_Device=CPU_Config=()
+392:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=29a633b64671e28103c44b79ec5c329118c0d7c4f70466ad44482116aa2a3b6c_Device=CPU_Config=()
+392:conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=860decd2bf091a335f6f820b2c6b6acc58618fbb6027e30484470ce899bb1591_Device=CPU_Config=()
+392:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=b35fe21330bf6e76f55ad27b71fb0422a737d0c400255fd6cf2cdb3252d3617f_Device=CPU_Config=()
+392:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=dynamic_IR=2d924ba2d56e6b5c7423c6d622e7bd250ab275e0a0ab4745e232046a3223ce7d_Device=CPU_Config=()
+392:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=e61665bc5590265246ab882bb55b9487e81412012ed98ac9cb16154bc8eddd17_Device=CPU_Config=()
+392:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=7e1801bf4ef7ad1b27663dfb399f318ccb2526e925d48e3d30e2ab837824b217_Device=CPU_Config=()
+392:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=static_IR=f7bc08f4bc2edb455c7afc9cecba3666df1150bf4e3a67a20061714f867ddb0f_Device=CPU_Config=()
+391:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=29bb3b751638e157d0ba7114cc0e156a4b792a9dbb2bafa3ca124516595f01a2_Device=CPU_Config=()
+391:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c777366b6b37df3f4a3b19b637f66b707fbbb113972a9eff7eb4d793731f8c9b_Device=CPU_Config=()
+391:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=4a9237e5cd29f0d2d5e738891752c6f6b29c9dc4c29d130b9c9921ad5787f819_Device=CPU_Config=()
+391:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=bc90fb9a95a74efb937b6cf808584dd1e91aa6c4d774640b51f4325f0aca6b42_Device=CPU_Config=()
+391:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=f32_Shape=static_IR=ca5d2626f2066e0c806addc4b6ffb4b3a71f1183b93783b92f44de62d82faaf8_Device=CPU_Config=()
+391:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=f802331401875cb16be10c9f752520406437b2e63a50e022b7d95b732e5296f2_Device=CPU_Config=()
+391:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=91b6cdd8a7664759217ce0b84a8baed2105bca0ae9876e9efd01c074aa27039c_Device=CPU_Config=()
+391:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=d81ef130a76622c79592b0b42acf5cd6dd357ccec28958dec6eb02a654beb9ab_Device=CPU_Config=()
+390:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=90cf12798b53937dd1a31daebe5444e1c10c27c5a67fcde6dc61b5feb1df89ec_Device=CPU_Config=()
+390:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=5995707c0c9656ffe179147e29d03df5a35286481a4140b7ef019434d83aaa61_Device=CPU_Config=()
+390:conformance_Sqrt/ReadIRTest.QueryModel/Op=Sqrt.1_Type=f32_Shape=static_IR=4420cfb7f4a734731dacfe5b0c27db41ccaac2ab8bbff56cac0f99ed96e976f2_Device=CPU_Config=()
+390:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=27e8804992c0d74c18c958f0876c06be6c7eda2b36fe7de3ab616b577dce13c6_Device=CPU_Config=()
+390:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=0d62db1843ef7e470a613f9f4d4999ce0e6c94365bd667b78c283cb9406e915d_Device=CPU_Config=()
+390:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=c1c38223834d99f4481cb74db2bc302710629de5807b4f08381fd01655b9d44a_Device=CPU_Config=()
+390:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=a1b6d340122e8e3a7a665c69fb11b3c7b460eae79ec81ed3c32e878d10d5c3eb_Device=CPU_Config=()
+390:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=e03d85019ea733c10b7ece4721036f3aeae2e60179d9b044d34e862608fd36a1_Device=CPU_Config=()
+390:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=29a544bbefe85bdabe1d5d36d83d8ee1d80c71f8b98ff6e898e1062671daa8ad_Device=CPU_Config=()
+390:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a50644dc2d133df429ff4aa6a19ca9bafbf41d2948522e584fc5f417ad16d76c_Device=CPU_Config=()
+390:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9c66c6a6d93c10149920c3e034d9a0765afbef45dab66083fd5e3d796a57e406_Device=CPU_Config=()
+390:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=40876e66f31053b621aea004baaba7607b9131d4fff8e8b00ed7e1e58204988c_Device=CPU_Config=()
+390:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=be4d557c62a3a43e7f309d2276cd7549bf1976ca8593bf2be752e60c42237a19_Device=CPU_Config=()
+390:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=07b257862a62290d7e8ae939147bb7422992528bf54209b8d1bff500b99b6f4b_Device=CPU_Config=()
+390:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=c808434d1d2cbd9ea66373f22c7e635c5bb2e3a6294f93421d1d9d34ac62515d_Device=CPU_Config=()
+390:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=static_IR=f7bc08f4bc2edb455c7afc9cecba3666df1150bf4e3a67a20061714f867ddb0f_Device=CPU_Config=()
+389:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=1696523c5dd3a701251583b9c9f29e43f852383cec3dde5a93e6f7f7cabf3398_Device=CPU_Config=()
+389:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=766d904d646b6f43847158972a615db69af2bf66517db0992a19418856bef52f_Device=CPU_Config=()
+389:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=21a343909133e844b3d88a967b2f6c948e4c9c9eb96030b936f9517dd9bec865_Device=CPU_Config=()
+389:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=54a5630072fb0e0127611a4ae63db14b7c0fa0979f4d2be7bfec548b5291a0af_Device=CPU_Config=()
+389:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=a3d8e1343e43c666358304b530278c73bc7c52a0d7fff38977154b6f7c456731_Device=CPU_Config=()
+389:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=525408cae199f0936f7552165ba12d61ced6b675d75d56f1d69be8281feec5d5_Device=CPU_Config=()
+388:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d77f317dd01a80955f901d0da2930aa1f82531848f4bf22d839c60a84941e6c4_Device=CPU_Config=()
+388:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=77316b4db1518258304408714d1b57a023070cefb5c1b141b6721028258f5744_Device=CPU_Config=()
+388:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=9031b1919c35a9df591ff64fbe4748c02cc837649899099542716f35b5c68cc5_Device=CPU_Config=()
+388:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=e08e84b17997c1b1279429161d287720e4c7deb0e6d055539149bc577ed3b104_Device=CPU_Config=()
+388:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=6167830634e0b253aa78e883453d45bb737cd5df33c849e4b16b99164fd49d5e_Device=CPU_Config=()
+388:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=fbdf008803736374dd213f1d7e0a041fc0e9b3f025c212a588fa05842ee5ee56_Device=CPU_Config=()
+388:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a848753a720bf9791ee4c239cf08712d714b877bfb6df23805590ad690ceaff7_Device=CPU_Config=()
+388:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=0bb9a29f02d37ba32dc29b4284f58e10ce59571799f58381d449c77655c795d6_Device=CPU_Config=()
+388:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=boolean_Shape=static_IR=d296b02cead8f38f8a2c9fa73ab8103d3050549c92fb807b040dd6e3bbd7e2ff_Device=CPU_Config=()
+388:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=d38ac6654882078aafe169f6d1280279fa81e646529f6f2bd621338a756046a0_Device=CPU_Config=()
+388:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=e365913541918ae265939740fd9930d4f5d919773ce47a4e896a264bd8f86460_Device=CPU_Config=()
+388:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i32_Shape=static_IR=cd4d566c041357cdd7f8539933888956fff5cfd15e3c42872df59d9890c169b3_Device=CPU_Config=()
+387:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=cbd851b8c4e89bce3a20b8795b3bc5a0105d26e252a4674541ff630496144aaa_Device=CPU_Config=()
+387:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=a449aa561efb222cad1a414ee87443f9fec0e5c2f6220f6a57b6705c9ef26cd6_Device=CPU_Config=()
+387:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=e1130d42d591780dd2a746ce7ff874a2bf4725ca9fd09803932ba4a7b0b389aa_Device=CPU_Config=()
+387:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=5038017e90f931327d5159938d422b2afc229aa4d776a4ac80a946724fee357d_Device=CPU_Config=()
+387:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=9ca1360242688f494c59b8eb1073a4bf7291ee7b2ff460380bd47248fc591dc1_Device=CPU_Config=()
+387:conformance_HSwish/ReadIRTest.Inference/Op=HSwish.4_Type=f32_Shape=static_IR=98546b7eda390c30f82053a093b5e3855c6dc8c631451b3637eadf95858af2bb_Device=CPU_Config=()
+387:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1385af2553c7c9b0f9ce2aa4345d8b767d36136a9cd8e2acae79d4970d6b5c8b_Device=CPU_Config=()
+387:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=8337ad383956ad96ca95f4aeb967e05c694fe586b4ed6e46547e3ffa0217c59b_Device=CPU_Config=()
+386:conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d8a48dc7581c2ece0179d0ad668e8caebdddddfe492e365ea2e0e5f3a7302eea_Device=CPU_Config=()
+386:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=6b69e46c11a2a82ac7ad6697cd768d88da6e870e75f489779bbd1714bad23450_Device=CPU_Config=()
+385:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=59bac5d30704b81def0385b29fb8d79e459a71b9251b4f6e94116524bd9aa7be_Device=CPU_Config=()
+385:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=0138363d3baa37869a3e55e1b059a42a87612507ba318e753361a58549ed5ec1_Device=CPU_Config=()
+385:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=26d09bb7dc7ce95aac39023ac90bd083da9101b9e7383af49e7467e4f0571f2e_Device=CPU_Config=()
+385:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=d661093ec9006177e5d47e7f666d7c98353f9c3d5290ba6284145f60822f2573_Device=CPU_Config=()
+385:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=b99ba096eea2f3725fa98eabc2a941fa895c0a58bcd7a8ea68d2a245ce913113_Device=CPU_Config=()
+384:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=77316b4db1518258304408714d1b57a023070cefb5c1b141b6721028258f5744_Device=CPU_Config=()
+384:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=120cc30794fe9c9d59dc9df6fadbb9791f3a6b99e4b9fdc06c5e01f494b05780_Device=CPU_Config=()
+384:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=f76da5edfb7a9e3fa7cec034fa43307bce74eeb0629176ae5dd40d154baf858f_Device=CPU_Config=()
+384:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4bb7bd2471752f1a62dc15dbcacad87dd329443459a90dc6768b1a34fd00c064_Device=CPU_Config=()
+384:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=6faa91bd8e7037c9233825cde9313cfd2afafa21ff423a00544eaa36d734332e_Device=CPU_Config=()
+384:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=586094b4ff6617c08c87a53c7be1ca26aae40657c8d964d81eda731dbb27e848_Device=CPU_Config=()
+383:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=eb966d8fd7e1301280e6ef709dd785d210a35a1346eb88c3f38379bd96036ce4_Device=CPU_Config=()
+383:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=8fc296db9f7dd10289217cb81cdf5991c6b5f3c89369936a94c8ac484702bfa3_Device=CPU_Config=()
+383:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=1cb2f17dcf4f8b738a23313501e9a98101169cd9e368f3fb98c552f994232073_Device=CPU_Config=()
+383:conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_Shape=static_IR=083771171646a2eadcbb3384bd457e04d74ce8ea771813cdf67c56f7bbf20c69_Device=CPU_Config=()
+382:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4baf5444c85028a4cfdedc5888a7cd403e2491ab694ab65c820dd3c410f8eafb_Device=CPU_Config=()
+382:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3c03ae2ab13dfccc85d9909840eafb6a291b978e9bf859f27886b4a0d3e87ffa_Device=CPU_Config=()
+382:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d16722dfa770998d9923d09fa1e2a973bac5ae7afc6452a0b5ac21d839720bb4_Device=CPU_Config=()
+382:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=92dc9b12889f441d7a93e95851a15849139787b0ecc080e70d266fe4cb6dd9c1_Device=CPU_Config=()
+382:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=2f842d4b64513c6df5748c54a1166a3f14436dc1ca59b7a28530bcafcdcde2f6_Device=CPU_Config=()
+382:conformance_LogicalNot/ReadIRTest.ImportExport/Op=LogicalNot.1_Type=boolean_Shape=static_IR=66b8769b499fa31cfd7545411d16a17b04e1a336bb63a7e907707cd170a30fc9_Device=CPU_Config=()
+382:conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=4a55e1cc1410675b7789f083f2cd3f6ff851f49c8a0818f5bf0dd27280b197f9_Device=CPU_Config=()
+382:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=517a5eeb2f1f21304b8a1d5971f89bfc93aa678252180bdb05144657b1a8619f_Device=CPU_Config=()
+382:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=cbfc33348aff4daf15fb7926884243c7ffe38aa29e60eceda90fa9b8aadad5b1_Device=CPU_Config=()
+382:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=2058e018d32d8a73b2bf6471186e555c47e2c1a15ceb4131bacc43110bc17d30_Device=CPU_Config=()
+381:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_Shape=static_IR=a56b3f758c88a5723e4a2cf04ce46c92681ed7fb0d6dd7f4d5b937dbf00b0eff_Device=CPU_Config=()
+381:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=0138363d3baa37869a3e55e1b059a42a87612507ba318e753361a58549ed5ec1_Device=CPU_Config=()
+381:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=8368b4f6e208aa4cfbf0aeaa648e9408c281a71d98d15ee09407d26274fb349f_Device=CPU_Config=()
+381:conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d42cb628111ca80a33a558dcd1c2c310aa7b95d6c48549075291f49ec59c302d_Device=CPU_Config=()
+381:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=99183013393075553f5cd30818ccd603ff5d3e9e71dd8f42ced0df2377280729_Device=CPU_Config=()
+381:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=cd2470c72fa7d2238d2eca4d067e49a02340ad187681be2fa7e0bac6eab3500b_Device=CPU_Config=()
+381:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=u64_Shape=dynamic_IR=5f87db7fc306440f807b413acb7eb175932f29f59d1b5eb4a9df8945b9aef9d4_Device=CPU_Config=()
+381:conformance_BatchToSpace/ReadIRTest.QueryModel/Op=BatchToSpace.2_Type=f32_Shape=static_IR=f118f5911730937f9dab91ad5eb6f78cb1af6de7bae1dc745dab2d4f02257fff_Device=CPU_Config=()
+381:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=static_IR=b92112b2ea2f233a6fb6ee512363082a49db0f85ab23f89dc29ad907e6ab408f_Device=CPU_Config=()
+380:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=a05c6778a396b4eb89a5e112fe505a41f47ff6bef50fa025eee1dfb7ec6a95e7_Device=CPU_Config=()
+380:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=634db7c7a580a605f3375f671b3bcb2a1baf5856b32032d2786a5f8061df63c3_Device=CPU_Config=()
+380:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=5b1fc9693e4e947bc88a88bf1ad22ee2f59c13bf291626eec3e8ed49b0cef7ed_Device=CPU_Config=()
+380:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=93ce70e605eb712479090e3a266e86eb7422bf0fdd3acb1c38a0b92a9c381e2c_Device=CPU_Config=()
+380:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=d442b2d9df68f25f567a3e8da8d87866c200d391624cf1c339554a57a9a527a4_Device=CPU_Config=()
+379:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f1ffa9874732c1aa88e04fd55fbc864c9c6986877d3d52045fa6ae7f18dba62b_Device=CPU_Config=()
+379:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=429b91023f3ae9a323e40ed372fc29926fcd6aa7a8e77e4ddaaf68fa648c43b7_Device=CPU_Config=()
+379:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=f7e1aae2dbc817ca8f64a6bb0742e476055c239cc6e31a4233b7580205feeb41_Device=CPU_Config=()
+379:conformance_Loop/ReadIRTest.QueryModel/Op=Loop.5_Type=f32_Shape=static_IR=7ad6fe3ff1472399c9c0e12aba1db89105e1e4a243cd092dc43ee763a2571fa9_Device=CPU_Config=()
+379:conformance_HSwish/ReadIRTest.QueryModel/Op=HSwish.4_Type=f32_Shape=static_IR=ce108d9befa5ee87b0161e969c5ac986c176e468ecae9f66895cdc4fc6bad940_Device=CPU_Config=()
+379:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d141b35e277394511f5635b2e395039c986ac392e6f49c2415da6a5071bee96a_Device=CPU_Config=()
+379:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=43aed1509066aa7c839a82c9865228ce3ebdfbe519061649807875ec6e86d715_Device=CPU_Config=()
+379:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=285bcc240dec2c32e171f3866ea33107a109566fb8ef39f0dd84e99664aaf8df_Device=CPU_Config=()
+379:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=27b03da9a0155039856b1bebe424d10d1b8ad768747cbeb851bfc0463edd5cb6_Device=CPU_Config=()
+379:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=15d323a190bbeb1834cfa08a3afc633a2c203e44e2660bff4e98453c02ea4cfc_Device=CPU_Config=()
+379:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=dynamic_IR=79cffe28ff617b42488d33b204b0f50bcf4e304c74d2a11820c830e091c6383e_Device=CPU_Config=()
+378:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=557b6184296452c25e420a307a2021cfb0eedcb73e42bb4bc247c34c15b18447_Device=CPU_Config=()
+378:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=41c1e13447cce632ccd478ec2bf36f09e510942449b0bffd3271f3b1f0b48d54_Device=CPU_Config=()
+378:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=d962e7157ea216206d6c5b11fe5ef6ee162a1f7dc20f84a3b058e405c324a592_Device=CPU_Config=()
+378:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=94b08f3c309048124724d9de0d120698fed90ff0237b07c4a4a2b7ccf843d76a_Device=CPU_Config=()
+378:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aaafa4ff22a5fcab1e6e0f48065210ff790275fba7a5c16602aa4a00951a8cb8_Device=CPU_Config=()
+378:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8ef4d7ceb7d904a084d93d6ede1c15a64d2511b3bf1312d630792eb21c591408_Device=CPU_Config=()
+378:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4b00183255fde45d5c3b815b552e5a4279284bfe1ceb31389560260ad5546c14_Device=CPU_Config=()
+378:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=99b432aa5821136994e06b4e3c690a4e298bc5a496740ea2c5fe6aa300edacf8_Device=CPU_Config=()
+378:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=boolean_Shape=dynamic_IR=2d924ba2d56e6b5c7423c6d622e7bd250ab275e0a0ab4745e232046a3223ce7d_Device=CPU_Config=()
+377:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=254937408e91c70536c4f3b3f81f1a7aede93b29f142631a46fa7d962c531131_Device=CPU_Config=()
+377:conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=4c3b0cda20bf6b3c574eaefbce21b9b2b0ed92fa1b37c32af252b111b6466d0e_Device=CPU_Config=()
+377:conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=68853f0b8867d4ddb5eeb239690f1b41600c05f64ee4d3efa8cc828e72b9bc1f_Device=CPU_Config=()
+377:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=766d904d646b6f43847158972a615db69af2bf66517db0992a19418856bef52f_Device=CPU_Config=()
+377:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=131fa1ed3ff9df038bbed73979ab906c3d84fea9dd2cf5dedc82b3222d511b1d_Device=CPU_Config=()
+377:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=060423427a9100b6a38aad12a83043441f8af436c1d2502350ae867f45bd721f_Device=CPU_Config=()
+377:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=cccecd6fd3e8f3d84fb98f219b212cd2b55ae0e4e34c099a25a1028e9e2f83e7_Device=CPU_Config=()
+376:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=75aed7fbb8f7d7e8a1281d4a16c4fe2e55160dfb9e6a1bc446913a223c5aa0de_Device=CPU_Config=()
+376:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1245c8dbd9027cc56d2eeb58e1bd23774ce945522f66a17ecc3c03ca1ca163b0_Device=CPU_Config=()
+376:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=e77dc4aecdbd4ab3d67fc3c1d9e350a9d259af1d4c0188d680121a31c6ed8ccf_Device=CPU_Config=()
+376:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f18fa21106120cecd81f50d635b1c42cbd641877ffbf78e746ef7375ff546d7d_Device=CPU_Config=()
+376:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a76c4cc0a1f2294a3ceb18dd5d214d842cf37c08d2e34770c66c29b44ee92e48_Device=CPU_Config=()
+376:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=35ab7a27cb56964d974f5e1b55c1ed76d7f9443f97da0b977370ca9fc414e093_Device=CPU_Config=()
+376:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=d294c71f3796d2e2b88f819f6512ed03942eab440681a5bc5b092e5a34192107_Device=CPU_Config=()
+376:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=0256d48640841a9233553afa85e34dca797e6b5eedbd772f606c1a0e6f8e91a1_Device=CPU_Config=()
+376:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=5e7e2adae49fae3a376e9a5a971513a9b23b5fe4008ce51814e0fa1fd91f1f22_Device=CPU_Config=()
+376:conformance/OpImplCheckTest.checkPluginImplementation/Function=If_opset8_Device=CPU_Config=()
+375:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=7c8594e723d769f8817c58fc16146033afb91d821bc941dff944223796029f8b_Device=CPU_Config=()
+375:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=574e53e574b1a6e0bc16a7296aadd78785cac535293e956b008b0a2274b7cb36_Device=CPU_Config=()
+375:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=9efb5290056ad2f5ee663d4f67a89edbcc4936e512748bcbc0e9f3935b690b1a_Device=CPU_Config=()
+375:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=cbd1111f323b8e6d78b59b531708defef64b90463f973f64f52251795ac5a7dc_Device=CPU_Config=()
+375:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=923032e47821636c4c8098a7a9afa97b331a47d47357c780b7bced2e46ea9921_Device=CPU_Config=()
+375:conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=cc18959ba7c26661ba0f986207bd00aca503bf924b31c4a2070ac40ac3ec5468_Device=CPU_Config=()
+374:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b2931a4972ae4f946778af45cd5824e6958dcc1fc79cea4da1032590b2663d16_Device=CPU_Config=()
+374:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1269afc1a9f9a4f71ca2167cc59274b7a3bead8cca474162919619b810eb9c1a_Device=CPU_Config=()
+374:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=edb5dc5a42b36879d5ced77fc2db7d8b331c888534602893ffb277f742da1005_Device=CPU_Config=()
+374:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=66bf131d73ad3116d698e15ac3c9e48bde66e096228138eb865c0807295c0d4d_Device=CPU_Config=()
+374:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=a1b6d340122e8e3a7a665c69fb11b3c7b460eae79ec81ed3c32e878d10d5c3eb_Device=CPU_Config=()
+374:conformance_HSigmoid/ReadIRTest.Inference/Op=HSigmoid.5_Type=f32_Shape=static_IR=4a55e1cc1410675b7789f083f2cd3f6ff851f49c8a0818f5bf0dd27280b197f9_Device=CPU_Config=()
+374:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fa2eea1b545d6b876282ed0165fb935f0af249c713e3f20fd97cc06118e615eb_Device=CPU_Config=()
+374:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=c56cf3dc39ed0072f3e5a8cadd1502fef904b32de3b7760ee4c6964c0e505ac9_Device=CPU_Config=()
+374:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=922e194a5ae53e76be5ae624754d3c1fe5ea0d8c564410062bd9c30afc48ffe0_Device=CPU_Config=()
+373:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f0145ffb8d2846d866b1a89c8217d54209830e6d3d0d10913e75af42f2510c74_Device=CPU_Config=()
+373:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a956d2fb1fd17e2d864b3eaa8915cc0c4f9a768e35fdf5bf20cf6bc7f41aa130_Device=CPU_Config=()
+373:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=()
+373:conformance_Swish/ReadIRTest.Inference/Op=Swish.4_Type=f32_Shape=static_IR=d79b47022a50437c9df095b34e515c53eb042c9813fcf6dc7bcdb96962818ddf_Device=CPU_Config=()
+372:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f84dc200af2852df01662dfbe891b8ed4abb27db6763f3a2b645ab75324834f3_Device=CPU_Config=()
+372:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=8c43b49d99c64bec883205ca15c7b2d9dbb47b9fe5140fedaeb8eb7220a36f6c_Device=CPU_Config=()
+372:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=static_IR=bb6a76dcb7d086a6f8dc96d3e0b17573b6dc2775ff9d0f19060947deda586bde_Device=CPU_Config=()
+372:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=dbee34cd3b708559af1ceb5fcf89aac35add00fc1b9e3eda2beebb2d5b629fc1_Device=CPU_Config=()
+372:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i32_Shape=static_IR=e256f7acbc71e64cab857fb6378a035096c7ceebdd4f867b5140d35865cf6532_Device=CPU_Config=()
+372:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=bd927dd60e7b65e84d03c2c01d29c6932961f801bed1312124c2212b5e22a921_Device=CPU_Config=()
+372:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=a3370e3b46f385ea6e46137d49d5f1b4158fe08d0a3e9feb47a162f6b3640951_Device=CPU_Config=()
+372:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=6daca83f4b162285c00c695825e255cbafce9cf9c9cea68b969a301105475303_Device=CPU_Config=()
+372:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=922e194a5ae53e76be5ae624754d3c1fe5ea0d8c564410062bd9c30afc48ffe0_Device=CPU_Config=()
+372:conformance/OpImplCheckTest.checkPluginImplementation/Function=Proposal_opset1_Device=CPU_Config=()
+371:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=77dbcc61a98e0bf3c1bdcbec543818a8a959751f10b8ec1489b66570ff4e634e_Device=CPU_Config=()
+371:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=155b8d9ccf06f4d8f9ada6024fbe66f39e4e6e96917c12d7ac02eac98c5473de_Device=CPU_Config=()
+371:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=54009010668252832a2a755d277e9f574fd2486892184caa0eb4774e753ed094_Device=CPU_Config=()
+371:conformance_Minimum/ReadIRTest.Inference/Op=Minimum.1_Type=f32_Shape=static_IR=5150e1785d97b052a42873f9e9d23a511027248ff4b13ba7c269c8c3d4639e45_Device=CPU_Config=()
+371:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=df085870336c57084e22afa8b52ece7149abc21b5d1784965a7d36d5ada91e8b_Device=CPU_Config=()
+371:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c54189129febdb864ceaa5447a7a0011c8ccdf3711fcfd87424feca61b44c0b6_Device=CPU_Config=()
+371:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=681b1f284fb69c16681d3efd2081d7f812496e3a027baef35a75bb0aeb9c003b_Device=CPU_Config=()
+370:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e23a8faab46e1096894a906794325ff1a8c6001d3b980aa809088385675c77ed_Device=CPU_Config=()
+370:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=1366ff72dd5b68a3faf25de8f98e4ac5500663b1aac4941af11532ea2ee769d3_Device=CPU_Config=()
+370:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.1_Type=i64_Shape=static_IR=1c06ff77487507dddcddf290d75d4812bfc8a7b2c9bc78176da5212eab029966_Device=CPU_Config=()
+370:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=360205b273a323d2cea16c9ac98847c904ed6cabb2412d3b49c27fd2eec52ab1_Device=CPU_Config=()
+370:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=651e5fbc222577151cf14e9c8e9bdf9e155f1e0d277206887160d65b532caf53_Device=CPU_Config=()
+370:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2fda32f5fe8957d151306845ffd0f877b2efad70f7bd4921fab2fd770d78c2a8_Device=CPU_Config=()
+370:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=1988b645a87be14c17740085aa8c4a38e88cd2111f0ba294f77ed0bf856b0561_Device=CPU_Config=()
+369:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=35d15ad61ee34c17abe50c4a67e568c2e253712c2d63cb828b0bccdb2175a6bf_Device=CPU_Config=()
+369:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=92ed2f40e1ecbb9a90904cfe8e8ceda94f73154a44ac28a50c0d7acb221e8835_Device=CPU_Config=()
+369:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=53da49d7aaa81cbb7c3a3dbc8ea938bbffabda14bd106fa6c2b6abe244ba5cda_Device=CPU_Config=()
+369:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=static_IR=a6722b8718b7c028e1bbde4462945c096dfc551775af27bcc7d00967d7d73919_Device=CPU_Config=()
+369:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=0a2311ddc09b949cceb73fd0e09bbdcc2932c2635fee3a2c411bec27a30e9439_Device=CPU_Config=()
+369:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d932ccb58823509e768be954dc85ef1162d9456db17138d650a2a883e31b99ed_Device=CPU_Config=()
+369:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=86a9c310cdd99d5c2fc0b7239080c9cff89efd37662cb38da28bc9e2a1471d7a_Device=CPU_Config=()
+369:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=73623637f6155bde0a4735dcd904e5b491d7d459bef5f8d3f66f02f9558937a1_Device=CPU_Config=()
+369:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=f64585bfa3951a93f76c18fbc795f3ef82176e270c9f37161bdfe48e094c1d39_Device=CPU_Config=()
+368:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=96117baf3ff208c696a9796404eec467b613c37977067ff0cc62e39355856d30_Device=CPU_Config=()
+368:conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=()
+368:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=154d7358887845b8f2a661e79ef57318fa9499ee5c19b7cae461b6f798c57b36_Device=CPU_Config=()
+367:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=8919e05ab2b0d545cabc2e2732828fa693c8f364e9d4d03faf7097f787d4f628_Device=CPU_Config=()
+367:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=14598e053d7dee616de43f2b160e780b4bb53decaea53b31db58341464b82e48_Device=CPU_Config=()
+367:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0d782801290370c7c390ad549171ec3500ab344b8b34ce4b8fd8b05339fe5557_Device=CPU_Config=()
+366:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f977fc239a0230860702f8c1971bd424f10b978bb03937668c37edee6777f12b_Device=CPU_Config=()
+366:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=920aa0d732c7ace2bcfe73df0e7217e66b6388dce554ef827efa96f4e7d31a2f_Device=CPU_Config=()
+366:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=10c19142631a9ac6d8026ec82820aa75ba1e14605fe5ea1e017fa4bde4a90c44_Device=CPU_Config=()
+366:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=29a633b64671e28103c44b79ec5c329118c0d7c4f70466ad44482116aa2a3b6c_Device=CPU_Config=()
+366:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e8c2981885674129fedb6fc6a376f3fd3db7bf6f9867ee8a3f4e5aede63ee168_Device=CPU_Config=()
+366:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1c73b4d05053065f5c37954586376ae4e1cf9e220959363b7c2cb381f489bee0_Device=CPU_Config=()
+366:conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMSequence_opset5_Device=CPU_Config=()
+365:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=63ba45deb14e56e09574bd3694e3d94caf6ab09f67f5278e6c299c6c924a3cf2_Device=CPU_Config=()
+365:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e4523b73661dc593224b91713f8f20f1c87513a62e3b8ee8265e1136eb74f9ed_Device=CPU_Config=()
+365:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=d25e26d9a54a5dc9799e9881e3035bfd5f125d12ea6cb69fb1eb0166e29ec88d_Device=CPU_Config=()
+365:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=b4f42a7d1252f2dd02b31ac7b0cf4ffcbd452dbf0e508833e7dc709ee04889c3_Device=CPU_Config=()
+365:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=120cc30794fe9c9d59dc9df6fadbb9791f3a6b99e4b9fdc06c5e01f494b05780_Device=CPU_Config=()
+365:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i64_Shape=dynamic_IR=502fbd3f8c0e9c0a9523269a9df9b0fbd83d59ca94f373fd543048429a957f5c_Device=CPU_Config=()
+365:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=dynamic_IR=502fbd3f8c0e9c0a9523269a9df9b0fbd83d59ca94f373fd543048429a957f5c_Device=CPU_Config=()
+365:conformance_Loop/ReadIRTest.QueryModel/Op=Loop.5_Type=f32_Shape=static_IR=35c61b2251b78ad9f9804bd3f9e301e1f974c6dc138ce0466b8b940d106ddd72_Device=CPU_Config=()
+365:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3a17c045930ed967b45d1606b78fdc92e736731b198465e95ed7268d99eed246_Device=CPU_Config=()
+365:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0ce1ec496e5d71728fc5daaba87809c5922406a65e85823913381de0d2112e01_Device=CPU_Config=()
+365:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=3209c1cce78c7b856203c0a5676f6fad4d098a3146c7305ee3c0471b3be2e3d5_Device=CPU_Config=()
+365:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=e05af92d21ebd869cf6e9554a4aa0bfc60c8b0c64baebee798f0be5a0a01019e_Device=CPU_Config=()
+365:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=1a0639f04a426db13dd7cfac918ec6e2254e1cb8f18e0853e3bd597cdf090421_Device=CPU_Config=()
+364:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=dynamic_IR=9feb072b58552898ff80a05dffe8f39c880b4f2a2382d56cb24a78e278ea1756_Device=CPU_Config=()
+364:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=b91a183b8c36d6e8358dad7056638b8091005393dd1ee6813728f25cd3e6a9f5_Device=CPU_Config=()
+364:conformance_LRN/ReadIRTest.ImportExport/Op=LRN.1_Type=f32_Shape=static_IR=c1a0f6661ad306b82e66063988835c1a17072608792f3423bb058fe38c4b14d1_Device=CPU_Config=()
+364:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=fdb744ee1deeced50395d992d949989a5e8bac5d4f73a6d4b51a56f22359f4f1_Device=CPU_Config=()
+364:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=58cd9ea3d8db317b6ff7fca55bebcbc6846aebdbe309b1b621f5535b18a70320_Device=CPU_Config=()
+364:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=99720c46a11f3e84906fd9327f25b187f328c6910868ac89738bc67ce0d90b64_Device=CPU_Config=()
+364:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=f8662769a2f3a5fb20582ccbb1931b7e3fa73ec7713eca30362b0e7c0baf829a_Device=CPU_Config=()
+364:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=2e70eb484f4bac4cd11e9f643d2531cd0e78994af07c015183edf9d62a709d47_Device=CPU_Config=()
+364:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=00b85178c2e7f891c89e99a6692b94a56ab0882f4a30167997e104db1429a9c9_Device=CPU_Config=()
+364:conformance/OpImplCheckTest.checkPluginImplementation/Function=GridSample_opset9_Device=CPU_Config=()
+363:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=()
+363:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=822cfb44c999b67217c8fff1da18293fcbd3a8a71d901d95991ad6df22398af2_Device=CPU_Config=()
+363:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=db5c391cca1031cb4ec32def18ce3a4776c53f71e861c39b350fe5856da4fa43_Device=CPU_Config=()
+363:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=dbabd4c2992053ca70e9d6a489b437cf8d1f13807220adb5054204e9bede00e1_Device=CPU_Config=()
+363:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=165dc8f683138c4d731ee850aa6212a70851b91630cc42e2b4e9d46e0ab15b57_Device=CPU_Config=()
+363:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=851aa3cf931a01e0188758055b866fd14280bc344f548da6166e4a57ca7c9254_Device=CPU_Config=()
+363:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4fb0809c5cf2945a097d18f445de6f4f5cd2c124cdb495e6f0a12e9d937e2b80_Device=CPU_Config=()
+363:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=d442b2d9df68f25f567a3e8da8d87866c200d391624cf1c339554a57a9a527a4_Device=CPU_Config=()
+362:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e2734d3e803c031e2fd56d0c9f7a72818227bc7981d9f7d9d1148f1cf07135fa_Device=CPU_Config=()
+362:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=22707f4dd48a39013f543e7eea951a8feb16952bb25f9dd34a0f05dcc28883f6_Device=CPU_Config=()
+362:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=1696523c5dd3a701251583b9c9f29e43f852383cec3dde5a93e6f7f7cabf3398_Device=CPU_Config=()
+362:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=abd733caa05592feccf41344f138de6625efce4afe605efeea57e0748d7b2e07_Device=CPU_Config=()
+362:conformance_HSigmoid/ReadIRTest.Inference/Op=HSigmoid.5_Type=f32_Shape=static_IR=85df90c3ae7b84d89ec4eae30556ebf4af996c318afa45d90dbb219f73033f31_Device=CPU_Config=()
+362:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ce2bcc21fba106cc8be4846179a73cb30f650e7ec48d443fed591f6b479fa9d1_Device=CPU_Config=()
+362:conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_Shape=static_IR=5713be8dd761def00c701c74d0aa913d259206eff1103b9fa6de0f6f1a25e566_Device=CPU_Config=()
+361:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=949611ba6617b054b828175c04452b8fcbd109c99cb25d5d8827a872b4044fd3_Device=CPU_Config=()
+361:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=68c3856ae6a30752004a5ebfabb93bd0d98446a91ba7fd84e686226f45d326b9_Device=CPU_Config=()
+361:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=576ef0e9eaf8fefade547928d4592bc2b341ff1100c3de5104f0a63b2fbeeca0_Device=CPU_Config=()
+361:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=49245e23b8c1c485428d0e490a687e48c541bfb833eb7838efd8c112736a076d_Device=CPU_Config=()
+361:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2ce1f8773e871f8aed0d3541cfafba0bb079e1765f04c1336af8a47f354cd766_Device=CPU_Config=()
+361:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=9281a7e3ea8124fdbe416d1f15434752a7e799fc77a63be64babddf60b6f2d8b_Device=CPU_Config=()
+361:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=7685da6dcf91a208b72d5961c2c248d816de501366163d61b1ee3c148787fe77_Device=CPU_Config=()
+361:conformance_ReduceProd/ReadIRTest.QueryModel/Op=ReduceProd.1_Type=i64_Shape=static_IR=44e0e688ecb44d7a9e83f7c9e1639fae49b2883dfc1b1ed588c98c5bd1f614fe_Device=CPU_Config=()
+361:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d7ce9fd9d99a7ce9ebb5fdadb4db39f4ea66f74788704b2b9f96660c7403c031_Device=CPU_Config=()
+361:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=dynamic_IR=7562536120d473cca837bb2ad1e3969484868111954ac0b168a5c2805264a689_Device=CPU_Config=()
+361:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=static_IR=b92112b2ea2f233a6fb6ee512363082a49db0f85ab23f89dc29ad907e6ab408f_Device=CPU_Config=()
+360:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=93dee798d72e36c04cf60499e95f84cd6b63d84226d7dd1dc0edcf0875cf301f_Device=CPU_Config=()
+360:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=dcfe0aa2fab0afc3b370be59184a5e59c7bc0e8b2930bb671d1d6b38f55234ea_Device=CPU_Config=()
+360:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=bec81407211db6e10d7c8811bc58b53c23c8aafa0e2083f262204f345b9bcfc6_Device=CPU_Config=()
+360:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=49ed5fbacb5510d9cb3970dee136271e98ad5322b95217c6dc41026e583f3bcc_Device=CPU_Config=()
+360:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=c30414e8e845d75527c26f62880518cc4d24c1a528b20cefc3b2c32be7436c81_Device=CPU_Config=()
+360:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=dynamic_IR=c838ac42d5464130a9049a63f7020166b34e2ef974c257a4060fa02c3b70ff76_Device=CPU_Config=()
+360:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=54009010668252832a2a755d277e9f574fd2486892184caa0eb4774e753ed094_Device=CPU_Config=()
+360:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=e1a894c49294c6930cb8f8c857ec745fa2c6d18cc3607389c89af4d13df4e411_Device=CPU_Config=()
+360:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=94de295ab12bd6b03bc5de22f9e9c46d5875d111eb942d3ba35f8e2456ece1cd_Device=CPU_Config=()
+360:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a9b0552d84d057a656080c8e302afa30962dc02105abe7136cfd77f0433eec18_Device=CPU_Config=()
+360:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7b8d235013affb9589d57a8f99b36858d739258b787cffc7cec85d1dca567261_Device=CPU_Config=()
+360:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=dynamic_IR=70c260fea7c5ff6d2d1e9580ecf6c6a8a26c0e688b4f8dc4540888526bc13e76_Device=CPU_Config=()
+360:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=3e016da7faeea7044ea204d1c3a2f1729d3d7ef0be27f5b769484bc7aebea5ab_Device=CPU_Config=()
+360:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_Shape=static_IR=cd4d566c041357cdd7f8539933888956fff5cfd15e3c42872df59d9890c169b3_Device=CPU_Config=()
+359:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=12e571ef61251520c35bd8c0429b1ee71277033ae88101f08dd769a300d86c5c_Device=CPU_Config=()
+359:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8fa841d409e36b6665e289f4963330eaff4124d5452c93b75d779937cabe14d8_Device=CPU_Config=()
+359:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0a2b1efb810d1dcf7897c3671f1eef0c36bcdca679e24b8e86f078128b381833_Device=CPU_Config=()
+359:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=319f74dd5b7a959d0e5443c76051fa5958463cd18ec11c275ef92b77321bb93c_Device=CPU_Config=()
+359:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6a8fb5f2948de2436a33999ee2a01e239193c268f61634f1e80692b0c45aa3da_Device=CPU_Config=()
+359:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=0b603f9cb63e722122080ea36f76fe45b25da83b0b1e213871140e82dea5f405_Device=CPU_Config=()
+359:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=939b665ae35f9a384e3119dc3bdc1904b105de495d262648282c859b0cb4c9e3_Device=CPU_Config=()
+359:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=33199e062b7b59c8244477fd2682a08876e72126701842265efc0c9fb4a90c94_Device=CPU_Config=()
+359:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=34404b0d0f956acd1827f417b44bc1cf1a085d6517d5d81a6c6d38ee27c745e7_Device=CPU_Config=()
+358:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=539610c21b2839b71cfecbb15b7b7145f9fee8bfef8ed9e1d73aaad2de661496_Device=CPU_Config=()
+358:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=7f37f31081476643f5c279fddc3d25eae22d909730b4aca0211aa70fdd572843_Device=CPU_Config=()
+358:conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_Shape=dynamic_IR=0d660483dfd9c9975f102d300ec98da49785fcb6484b379c45df8a61e1292797_Device=CPU_Config=()
+358:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=704412b992d55bf9ff00d823458e5d3b3a369e47b3eca3429fed94b87c8da554_Device=CPU_Config=()
+358:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8ec74565f16a2ee1e322b4549ea19aa0b30719787abd90bd957e121705edb268_Device=CPU_Config=()
+358:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=9c6d5cdaf19c92d1f994e4ae6cfdecf5a9ff04e47a2e0e68f3a08ec8f6e74479_Device=CPU_Config=()
+358:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=f826a40d2d5d59d35300a911a15dfd8022c0fc486ecdc7f00c06a26f5dc44338_Device=CPU_Config=()
+357:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=fffd24bb56af50d2e56fb2abdc6c0c96fceb21f00a9a1556b3890bdc50840352_Device=CPU_Config=()
+357:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3dcf824c36c868d06d715e3fe24587c31eb7cad18ae9f9e044c7f6abfd261651_Device=CPU_Config=()
+357:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=356e2a728749d3970a85939d23344315d0ff533567c35a559caa3bef173b76f7_Device=CPU_Config=()
+357:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=2ef8d38ce64fd0460d641e6f3bfcb1654bbe3d2c25f9dd244ae259eaa4b6941b_Device=CPU_Config=()
+357:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=b6669eb568f36e5d649ae67afdecaa481064561d7a71f1aab592968aca7d8bb0_Device=CPU_Config=()
+357:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c843b49e26b9be555df454d4c63f0bff72e6ce29d3ae80e9193741500b08f424_Device=CPU_Config=()
+357:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5beb9762188e985c9554ffb0a05fdc1608fb7d970baacebbbd7118186a324617_Device=CPU_Config=()
+357:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f28013382ca254b4538a5527896cdfcd9d404aa854af83ef1d417abcdd781ef5_Device=CPU_Config=()
+357:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=674e2446a2f5929d12d36f14451d68e7b55ad61d2d8df755e85c27c4a52943e3_Device=CPU_Config=()
+357:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=9b915f1788d3d4768839d2cefe4fbba2f8b2d8aa4c22f9ad574335c22d0db1a2_Device=CPU_Config=()
+357:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=dynamic_IR=edf223c654667e60869d97d2fb6a2bdf356db8d7e997b4b9a66e56445bc24f30_Device=CPU_Config=()
+356:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ca3d0cbccca665493e85a757798ab5e12399ad295466cea744c7a2d278c86c97_Device=CPU_Config=()
+356:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f208ab78a0ef0497856952f499578a17818269d066f4281183ef92ac2f9ce449_Device=CPU_Config=()
+356:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5d738cba54fcfd696b0cb7e808dd466b4510900ccba26c728b5eb272a55d6bab_Device=CPU_Config=()
+356:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=5995707c0c9656ffe179147e29d03df5a35286481a4140b7ef019434d83aaa61_Device=CPU_Config=()
+356:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=451a3b635d3882a87cc0d7b3f6f74197c08b708669751bb11fef93da9604e276_Device=CPU_Config=()
+356:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=f286960ead5b83e3e4015ee5751b114a9d70e90aa788e0fb004ac50b95a8fa2d_Device=CPU_Config=()
+356:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=2c114b0035075d866c028f9a1168725375feac9a666a881ae6b7db6e9066bb3f_Device=CPU_Config=()
+356:conformance/OpImplCheckTest.checkPluginImplementation/Function=Sin_opset1_Device=CPU_Config=()
+355:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=24920893b72e3bdf88b7e4142d1dd9ae0a679f686a3b187bf740f014d04b9ade_Device=CPU_Config=()
+355:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8ad9f9e5cb26eb922d7d7d80f93be2e9d3a5ef344a013c9dd546df2ef195ec24_Device=CPU_Config=()
+355:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=879bb4767167c3e9c45eacd08a14fb7e01b072864013784f924d62aad7b37c56_Device=CPU_Config=()
+355:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=df97761393479b4c56cc923a2b89888b7c3fb949f5c3a93f4bba0ac8a44178aa_Device=CPU_Config=()
+355:conformance_Tanh/ReadIRTest.Inference/Op=Tanh.1_Type=f32_Shape=static_IR=7065a836f4fd77a07431ecff6bcc591ef9b0160cb5366a8f3c8b8fe5f83f7be1_Device=CPU_Config=()
+355:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=1d7cabddc96cb4ca2ed111c9f7a9c31b76ed9a052fd0b79db6bdc8fc55f24a4b_Device=CPU_Config=()
+355:conformance_ReverseSequence/ReadIRTest.QueryModel/Op=ReverseSequence.1_Type=f32_Shape=static_IR=1ff07d9b87513cab5bbcf5059507b8c998fdb25d2802b267bb6c0b90eb3e231d_Device=CPU_Config=()
+355:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=9bb8728e6e9f68cf68a9e39d1aa4c618c4aca4187d4262f735c0647d680c0506_Device=CPU_Config=()
+355:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=dynamic_IR=e306da3fedc4369302fb21159f2bbbe65849661eabe5bb83efdad3e83f64fd68_Device=CPU_Config=()
+354:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8f622d39d560109549e99d37f3c9cb476f4d69e8525e7a0ad8fce6fe79a6f982_Device=CPU_Config=()
+354:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=ade98724a678a73bf789fc539dfa277031242ea3a694227dae29c11b45cdfb9e_Device=CPU_Config=()
+354:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=7b702f91c21af6c336654c924011d0f4d149111c503c697fcb85a83cd60b7ab7_Device=CPU_Config=()
+354:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=05c2f891e743416ad510bf0ebf713738bd41258123cc4bbdc5cf067f251e35d8_Device=CPU_Config=()
+354:conformance_ScatterUpdate/ReadIRTest.Inference/Op=ScatterUpdate.3_Type=f32_Shape=static_IR=537f04d52049add01923acd0c57cee03462926f9ce213a4fc9774496f5f66398_Device=CPU_Config=()
+354:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=817b3db8f96297276bc70f1b4854867cb92c164925c9dce59a1d054e3c315bee_Device=CPU_Config=()
+354:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=08393711dca608a5beec54493fa162068673eb746a6223b6dab2640d411570c0_Device=CPU_Config=()
+354:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=bd927dd60e7b65e84d03c2c01d29c6932961f801bed1312124c2212b5e22a921_Device=CPU_Config=()
+354:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=1b13b40884ddc8a2afdfc9bf351627746534303122dd4e0c2c5fdeace9e89e7c_Device=CPU_Config=()
+354:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=3ca9994321c7492af9bff158852a484636638e711ae39a6acb66d273f696906e_Device=CPU_Config=()
+354:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=boolean_Shape=static_IR=0702c04c1d16f65b7d552044e66732886a0b389702aa43f4c845e2460ddff1c4_Device=CPU_Config=()
+354:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i32_Shape=static_IR=a7f6c704686f1b0e6fd4ab522930aa3fb5b4cd4683b204aa31e5c73b427e7058_Device=CPU_Config=()
+354:conformance/OpImplCheckTest.checkPluginImplementation/Function=Ceiling_opset1_Device=CPU_Config=()
+353:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8b55c14423b60f30029c68c603417fb98119c5922e2827c60c99edc05ea813e1_Device=CPU_Config=()
+353:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=822cfb44c999b67217c8fff1da18293fcbd3a8a71d901d95991ad6df22398af2_Device=CPU_Config=()
+353:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ca72f78cc4db6d46ce969f61c5bf707507ed8204785159e1ac5130e7aa251858_Device=CPU_Config=()
+353:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=7b9883414482f3b1108e549a9c47bb8a8aa162d962813c7e99411d000e02690e_Device=CPU_Config=() +353:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=b288dfcaf8fd8fefe24212a70255bb280e7e695badf6fad6538042701d77073e_Device=CPU_Config=() +353:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=f32_Shape=static_IR=da15c9ddbf446de00565c83e95b8a554d400b8b925481e56eb3df41f7efe26d9_Device=CPU_Config=() +353:conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=f32_Shape=static_IR=537f04d52049add01923acd0c57cee03462926f9ce213a4fc9774496f5f66398_Device=CPU_Config=() +353:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i64_Shape=static_IR=2e3f53e7b949e1dd0ab38890b0c9fc9e770dfb68569e37fa5cdd4e3ef03d6eb0_Device=CPU_Config=() +353:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=ee49657e646466b0c22aff01740a48c1cc271a828a8c3e10a21d75b04f511cb1_Device=CPU_Config=() +353:conformance_LogSoftmax/ReadIRTest.Inference/Op=LogSoftmax.5_Type=f32_Shape=dynamic_IR=a3f02c85607891ecc34c484b433c6a78333e13f3d8cd231e651f8bec26e7d0ce_Device=CPU_Config=() +353:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=db85fabcfcf049a7225468036e29c949eb779253ba145485205596e72cb8cc7e_Device=CPU_Config=() +353:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3e2e16f3ba7681bebb6b4c06788f38a40fe24e26fa3ec3accd756c87bee7d62f_Device=CPU_Config=() +353:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=4fe6c9c924477957512c3d32086ca167fe5a4ddd5cd1b90d5d32452f6de8317e_Device=CPU_Config=() +353:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=99fbf009fb26eae6bfc372a5b3d9bef89d6f82e5fa45c62cc5ece995bcc71079_Device=CPU_Config=() +353:conformance/OpImplCheckTest.checkPluginImplementation/Function=GroupConvolutionBackpropData_opset1_Device=CPU_Config=() +352:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=94693638ec4742dea16dc168eb9323995f1b2a35a53f577cf58ac3a08096892d_Device=CPU_Config=() +352:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=() +352:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=d6be1de020bbe6277d8cacd77eece21f766e5e39badb520ef29e880d52e3604b_Device=CPU_Config=() +352:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=055b7eb16539ce5cee62e165db9a6d51a11e0bdf90bc9f82eeca1f2faac2bf89_Device=CPU_Config=() +352:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=776018866cd0a06171706794dcd0d7bb13b5960fd98a66b306ecfac7595feec9_Device=CPU_Config=() +352:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=40876e66f31053b621aea004baaba7607b9131d4fff8e8b00ed7e1e58204988c_Device=CPU_Config=() +352:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=02f589480d24784ece323ba30be856c7cc718151d3588f683ef4825a407749ac_Device=CPU_Config=() +352:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=18d294f76a0d8f4562d85033a45aaa3f2d50fdfd9d9f30e295a772fd10540d25_Device=CPU_Config=() 
+352:conformance/OpImplCheckTest.checkPluginImplementation/Function=Loop_opset5_Device=CPU_Config=() +351:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c80a104f5912c91154ff9731be5aaf1ce189988eb9689ebc32cf4bb8f1307615_Device=CPU_Config=() +351:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=e7b766e89f08e80fd96ba40dac738561546ca7210c4566b727ca8cb49528c823_Device=CPU_Config=() +351:conformance_Mish/ReadIRTest.ImportExport/Op=Mish.4_Type=f32_Shape=static_IR=64374638dfe8bed8e9432c51d92d23b807172fc490c0dfc76428f2c49be92400_Device=CPU_Config=() +351:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=185e849a9d8fec26bd81b2098d63bd842d34dc7a8ee7e47086a208e4b8bd9298_Device=CPU_Config=() +351:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=335e78458fe959fc5a9669069890bcc67c1f1eabf21dbfb6011cc80b8322e9c0_Device=CPU_Config=() +351:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0da39d97a2f46fcbdf524727d0283243d3bf0c3fab75f76f529b6480c84f67c1_Device=CPU_Config=() +351:conformance_Ceiling/ReadIRTest.Inference/Op=Ceiling.1_Type=f32_Shape=static_IR=1484c3d0a5a8b6d1daa002e27b07bb8ba0b5d83aae50b0a3b3bea08483815d55_Device=CPU_Config=() +351:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=29624e785b9377dbf03b9aae46e7d0049e93a94655059ec37a0fe308ff7cb9a3_Device=CPU_Config=() +350:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9ea20be5797b5ab937555c69751a5be584c73a191b3fe3d6fb96a5665e26fcbb_Device=CPU_Config=() +350:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=d2759b52de5dc9f1fa494c243d08ac40cf4e877c51323d53dbfa02abc1564e45_Device=CPU_Config=() +350:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=41c94561e79611e27aaf339205962d4967188b385d68c169b2bf4557173005d7_Device=CPU_Config=() +350:conformance_ROIPooling/ReadIRTest.QueryModel/Op=ROIPooling.2_Type=f32_Shape=static_IR=baa256d53878b528f6bdba95bf1837cc570dd83b577220f95d9c24cb26d37c35_Device=CPU_Config=() +350:conformance_Proposal/ReadIRTest.Inference/Op=Proposal.4_Type=f32_Shape=static_IR=b169d6330e4006909e4deaaf78b03e789ccd9538c5b59d9d41e05f878bb60704_Device=CPU_Config=() +350:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=0db5765bcfeb7716699abd0cee850918cf5ef18e2cfdf1614b463734ca35a20f_Device=CPU_Config=() +350:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=e020cc29b6ec76cfac0e0b52ed3024458fbeb567c4fe9932eb5257e3ade79b95_Device=CPU_Config=() +350:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=962d8a421369e4dac96b6d89d05053f63c9e5fc8b7b82a60c922432125da80c0_Device=CPU_Config=() +350:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=6faa91bd8e7037c9233825cde9313cfd2afafa21ff423a00544eaa36d734332e_Device=CPU_Config=() +350:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=d5cd3fb647dd4a57feb28366d922a151a3ffb1707864f2ac85595fcc30f222be_Device=CPU_Config=() +349:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=883597c2c4e004b0ec1e1ca8d1b75395c714fc6a99cd31e35ca0597d0ccd8f8f_Device=CPU_Config=() 
+349:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=bbe05f014b2e4602f4e44d9c07795321404d2459bf782d2dd406de14bd2bd523_Device=CPU_Config=() +349:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=i32_Shape=static_IR=f777fb31e1669cd58cc77e2a04c3f9a804b654b6d710432641a3dc34504460b4_Device=CPU_Config=() +349:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=564cd54b2564c7e39fda0c5e580c274b7bf99603760f6c66f03b4450f23cc4bf_Device=CPU_Config=() +349:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=22707f4dd48a39013f543e7eea951a8feb16952bb25f9dd34a0f05dcc28883f6_Device=CPU_Config=() +349:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=6837cea94eff6256c3c29807532662e123ccbffde1fcb6f75875d65aa7124a4b_Device=CPU_Config=() +349:conformance_LSTMSequence/ReadIRTest.QueryModel/Op=LSTMSequence.5_Type=f32_Shape=static_IR=1f24aeeef6f9f91272546fca89299c1ce448b0008fe43905db434ae3f28a75d0_Device=CPU_Config=() +349:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aaafa4ff22a5fcab1e6e0f48065210ff790275fba7a5c16602aa4a00951a8cb8_Device=CPU_Config=() +349:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6e508ca44667fb311f5b6d634584d2751c3fb15fc034626765c90695b7de9619_Device=CPU_Config=() +348:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c90ac17f02f16c647a0a206326f24ac348a0f8a7787037486e52ecc8c091818e_Device=CPU_Config=() +348:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=259cf71b937e6d184948130afa5684d7539769988cee7a74b06138ad4d09c689_Device=CPU_Config=() +348:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=b7aebb27d8d2b43e770ade887778c291072210b947b77b1b92e05d3327843977_Device=CPU_Config=() +348:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=b4f42a7d1252f2dd02b31ac7b0cf4ffcbd452dbf0e508833e7dc709ee04889c3_Device=CPU_Config=() +348:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=01d609bdfca9f2a499a564f66ab9dd71b394310593d27b8739283b19980e2dc2_Device=CPU_Config=() +348:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=e7ab5b597681da2db03c13a2424b4e0a62135eecfb2f97f4c59b53331afb7f85_Device=CPU_Config=() +348:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=01b095b8763565527be0de9edff565070949485db907493e99e95c2cddf6abaf_Device=CPU_Config=() +348:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=ff96b044b0064dcc13dc7c1d80f2b2cddde0ead8c4501d5d741034833079d47b_Device=CPU_Config=() +348:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=d507892556310f7fe85cbf9245ddf040b219ec8cfe9c779809180a011caab9d6_Device=CPU_Config=() +348:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de01a0d560bebb0018927f02409922427ef35b59a96f0aef8f18991ee0d9542a_Device=CPU_Config=() +348:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1c9d69e1a85d03b8599961a8a1b90af7b3b2d43bc5c4f4a6b8d5da3c22166abd_Device=CPU_Config=() 
+348:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=77d771a04d8618bf32943e460b714076f7bbc34cd1d40f9a90864af976bea30e_Device=CPU_Config=() +348:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=6e614b9877c6dd3bf1ebd731443e5a1e0b7492edbc3a4683adcff53c965ca1bb_Device=CPU_Config=() +348:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=486675b6412030beffb4209c326672af07d343d5e1bbca31b9bfeed3cc339e3d_Device=CPU_Config=() +348:conformance/OpImplCheckTest.checkPluginImplementation/Function=DFT_opset7_Device=CPU_Config=() +347:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f83f2d7d9c08aaf30635b39b51c0d7f1f622b4624da59c6cbcdf28d42470f11d_Device=CPU_Config=() +347:conformance_Tanh/ReadIRTest.QueryModel/Op=Tanh.1_Type=f32_Shape=static_IR=2b026a0d21a35251b07099e31ec58c459b848602575d2afa67e55830e8f3f411_Device=CPU_Config=() +347:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=a5dc3f8dd6385eb7f6d4052af82e27b7af7e8a58bdcb6092ec79ea3087f141c6_Device=CPU_Config=() +347:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=08b46b9b2881764fde87811d2462a361d75c30fcec74f631f116f010953daced_Device=CPU_Config=() +347:conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=e939c4d2a27e1d7dba93827ab807881c32e47d48b726fec701712bc85c3404a8_Device=CPU_Config=() +347:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=725aaeceedd7eba9be6ba4203e31cead733ed80dbafc33e902465d4338dc8f4c_Device=CPU_Config=() +347:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=f0edc45979b98d4401eea2c345bbcb794721dd3cdbfb3963be5a2842b27ccc5b_Device=CPU_Config=() +347:conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=98546b7eda390c30f82053a093b5e3855c6dc8c631451b3637eadf95858af2bb_Device=CPU_Config=() +347:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e7e10785757d3131ebc375ebfd83c556e2c34a72be20965d9dd3e4f24a5ee2f9_Device=CPU_Config=() +347:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a1e0bbe02c433cb144b4825a9f1b2c30c03743f210830db5462736850b6db383_Device=CPU_Config=() +347:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=14108fac0139d5bb39f6b2106857e1ac91c8d44ef9156e4e0873facf9d932316_Device=CPU_Config=() +347:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=547fea7da34d5e65ad7ea069be003753e9ef281110c80dde11520bc350c4ca14_Device=CPU_Config=() +347:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=3c1c8bc7ce009c03509ca9d6a86f3d5cff89be49439e7513edcde4e62fbfb8ce_Device=CPU_Config=() +347:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=b005a58abf8192face35451602a847d378849223e4d433924581d28ef8141303_Device=CPU_Config=() +346:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=64358a022d0c072ff89427a2f3acd3a3afb49b8f76e57353eb95962fd2572ca9_Device=CPU_Config=() +346:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=42afa027ada245d36900a89c54a870ba5fc7fe3cc3bc0fc7dbda23af3e5111d8_Device=CPU_Config=() 
+346:conformance_HSigmoid/ReadIRTest.Inference/Op=HSigmoid.5_Type=f32_Shape=static_IR=cc18959ba7c26661ba0f986207bd00aca503bf924b31c4a2070ac40ac3ec5468_Device=CPU_Config=() +346:conformance_FakeQuantize/ReadIRTest.Inference/Op=FakeQuantize.1_Type=f32_Shape=static_IR=48256cdbf5a3d19f0b7bb6b0540cbd664a36885a88fa8f5f56da7057de97a608_Device=CPU_Config=() +346:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=8b8121ebbd51ee995f98531f595145a01ba70ce026ad0bee588733c33e70272d_Device=CPU_Config=() +346:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=static_IR=7e1801bf4ef7ad1b27663dfb399f318ccb2526e925d48e3d30e2ab837824b217_Device=CPU_Config=() +345:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=2c5e9a1cd59ec2d5786132697bfcb1519a7857cdfe06038bb39abed39c09e9a2_Device=CPU_Config=() +345:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=48a273073ced3efa39d01e5ce40c30b2901e8a3dff0b414911282b8fdfc0b09f_Device=CPU_Config=() +345:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=f45b24f3bf21a2c94bc89cdc3d20c283d47f4e6ea386444897330e232bd7d90f_Device=CPU_Config=() +345:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=6f2159bf315f062962fe87512c15ed5cacf09f898397a92b690c32caf147e50e_Device=CPU_Config=() +345:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d7e3ea8c5ea46f1b0430b6a2763c85395235c0ac58652e1d269e1257f6dbf7c8_Device=CPU_Config=() +345:conformance_Clamp/ReadIRTest.QueryModel/Op=Clamp.1_Type=f32_Shape=static_IR=cc989fde083332a75d3066112105028a711bdac4fc44463d098022774da752b7_Device=CPU_Config=() +344:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=1457b3d8c7f130113d74f540dfbd2d4062f869018f7b1afb11c743acc0a007b9_Device=CPU_Config=() +344:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=eb966d8fd7e1301280e6ef709dd785d210a35a1346eb88c3f38379bd96036ce4_Device=CPU_Config=() +344:conformance_ShuffleChannels/ReadIRTest.ImportExport/Op=ShuffleChannels.1_Type=f32_Shape=static_IR=46e851dee1f7bead1a6e2459157df33266c45559375a1caff90a2732cacaf881_Device=CPU_Config=() +344:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a78437a93ab424a706d064188d1bc0971b2e1afc98a74fea979a6f8b99036597_Device=CPU_Config=() +344:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=bb610d59221e7c5d8e96f971519b7ef27bda7bbb9be329b873a901a1e749b9cc_Device=CPU_Config=() +344:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=79e0a530c5a64063a9af26b438f208476e3bbf5a267c28ddded0459019a1d8e1_Device=CPU_Config=() +344:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e15d2825807b2c7fda150b7b7b4e2c6914fab2d4af4313e959abaff56dffe6d2_Device=CPU_Config=() +344:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1eb25d18fbd1070f2a8ff803d76077d092d493f9e9df80e93e2f58f3621a121f_Device=CPU_Config=() +344:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=09c1f9f81a463477da73d33f00d1321fa5c1f64a9c3c51c6e3c1344e362d4ced_Device=CPU_Config=() +344:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=93ce70e605eb712479090e3a266e86eb7422bf0fdd3acb1c38a0b92a9c381e2c_Device=CPU_Config=() 
+344:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=5d68272f8318c073e481b5353e6e4350e6b3b5e120f389a98859dbd5af43db9d_Device=CPU_Config=() +343:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=8042d30c9796e8eca03cb2e3651f84b5167204aaf186ad08ad5f74a9b0a26b9d_Device=CPU_Config=() +343:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f977fc239a0230860702f8c1971bd424f10b978bb03937668c37edee6777f12b_Device=CPU_Config=() +343:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=89dcb383b48e2a4423a7c81461f282b74b1d9ab0f48f0a0427cd4c599672f3fb_Device=CPU_Config=() +343:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=() +343:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i32_Shape=static_IR=c78feba7097eb1c59197840a7e5510c26faeaa51ff724d643dc1f1ec077a6344_Device=CPU_Config=() +343:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_Shape=dynamic_IR=b12ccd794c23494b994608015d049eec0f2ca30dc319bd35c1adddb3e4b8e631_Device=CPU_Config=() +343:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=d8546655166c322e3049ed3a71725c8e89901212007c44c8029ef8379de96db6_Device=CPU_Config=() +343:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=8d472bf25f969c5ab5eb85fb198c2176766a2de7cd444819e8b60d416969e3c4_Device=CPU_Config=() +343:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=ea71ab322d6f3d74b0a7bdc3ff5dfd322f2d8c518a1fb5bc9960c5e04808f28e_Device=CPU_Config=() +343:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6eb80b60b2162fc469f652535ee11822ae34c903ca44191dc95ad7f9678b9337_Device=CPU_Config=() +343:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=54a5630072fb0e0127611a4ae63db14b7c0fa0979f4d2be7bfec548b5291a0af_Device=CPU_Config=() +343:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=681b1f284fb69c16681d3efd2081d7f812496e3a027baef35a75bb0aeb9c003b_Device=CPU_Config=() +343:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=4a64918e1c0c648268ad4a1c2147889b2578b4513693737ec2ea1c7ff81dbc52_Device=CPU_Config=() +343:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=3fec5c6f9e39d8a15d58c5800a889e1660adb375cb7660af1526cd31e69f7cdc_Device=CPU_Config=() +343:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=f826a40d2d5d59d35300a911a15dfd8022c0fc486ecdc7f00c06a26f5dc44338_Device=CPU_Config=() +342:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e250a19bfbe236f81b6715a92beb0c259080e4a5d379ea1187892e8c8d9add8a_Device=CPU_Config=() +342:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7b42d3a61f732f3639d1ae7011b86158d070acc922308a18f00a01b9c6a60ead_Device=CPU_Config=() +342:conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=936ac30f388261cb12776b5e94062a9b5f7b81aa16c9aa5d8f994b8d69231c40_Device=CPU_Config=() +342:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=e1d727df48a0a74d8b9865c00e5c39c9d53a5023d83da3c58f281b6b1411b696_Device=CPU_Config=() 
+342:conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_Shape=static_IR=9402d607ff481567bf322dcea9aa597387a195b9d3756ff46de81c3ac2737a49_Device=CPU_Config=() +342:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=659cd025e440fdc633859089f52f7f38cab5701c63c79d1e8d1837c217b8cf75_Device=CPU_Config=() +342:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=9c63b760d92c46d2ba731cb9edc4cf19a96848e4f3c354797f10a7a1bb9edf8c_Device=CPU_Config=() +342:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f06ff28476f886d4298a83d39f88aff34399d5cd589e0a6d6395e00b0ad96876_Device=CPU_Config=() +342:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=58b9cf97229bd8293e747a47979c3d98261275f9da473dc942b746a06a1fa214_Device=CPU_Config=() +342:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=f0d5131a073c03932316e3f20f40c527ddabafc926f0d10824a96158c03524b8_Device=CPU_Config=() +342:conformance/OpImplCheckTest.checkPluginImplementation/Function=Gather_opset8_Device=CPU_Config=() +341:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d74cf8dde02b582dc1efa697474a50738532e0ce5b40831d81d0852a74a94c79_Device=CPU_Config=() +341:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=95d9789ef78c733e0c7972738bafd4da289a90f0d9ea00bc9452192173390b6f_Device=CPU_Config=() +341:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_Shape=static_IR=fbb53c04f3cfadff9d6543e2fb4eb88d882c3189b4212e77a6ca6e50bdba6e07_Device=CPU_Config=() +341:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=c61a8f259a8b37e49f9267dbc921d88dd60e5766aa30dd05319f423a01c14aee_Device=CPU_Config=() +341:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f8a096633b64512b865ea5e4a57529cbf621afedcb873285bd5e24cdb199a46_Device=CPU_Config=() +341:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=50a0e83d438a3220ed14dd8ae783e92c96381f645b10719669054ea944297244_Device=CPU_Config=() +341:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=152333527a542f3e2228bac5d0fd4ed288dde9205632a318b9b22b64e43be329_Device=CPU_Config=() +341:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=9d4ebc5a7d195ea0e566695253c38ac5d02fea1f4fbe97396828ef9f7754808a_Device=CPU_Config=() +341:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=349d64660bcbb9269f88067431a4b8fc31fcfd09ffb1afa9f3ecf4bc37e8c4ca_Device=CPU_Config=() +341:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=f61b45eec10f28e255a0f82842384e1c947830dc5d5618bf00c6385cecbab8d5_Device=CPU_Config=() +341:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=1e95665a92aa6efcc7e06d24fbe4cb2afa07d75374cea3ea928658a270ef489b_Device=CPU_Config=() +341:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=dynamic_IR=79cffe28ff617b42488d33b204b0f50bcf4e304c74d2a11820c830e091c6383e_Device=CPU_Config=() +340:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=a16b5a0ea2fc8d89980db21cab743fbf776918ed2ed1f91f2e4d3ad3c304d4a4_Device=CPU_Config=() 
+340:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=797bfeedb05fe1883757101c44e78eb807ff9c3570aa58b0891172e729d4b384_Device=CPU_Config=() +340:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b0376bbdfc6560184c2eb15a9cff7fc6d6b39c47dd22936fb64629d345e227d0_Device=CPU_Config=() +340:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=5de1e1eb337f4eff857dccbc075ec7079425a50de3096d4f81d25f0118acc6fd_Device=CPU_Config=() +340:conformance_Round/ReadIRTest.QueryModel/Op=Round.5_Type=f32_Shape=static_IR=f4cc9554ddbd189f18575e3a80afe6e8f8bce613dc8852a48d4171ab6916e087_Device=CPU_Config=() +340:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=boolean_Shape=static_IR=6d34694c9c8e71415be894a80a8ededc6a83657c6e7ce3aaf66dcd6f9ab99226_Device=CPU_Config=() +340:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8596cca11141e34e75c884b1be9a75be19663caf4c0b1b4275f6035a73d62e_Device=CPU_Config=() +340:conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=c0884ce897724cace24b30df395a33443364f8494f1f8495d212f2db20fc49e2_Device=CPU_Config=() +340:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=927c151867c504f57aea681772afe32ec9c67cdaa4a0dcbc9055a8725c0296dd_Device=CPU_Config=() +340:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=77b3b21d35d3742f7abc1097b99d510453f42ebe921681685fbc457d2fa9912a_Device=CPU_Config=() +340:conformance_Minimum/ReadIRTest.Inference/Op=Minimum.1_Type=f32_Shape=static_IR=a5a2ba7fff85401feb05248462e85d334440769790e7e6ba1a75ffb413f7fc64_Device=CPU_Config=() +340:conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=() +340:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=6e614b9877c6dd3bf1ebd731443e5a1e0b7492edbc3a4683adcff53c965ca1bb_Device=CPU_Config=() +340:conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=4d14510ef37733d7ca3d69697626c173feb05638f5036c49b060f6a80aea9ada_Device=CPU_Config=() +340:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=18d294f76a0d8f4562d85033a45aaa3f2d50fdfd9d9f30e295a772fd10540d25_Device=CPU_Config=() +340:conformance/OpImplCheckTest.checkPluginImplementation/Function=Gather_opset7_Device=CPU_Config=() +339:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=static_IR=6e53e1fedd57631f3ec70d6825d8d1029ac95905b82b6bef7fd44ba87373e9c6_Device=CPU_Config=() +339:conformance_Swish/ReadIRTest.ImportExport/Op=Swish.4_Type=f32_Shape=static_IR=d79b47022a50437c9df095b34e515c53eb042c9813fcf6dc7bcdb96962818ddf_Device=CPU_Config=() +339:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=c14da825d470c9141af0ea87eb82edd0866a415cb5ac59f1014c2ded35340201_Device=CPU_Config=() +339:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=f32_Shape=dynamic_IR=dac2c804cd13d69a51906319a3648ac0edd87764c686c99fb47179f379cecf7d_Device=CPU_Config=() +339:conformance_ScatterNDUpdate/ReadIRTest.Inference/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=b39441c49977534ef3b2d1c4a9d7a5a0aedd66c7ba0a6e1be3446151a8e18317_Device=CPU_Config=() 
+339:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=3141ed71fe3efbd7fb026a006824ec24e4673d8b97d23dce275548e92eedad91_Device=CPU_Config=() +339:conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=4c3b0cda20bf6b3c574eaefbce21b9b2b0ed92fa1b37c32af252b111b6466d0e_Device=CPU_Config=() +339:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=dac1d1bb4f11cef03519894a2853742d914abb0e3225b7caa3bc5f23d167cdaf_Device=CPU_Config=() +339:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=79e0a530c5a64063a9af26b438f208476e3bbf5a267c28ddded0459019a1d8e1_Device=CPU_Config=() +339:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=43ba20ec70e156f4782e1f11a30f02daaaafb2039912a373620d845e995c97cc_Device=CPU_Config=() +339:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=cbfc33348aff4daf15fb7926884243c7ffe38aa29e60eceda90fa9b8aadad5b1_Device=CPU_Config=() +339:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=23ad83652d315aa08ee781b0fc81c0eb737265280c85a86a4f08cad71b33e74a_Device=CPU_Config=() +339:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=8b9cabc6a44ece744453092791ef63b8d6ca4d83af7e8635f2f4ad78186e5184_Device=CPU_Config=() +338:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=81954ff76e3fd04ec3b3e3c26e28a79ac259c9b255f90ebe3cc0772fb673874e_Device=CPU_Config=() +338:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=i64_Shape=static_IR=38f6cef69f6a7d9886b5d38902fb76e4ae41385fb3c95e229be4b44456ab2e87_Device=CPU_Config=() +338:conformance_ReverseSequence/ReadIRTest.QueryModel/Op=ReverseSequence.1_Type=f32_Shape=static_IR=a5cc0793d73f7f76fc02b5ae04ef2a29bf212ce5c59f9bbef91e0aa5ee17785c_Device=CPU_Config=() +338:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=e7ab5b597681da2db03c13a2424b4e0a62135eecfb2f97f4c59b53331afb7f85_Device=CPU_Config=() +338:conformance_ReduceProd/ReadIRTest.QueryModel/Op=ReduceProd.1_Type=i32_Shape=static_IR=e34207bf06e51dbf322bc0db76f3a9828ae018b02dba2b1826ed97004bee8125_Device=CPU_Config=() +338:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3b3a5cbc6a255792eeeec698aa5a90947164eab96ec744ada9d02b6c7f453f8f_Device=CPU_Config=() +338:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f82842bfa510be994f17d9c00d43b6f67b232b3a41c64ae276e243610d927d9_Device=CPU_Config=() +338:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=boolean_Shape=static_IR=35ab7a27cb56964d974f5e1b55c1ed76d7f9443f97da0b977370ca9fc414e093_Device=CPU_Config=() +337:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=525ed9b2af76610bf0ee3d11cb1dcfd46059335968359c143d0da7465736ac2e_Device=CPU_Config=() +337:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=12965dd8a422975f08bb0fc707c666ad7ae2671d09c68757d534e3a1d67efd41_Device=CPU_Config=() +337:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=0bbbd97c4428b9565666e9a1e56acc70035b378e16abafc54559a155583d9e6b_Device=CPU_Config=() 
+337:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=6ac457e9181610da9eb4bf0bec6cd53bf3078e0b84df1211f49921207d81c6e9_Device=CPU_Config=() +337:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a8ca0b23e0a0f66247fc693c6a8982e4f7daa11e14da296db0dbc9277fcad4df_Device=CPU_Config=() +337:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=bf4d5291899ea4eccf6584f62d4ecdfb39de79edd102e509f840664838f59d19_Device=CPU_Config=() +337:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a24dd1485e484f31d0c72f3a0c31f373f883f6ca4a751b1d2ce18132913506dc_Device=CPU_Config=() +337:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=30dd450fadb8a1081c1315cd0e5234728862b4de39b097a5a3248d551369b60a_Device=CPU_Config=() +336:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ac40c4284a523b39af21eda7394a11b9ca2f2deb5263c03c92c0e217d34bedad_Device=CPU_Config=() +336:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=fe5cbe273830f6a09e3f18eaf8e9410f9f7f1083af508a9dcaf5f0f22aa3ac1f_Device=CPU_Config=() +336:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=df19449b7a95887e834ba16ebf2e1f08416d6293686a6cb6b6cf39fc82559595_Device=CPU_Config=() +336:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c39e4c1d9cbf5b8730644e1686cc09f36f7e4a4b89cadaf8d8902fdb27993a7a_Device=CPU_Config=() +336:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=boolean_Shape=static_IR=6d34694c9c8e71415be894a80a8ededc6a83657c6e7ce3aaf66dcd6f9ab99226_Device=CPU_Config=() +336:conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=f32_Shape=static_IR=7ad6fe3ff1472399c9c0e12aba1db89105e1e4a243cd092dc43ee763a2571fa9_Device=CPU_Config=() +336:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a7d9ffa60c8d1f330ec303edf6a6c0f8d8e0fe8657c561431bfb91a94c2639e8_Device=CPU_Config=() +336:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c3c821b46d994404c55856237eb70534cff33687df2bde0a86d0bcc9f20878eb_Device=CPU_Config=() +336:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=93788242c06d787e33afa50ecbef5372898e50024d0c88624056a752535572bf_Device=CPU_Config=() +336:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c3f8bb35f2f4473c563c3e5171a8fdc6f7a0ae20e4acde31a578bd20630952fa_Device=CPU_Config=() +336:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=6d7cce19ff10d7690177fe1e3200d872ef5d8827b7ff49e6c9994e597a15dab2_Device=CPU_Config=() +336:conformance_Ceiling/ReadIRTest.QueryModel/Op=Ceiling.1_Type=f32_Shape=static_IR=1484c3d0a5a8b6d1daa002e27b07bb8ba0b5d83aae50b0a3b3bea08483815d55_Device=CPU_Config=() +336:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=34404b0d0f956acd1827f417b44bc1cf1a085d6517d5d81a6c6d38ee27c745e7_Device=CPU_Config=() +335:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=dc350b3fec164adcb096b8fc922e342cf7b0c6f7a4aa25074bec5566225cff01_Device=CPU_Config=() +335:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6b86bf4f834b297dcb461acb5854aeb9783a381521ea1a8e1cf4fbeb60d6d09b_Device=CPU_Config=() 
+335:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=64d7467cf7785e52814a8c25f96c1a5d82c071ced27dea8302b5cd69b464ac65_Device=CPU_Config=() +335:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3fb25dbf33700d0b8ebc3c53fe328f2ee9f45c5a090240eec120b954998d17ce_Device=CPU_Config=() +335:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=d6be1de020bbe6277d8cacd77eece21f766e5e39badb520ef29e880d52e3604b_Device=CPU_Config=() +335:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=dynamic_IR=dac2c804cd13d69a51906319a3648ac0edd87764c686c99fb47179f379cecf7d_Device=CPU_Config=() +335:conformance_ScatterNDUpdate/ReadIRTest.QueryModel/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=b39441c49977534ef3b2d1c4a9d7a5a0aedd66c7ba0a6e1be3446151a8e18317_Device=CPU_Config=() +335:conformance_ReverseSequence/ReadIRTest.ImportExport/Op=ReverseSequence.1_Type=f32_Shape=static_IR=1ff07d9b87513cab5bbcf5059507b8c998fdb25d2802b267bb6c0b90eb3e231d_Device=CPU_Config=() +335:conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=b169d6330e4006909e4deaaf78b03e789ccd9538c5b59d9d41e05f878bb60704_Device=CPU_Config=() +335:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=f110ef35c9642ecd941cd85a67a12b616353d4a8cd33f9770d532759e2846255_Device=CPU_Config=() +335:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=03ebf297344daffba82d04292a767fcd7c959f56788ede32ff0d7c5af06ea504_Device=CPU_Config=() +335:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=941fa6fdaa34b8082171350da966423497232e44077f333cf3a46488bf237aeb_Device=CPU_Config=() +335:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=6b69e46c11a2a82ac7ad6697cd768d88da6e870e75f489779bbd1714bad23450_Device=CPU_Config=() +335:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=0c6a844f626f6628628034d332ccb6d520e0447e4b616048c7efb516d0fd87bb_Device=CPU_Config=() +335:conformance/OpImplCheckTest.checkPluginImplementation/Function=Roll_opset7_Device=CPU_Config=() +334:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9465b2ea76ea3be1365dfe1255524d4ecce0dff6123e929a2157bfc767396b0c_Device=CPU_Config=() +334:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=16b3235d5271e534a1bc725f80e2bfcb837a1c6f144bcfe8211a3e5359644441_Device=CPU_Config=() +334:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=7e386c35d9d397e043876a23a2b9e5885964cee59bf46f1ae0660e6a84641ea4_Device=CPU_Config=() +334:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=5de1e1eb337f4eff857dccbc075ec7079425a50de3096d4f81d25f0118acc6fd_Device=CPU_Config=() +334:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=8b8efa859c54f9cf2200c18953de243d469d2f04bf38ba5f3efe441de23ffe45_Device=CPU_Config=() +334:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=109afa0426a29179db58e16917b829096af105f0def2375a589ea1391138ee2f_Device=CPU_Config=() +334:conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=static_IR=944072d96554abf3ceac6b928cc00ea1705d5e0dfae8e9a0662de4e56fb3e62f_Device=CPU_Config=() 
+334:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=33d84638f606d759354e190991899e47d2f4c63b0e378aac985e5fb9132dcd01_Device=CPU_Config=() +334:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=b434cd386e4c5e688aac8da3425d2ed0d72961223eaaa1cf2ff951a88a5fa001_Device=CPU_Config=() +334:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=dynamic_IR=08776190d0fddfcb15ad75cdbf6892de03f79e89d57e02b7c3e80b4a7a125d35_Device=CPU_Config=() +334:conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=f6f85e9282e58756d40411508d6edaacc75c0f4e64d4e25021ade07ba17bd8ce_Device=CPU_Config=() +334:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=12e7ea655764a32069a93a3f7ab147983bceeacc8a2bc88fbb2def005a1596b3_Device=CPU_Config=() +334:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=u64_Shape=dynamic_IR=5f87db7fc306440f807b413acb7eb175932f29f59d1b5eb4a9df8945b9aef9d4_Device=CPU_Config=() +334:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=0cc60631ab50733ce6b7a2256c0db1f9d9338505ae85b30fee02026c28511383_Device=CPU_Config=() +334:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=9d26a7c321db2d87b29b93baeca20dd25357e7777261ea6a4cbf968a203969ea_Device=CPU_Config=() +334:conformance/OpImplCheckTest.checkPluginImplementation/Function=ConvertLike_opset1_Device=CPU_Config=() +333:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5288d099588f5876e907c5cd750c9f0b2191d1ea060881e80af1006cfad259ac_Device=CPU_Config=() +333:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=d6250086b712a16042ee74438bb61b89fbfaa5bae433049207402d1da4cffaef_Device=CPU_Config=() +333:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=100761a180c245ecb5f949d8a3ea0d4e26d7bb15d679ab797362f695bff03be9_Device=CPU_Config=() +333:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=d9231cf5e3e491e318f16514e771cfdee4b781b42fc9d45088da850ab48079cc_Device=CPU_Config=() +333:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i32_Shape=static_IR=6ddb35aeda2a6cb63282d2fcf6503aa02135ad60e23c752280ef82aaf6a31191_Device=CPU_Config=() +333:conformance/OpImplCheckTest.checkPluginImplementation/Function=Unsqueeze_opset1_Device=CPU_Config=() +332:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b3fdb9be3484a0c498bf40f1a102c452eea04caa5b1dd627e8267087df0acc87_Device=CPU_Config=() +332:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f89e84d6fb931cf0cd074acd01a50e50daa47ad88b1b74e4b3671d63bd7889f2_Device=CPU_Config=() +332:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=ee49657e646466b0c22aff01740a48c1cc271a828a8c3e10a21d75b04f511cb1_Device=CPU_Config=() +332:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=8d472bf25f969c5ab5eb85fb198c2176766a2de7cd444819e8b60d416969e3c4_Device=CPU_Config=() +332:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=21a3318957d920b39d8b3d84c76cfd2a5ad98515824f88145326deead0961486_Device=CPU_Config=() +332:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=ea71ab322d6f3d74b0a7bdc3ff5dfd322f2d8c518a1fb5bc9960c5e04808f28e_Device=CPU_Config=() 
+332:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=5c05bbc013fc857a8f2b340df778f3ad5bdbc1b7273cf41b23d6da410205c612_Device=CPU_Config=() +332:conformance_GroupNormalization/ReadIRTest.QueryModel/Op=GroupNormalization.12_Type=f32_Shape=static_IR=139730a541ba475f22b71d8bbe850f280751594db3560e15590939b2f017fc02_Device=CPU_Config=() +332:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d242e8ecc8ae0239fc2e7773fe0f8a1d50792a71ae4aaac4fd439174e87e95b1_Device=CPU_Config=() +332:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c1852c534b8b95bf1a9aa2771decf2368fa095c5f5688d38ab9ce0bd86152a19_Device=CPU_Config=() +332:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=64bd2f48b3326db083653b5993c9a75d21be515cbc5af67c62c981e9744e2f0b_Device=CPU_Config=() +332:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=550d5d475e0f53be8506153a78626cd5a5c0a949b9bbd9e2fea96a4ba2f7b908_Device=CPU_Config=() +332:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonMaxSuppression_opset5_Device=CPU_Config=() +331:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a48d232b00b4d4a735d6b9999c29b413a32cd7f05c104610a11cab01465a3887_Device=CPU_Config=() +331:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=87baad85c649084e386ca502375581e9dc47c68c076bacae5e5ac1ddbaaa7830_Device=CPU_Config=() +331:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=e8df0b3ab9e127c1d37881f4c250ca0fd0dd2ec822cd24bf95e7860484fe9b8a_Device=CPU_Config=() +331:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=u8_Shape=dynamic_IR=b12ccd794c23494b994608015d049eec0f2ca30dc319bd35c1adddb3e4b8e631_Device=CPU_Config=() +331:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=990cce0ce92df99ae74ad8840f7b89d1c48c0044deb9cb71619b44a565eed911_Device=CPU_Config=() +331:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i64_Shape=dynamic_IR=502fbd3f8c0e9c0a9523269a9df9b0fbd83d59ca94f373fd543048429a957f5c_Device=CPU_Config=() +331:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=f8b14e90b051624d56678dbe68f15e6db94e22878b22914d0be241047d1a3783_Device=CPU_Config=() +331:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=61091e18fb5e9eddcab243ec79234ef3b93a5d01d9b2611a3a0e027eed4e4b31_Device=CPU_Config=() +331:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=66df22ce11e7009aea35ba6a11b4294eda44815bf041eed0721499a3d2c484b1_Device=CPU_Config=() +331:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dc9060efbe22735c22c69f0323c7e6a77a30cfbaae7b79670b9b26fb2be70_Device=CPU_Config=() +331:conformance_FloorMod/ReadIRTest.ImportExport/Op=FloorMod.1_Type=i32_Shape=static_IR=2d09fd84ef3e176a2eae04f1066929ceb3973045b87989e5f0f11b97cab6cc7c_Device=CPU_Config=() +331:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=117fd22d36b97216edb2112c043ba97872b9b7915d7909dfc395406e8ad91e4d_Device=CPU_Config=() +331:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=6d7cce19ff10d7690177fe1e3200d872ef5d8827b7ff49e6c9994e597a15dab2_Device=CPU_Config=() 
+331:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=9ec0516350dc25e0dff22b12b65f761cd4e2744881c1f356f9ab50680eee1a69_Device=CPU_Config=()
+331:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i32_Shape=static_IR=a3f2389f6a8a495885efa87742d53e1e154f58f8fd6e83df89bddf5922247095_Device=CPU_Config=()
+331:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=a3032224f3e9c096102921fd8571966d23c21cba931b9d5e31ba41e9698d07b6_Device=CPU_Config=()
+331:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=eed21ac7c17920ba437332691e231037113367ee3d256b4db1380c8d2e0db84f_Device=CPU_Config=()
+331:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=a3032224f3e9c096102921fd8571966d23c21cba931b9d5e31ba41e9698d07b6_Device=CPU_Config=()
+331:conformance/OpImplCheckTest.checkPluginImplementation/Function=Einsum_opset7_Device=CPU_Config=()
+330:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0b7d6fb137555d6fde92f0c9b3e6278715adaeb38cf760236070b17bafb5babc_Device=CPU_Config=()
+330:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=adee3d4d6728b17fb5ab17a9915c5b7c8808f949ad358e8a16a0bb12dad7c958_Device=CPU_Config=()
+330:conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=b39441c49977534ef3b2d1c4a9d7a5a0aedd66c7ba0a6e1be3446151a8e18317_Device=CPU_Config=()
+330:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1ab723c2a389a999b3b01158b82719358d802c6d62767d6dcd91b5d7fe5531fe_Device=CPU_Config=()
+330:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=cd5756749d3d73dc7b666f7f41dc292c73230e5d31ddbbd43aae77210b86220a_Device=CPU_Config=()
+330:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=596d0b6cfe8b39e0ceaa665f1fa82aeeeff78d09315fca7cef031b6dc210a1f3_Device=CPU_Config=()
+330:conformance/OpImplCheckTest.checkPluginImplementation/Function=Clamp_opset1_Device=CPU_Config=()
+329:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=5f45e938f9e6d58ccc6bf771049731f2d9c4a8b0ed83e2a1942ac69ab76984b3_Device=CPU_Config=()
+329:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=920aa0d732c7ace2bcfe73df0e7217e66b6388dce554ef827efa96f4e7d31a2f_Device=CPU_Config=()
+329:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=3d37166487c5c52af657343f8fa10903efc7d580d5b370a519a0ccfbf6fc56bf_Device=CPU_Config=()
+329:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=a94e0bbcae35d7cb33efba2c6df3275f7bca8520ddb37eeeab81829906fc8964_Device=CPU_Config=()
+329:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=98274ec3fc894754adaacedf83b4b7da373e639a51cfa7dc348412898e45e8dc_Device=CPU_Config=()
+329:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.1_Type=f32_Shape=static_IR=f735a44db0a337a22f5ebed052a5718168765287ff4e0eca961c3f9fd68586c0_Device=CPU_Config=()
+329:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=i32_Shape=static_IR=6650e462a4f0086329d8576eb6352979e89825517f48e264fe719c7c5ca276fc_Device=CPU_Config=()
+329:conformance_GroupConvolutionBackpropData/ReadIRTest.QueryModel/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=68853f0b8867d4ddb5eeb239690f1b41600c05f64ee4d3efa8cc828e72b9bc1f_Device=CPU_Config=()
+329:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c1ffd0690c9370725a30028d2915ec798aff173f86a1864f3dc92a4defefef85_Device=CPU_Config=()
+329:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=256f748d7b98b0eb70cc659403910bac929d62a2b153e63438f8746f602a83fa_Device=CPU_Config=()
+328:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9f8fca1ab855d5a71d7acabdefda202e270bf16b559fd581f9e663caa301ffd7_Device=CPU_Config=()
+328:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=2ebbd25d315f10aa32cd8251ced4e269c1688202ee64b4fb5245e4ab53cba16b_Device=CPU_Config=()
+328:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=28675c37d06426cf6895e7ffc15d6c212ef8be1b278fd199d1bfbd0678f825fa_Device=CPU_Config=()
+328:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=e7b766e89f08e80fd96ba40dac738561546ca7210c4566b727ca8cb49528c823_Device=CPU_Config=()
+328:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=81c2956d325aab4a7bfd931d94151e1285083a15326e0890f861b97017a24bb9_Device=CPU_Config=()
+328:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f82842bfa510be994f17d9c00d43b6f67b232b3a41c64ae276e243610d927d9_Device=CPU_Config=()
+328:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=30466048a7da9db59d20a210af1979341f7b9552362e64a89357d650102a213e_Device=CPU_Config=()
+328:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=3e016da7faeea7044ea204d1c3a2f1729d3d7ef0be27f5b769484bc7aebea5ab_Device=CPU_Config=()
+327:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=a9d3d025df92369ee1f1a81fe676bb00d7d6cc488868e04d0e713fb9e42451a9_Device=CPU_Config=()
+327:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=9efd5749a1591709057d6e97334c9b5b89f5864d705c91774e0196d42966d1b9_Device=CPU_Config=()
+327:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=e7e985d4d02762d236131e74fd867acff1828bcd4c4eb32e190de20eadb831fb_Device=CPU_Config=()
+327:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=f2df871f255156043f03f34333d59d9213fd52ea24f69dda1b04888ed269acad_Device=CPU_Config=()
+327:conformance_SpaceToBatch/ReadIRTest.QueryModel/Op=SpaceToBatch.2_Type=f32_Shape=static_IR=8acd95619121cb22760fd92815b1ba85f541f282d3860e910f73036ed335a9ee_Device=CPU_Config=()
+327:conformance_ScatterNDUpdate/ReadIRTest.QueryModel/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d8a48dc7581c2ece0179d0ad668e8caebdddddfe492e365ea2e0e5f3a7302eea_Device=CPU_Config=()
+327:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c078bcf5a6a207fd76d9cddc1a35df577529e71ba0a120b28c7ed17bd12673bb_Device=CPU_Config=()
+327:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=933c6450f6856b32e879034662cf60eca53970c10106f8a11eb925e5621042e9_Device=CPU_Config=()
+327:conformance_LogSoftmax/ReadIRTest.QueryModel/Op=LogSoftmax.5_Type=f32_Shape=static_IR=38bcc7d745ee21a7c6858a161e269f0281d3f41d62d65d10fde9b0a9b80992c4_Device=CPU_Config=()
+327:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4819c2459dd2bf875545cc912152c6751ed5db8ef07aba31d3eae6c3dedc7aca_Device=CPU_Config=()
+327:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=2538d525d8f11b9f4961c2a4a8cc36fd27d8b3d97271ef7db4f7eac9732b71f4_Device=CPU_Config=()
+326:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=59eaeea8102953f8ffe85ed1ced2a44ddeed77ec237608b45be0573bb32b1104_Device=CPU_Config=()
+326:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f7cf7cbc88dec99af8d35e65e926745ad318706c454b90740a19589285733fe9_Device=CPU_Config=()
+326:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=12965dd8a422975f08bb0fc707c666ad7ae2671d09c68757d534e3a1d67efd41_Device=CPU_Config=()
+326:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=944072d96554abf3ceac6b928cc00ea1705d5e0dfae8e9a0662de4e56fb3e62f_Device=CPU_Config=()
+326:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a46f51b7498c921515a53b67480ec4d413ed43ff809e1fa6a4deb7365f4a0460_Device=CPU_Config=()
+326:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=046798a0cf8d4c3fd8f1dc12bd0363a669628e748a6c964385eb50bb783924fd_Device=CPU_Config=()
+326:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=4ccf5cecf790d27400fb95526a993f8a1a28cd4f3120b897cf45bbe78f087ab2_Device=CPU_Config=()
+326:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=2aa586a55098e1960c204572ca9704bb3b8b9a3baab5fcf08200594261f7bef7_Device=CPU_Config=()
+326:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=66cff13148d62809cced5a381c251525486476f7178eddd3c8e45eeed40afd06_Device=CPU_Config=()
+326:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=dabed23c3f30d92c6fcca7a6845160022837de8cbfa1077c222e6f1224b745e1_Device=CPU_Config=()
+326:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReorgYolo_opset2_Device=CPU_Config=()
+326:conformance/OpImplCheckTest.checkPluginImplementation/Function=MatrixNms_opset8_Device=CPU_Config=()
+326:conformance/OpImplCheckTest.checkPluginImplementation/Function=BatchNormInference_opset5_Device=CPU_Config=()
+325:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=90f882a97d637e527900edfb1b7c277b65544832793d08efdf8454be21a2f496_Device=CPU_Config=()
+325:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ec19939673cc58f2511ffd6695a3652f1d724872b0db958a6d667e1e87002b21_Device=CPU_Config=()
+325:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=8832b317ba58dd0efd1e8fa5238d35644d8468a03c9b35809a20ae64098dc986_Device=CPU_Config=()
+325:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8fce2d85c65eb0e8b40c2923338675276902296daf8744322876552dcd68f7_Device=CPU_Config=()
+325:conformance_NonZero/ReadIRTest.QueryModel/Op=NonZero.3_Type=i64_Shape=dynamic_IR=7d0265450b8fc92464273ac05d685952ea3877be45b4d745959f2f373fef1431_Device=CPU_Config=()
+325:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=a29bdaa31edbcf7b3dc392625c0aa0a27e827e1363d52519858c93defbf9ebac_Device=CPU_Config=()
+325:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=b459cd78b41e36a6c3823301811fd3322a77f802ffc3399eefdfd8ffa4ce6e6c_Device=CPU_Config=()
+325:conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=a5a2ba7fff85401feb05248462e85d334440769790e7e6ba1a75ffb413f7fc64_Device=CPU_Config=()
+325:conformance_HSwish/ReadIRTest.QueryModel/Op=HSwish.4_Type=f32_Shape=static_IR=1c38a17a13c5c03cfc1eeb147ca2474debea05ae1d6f2357ce40ce23552286fa_Device=CPU_Config=()
+325:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3c7d4160bf883d550620e8d1ceb54b3d78bf1512388b5ee57e1a380949d441e1_Device=CPU_Config=()
+325:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=51d309244d7c90039cf86929d62320f5e5c5df8b1390c6b1241d8389eb6914e2_Device=CPU_Config=()
+325:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=15d323a190bbeb1834cfa08a3afc633a2c203e44e2660bff4e98453c02ea4cfc_Device=CPU_Config=()
+325:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=34404b0d0f956acd1827f417b44bc1cf1a085d6517d5d81a6c6d38ee27c745e7_Device=CPU_Config=()
+325:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=eed21ac7c17920ba437332691e231037113367ee3d256b4db1380c8d2e0db84f_Device=CPU_Config=()
+324:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e00cea4f2ea99f32c11ea265ecc0483554192192bb99f36438dd38de09820888_Device=CPU_Config=()
+324:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=83e2d01e24eebe910418ed24fb506852c37576ce70c18d27de197f675f49c9d2_Device=CPU_Config=()
+324:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=fad6766f10f7a0ffee665be437521766f5dd56b673293920d8b469bdcef8e7f8_Device=CPU_Config=()
+324:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=b9581fac6848b0c6c9fc9af5fd17eca3f2f64832fb7205f97684f1cc4c1985f0_Device=CPU_Config=()
+324:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=dynamic_IR=f1e43476084575ad240db6631f433a61ba2076d1ca95e44a0e4471ea9d6f66df_Device=CPU_Config=()
+324:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=659cd025e440fdc633859089f52f7f38cab5701c63c79d1e8d1837c217b8cf75_Device=CPU_Config=()
+324:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=176c218db11ea18f367fdf98a3de14e9a9c65152bbcc39783c38772b37f6e9c2_Device=CPU_Config=()
+324:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=fabbe8bc510b0d1afb64f2fbe68d755be05fdccfadec5fe845dc6b3c4e6a2767_Device=CPU_Config=()
+324:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=80a8c850ad3eec3e8fd00d2ac09695a0f87a10e4b80b9022f49ddcd9805eb2d1_Device=CPU_Config=()
+324:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=a937747c04b70351d3632aab91189200e2c0a69b6467ed856b7075885c54d83a_Device=CPU_Config=()
+324:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=cc13aaec2a2bbe9b760651d358622114b4b0a20cb106472bd8519f0fade61dcd_Device=CPU_Config=()
+324:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=2c2cec03b3ec1da29ad4d5fbb3530ee7343a436e27be923ee1f9dd97d29731a3_Device=CPU_Config=()
+323:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b2fc03d707298e863f83bd3912617e76e63d0fd922c87edf912c17bf51cc1fcb_Device=CPU_Config=()
+323:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5ba879b46e93286e4c880a726e28d6956a1c8415508733b5349079f899462679_Device=CPU_Config=()
+323:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=5295b6c6090a820891e5754c34d03dc3347d3436fa16fa4a701422ce8ac78b92_Device=CPU_Config=()
+323:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=d1d0510ce6d862a5512bf4c5c588f84548f1aed0226eca6850b5e2d470a5ee84_Device=CPU_Config=()
+323:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=a30154a78e0e565a598629670b87338d03582cbe4ed5547256634ddad7bc9d5c_Device=CPU_Config=()
+323:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=5c5e10f28ed3a8d4ee0d3c8af982df5f383a4a1a713baba556dd17ee52e9ef32_Device=CPU_Config=()
+323:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d11097e7fa04dc0b540bf3b963cde252591b39b7dcbfae66e64ed19cd2b3b06e_Device=CPU_Config=()
+323:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=3effc90c24c0eb76bbc89809d34c6541654366a02e21378a668dd932a6cc7756_Device=CPU_Config=()
+323:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=214e4e8f7de64e9cc8c77c67d214172905cfb4b9fde65e2ef3d32bb7b4ed93f1_Device=CPU_Config=()
+323:conformance_HSigmoid/ReadIRTest.QueryModel/Op=HSigmoid.5_Type=f32_Shape=static_IR=cc18959ba7c26661ba0f986207bd00aca503bf924b31c4a2070ac40ac3ec5468_Device=CPU_Config=()
+323:conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=74f34c8b7abfe0f7afe021ba5d4861e29f9f3915beba5cdb2af936f1f2409fb6_Device=CPU_Config=()
+323:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eba756a8d0ce89c9a8df50baeaeb82d5b719461bbaa06386db7e1be10ec535f3_Device=CPU_Config=()
+323:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dd181ad2875cd08679b8554d2a85ea0fd15d7f09f733a8290f677fed6c757_Device=CPU_Config=()
+323:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=b31dbb99720fd5083e5a7e5b1b626bda91455999e2918eb8e658992cfa6588dc_Device=CPU_Config=()
+323:conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=cc989fde083332a75d3066112105028a711bdac4fc44463d098022774da752b7_Device=CPU_Config=()
+322:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=afd856f31f3a815b84c34b66e1ba0a70a313301ce82fdccc2f1b779ad3157d4f_Device=CPU_Config=()
+322:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=34784838e98e93a6b024109ef3a8a5d4e1fc7f89b98ca23c81cf085f19acc663_Device=CPU_Config=()
+322:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=7c1b4dfda36336bb1a943fec9786d89e220f2a811159fe9cbed7d51186f8fdfe_Device=CPU_Config=()
+322:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=6b0b123bc93e799aed7bee84e55ed9def25af4f11d27958d8368983eee9c527b_Device=CPU_Config=()
+322:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=6aff57130da7904e5d2300c4962f104d31c704872d5c33bbda4bb38efc34d563_Device=CPU_Config=()
+322:conformance_ReduceMin/ReadIRTest.QueryModel/Op=ReduceMin.1_Type=f32_Shape=static_IR=61bca82940fd4a54bcb587a88272b81c191b8feeab37bfafa044ef768240977c_Device=CPU_Config=()
+322:conformance_ReduceMin/ReadIRTest.Inference/Op=ReduceMin.1_Type=f32_Shape=static_IR=61bca82940fd4a54bcb587a88272b81c191b8feeab37bfafa044ef768240977c_Device=CPU_Config=()
+322:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=c61a8f259a8b37e49f9267dbc921d88dd60e5766aa30dd05319f423a01c14aee_Device=CPU_Config=()
+322:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=d05c1b7fcf976117a23e0284998d9ce21689411ff24530175787f1512ca25879_Device=CPU_Config=()
+322:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=641b1e74512a5cdc87bcd63515a28a409f155a3475fa923e440868e563daaffd_Device=CPU_Config=()
+322:conformance/OpImplCheckTest.checkPluginImplementation/Function=PriorBox_opset8_Device=CPU_Config=()
+321:conformance_ScatterUpdate/ReadIRTest.Inference/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=f5ad439e65ed1e090d3d5744e9e5bcd9b8fed6ac6a191735cbb1cdd9af8bccf4_Device=CPU_Config=()
+321:conformance_ScatterUpdate/ReadIRTest.Inference/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=b5f5ffd783aa251498c2011f19a63c1d68991e426384ef9728bc0b46587faa2f_Device=CPU_Config=()
+321:conformance_Proposal/ReadIRTest.QueryModel/Op=Proposal.4_Type=f32_Shape=static_IR=c0884ce897724cace24b30df395a33443364f8494f1f8495d212f2db20fc49e2_Device=CPU_Config=()
+321:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=c5ce2b16d47cf93b073c2ba13556fa9fdd1b6f1dbe6387a50b507a40ab1d1c1e_Device=CPU_Config=()
+321:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=cdf79cced0ed380052910c95b09b4022841474c87d06061f29791ea2ad9813a4_Device=CPU_Config=()
+321:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=f32_Shape=static_IR=431db89311a543581d104e2a2c498fe021da2e4026323817834670bf5bee67a2_Device=CPU_Config=()
+321:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=155b8d9ccf06f4d8f9ada6024fbe66f39e4e6e96917c12d7ac02eac98c5473de_Device=CPU_Config=()
+321:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=5bfbbb826bcb2c9e7b5364fcc5da23e737953150029c2ea7455ad4b09caaf01d_Device=CPU_Config=()
+321:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=97181a6433949eaef7277fdfec4f8f94b27463ee3ed4a6aefc678fdaf7eab4db_Device=CPU_Config=()
+321:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8108f6881c436dfa59a0c27d173054c885f082306ae5af1694cdede13718bde2_Device=CPU_Config=()
+321:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=f0853773e26eae3d051504ed8db7f182c0e90ef7b45625a1a72ac51a73e2208a_Device=CPU_Config=()
+321:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=c3c821b46d994404c55856237eb70534cff33687df2bde0a86d0bcc9f20878eb_Device=CPU_Config=()
+321:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a24dd1485e484f31d0c72f3a0c31f373f883f6ca4a751b1d2ce18132913506dc_Device=CPU_Config=()
+321:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a3d6337c1ea3e8b67256696ea4231da4fc0e9d9f8bea169607a1287233086b3f_Device=CPU_Config=()
+321:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=dd9fee8f7cd289b97050e22cb465637c6439230d0d3ebcb20452eb544b40617e_Device=CPU_Config=()
+321:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=ef6e4b3910cac801199b1f6be74902b42105d23de549d426b1c4bcdd7361f79a_Device=CPU_Config=()
+321:conformance_BatchNormInference/ReadIRTest.Inference/Op=BatchNormInference.5_Type=f32_Shape=static_IR=c602b01c85ee95a1d7deb1498c5f0494a5ee727ce8874d5beded8bf33631d0b4_Device=CPU_Config=()
+321:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=f286960ead5b83e3e4015ee5751b114a9d70e90aa788e0fb004ac50b95a8fa2d_Device=CPU_Config=()
+321:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceLogicalOr_opset1_Device=CPU_Config=()
+320:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9575e384c23faea27b9011de8c0093099fbe0ee6462baaebaceb075529664665_Device=CPU_Config=()
+320:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=5aaa81d6f07ed880b1e93a0fce7b6aab4c3c88bfb1b4b6cda4ead15eb145af63_Device=CPU_Config=()
+320:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=7625f5af6c70a9d4bccb783dc369a11b53ef1f6492df030ae5404452ea0cdc79_Device=CPU_Config=()
+320:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f8ee6adb1573c51bcffdd8c24455ecd6b6fbf04f171e9aa5de36c5d6f18babe_Device=CPU_Config=()
+320:conformance_Equal/ReadIRTest.Inference/Op=Equal.1_Type=boolean_Shape=dynamic_IR=0723b6d683bc65225624112929bd8f7a0adde9e9c2265a2ec1a54b10c4433735_Device=CPU_Config=()
+320:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b4c737aec2f47947d1afbe26d9d8cd124c6fdd24e30cab1f563d91310d1b62c7_Device=CPU_Config=()
+320:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=3d5c8f5c1545419050f982e8555a6ef9e5dcc06545b1a8573d710e8bc2375a6b_Device=CPU_Config=()
+320:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=fabbe8bc510b0d1afb64f2fbe68d755be05fdccfadec5fe845dc6b3c4e6a2767_Device=CPU_Config=()
+320:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=09dd33f661a07095dc47e3e5205c9fc6dceda72526e79be0751c34823c7e7cf1_Device=CPU_Config=()
+320:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=f826a40d2d5d59d35300a911a15dfd8022c0fc486ecdc7f00c06a26f5dc44338_Device=CPU_Config=()
+319:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c0cbd07b1517991754ef075284aedef586dd4b250e2b867379dacebdf99ce1e1_Device=CPU_Config=()
+319:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=fa88ad79fad41544d799f0333f83b91322f2bb408689e27e53bd175786ed0979_Device=CPU_Config=()
+319:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f84bcf4f549ca0d6e75c7905f1463fbace4f3b955032fcae627e46e353b2aee9_Device=CPU_Config=()
+319:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=299e5f0fc159bf294093a5e1f258f7083fc54a08cbaa3a55b2a2197d29ae780c_Device=CPU_Config=()
+319:conformance_ScatterNDUpdate/ReadIRTest.Inference/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d8a48dc7581c2ece0179d0ad668e8caebdddddfe492e365ea2e0e5f3a7302eea_Device=CPU_Config=()
+319:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=static_IR=b91a183b8c36d6e8358dad7056638b8091005393dd1ee6813728f25cd3e6a9f5_Device=CPU_Config=()
+319:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=313740a93070bb3cb89143685b7521ea0ace30c3f6d510a4d83ed809808caeac_Device=CPU_Config=()
+319:conformance_MVN/ReadIRTest.Inference/Op=MVN.6_Type=f32_Shape=static_IR=a7b7ec75e1b343acfa06ea53d7d5b631c06d44c68b1fc92555d7168c77aeadb3_Device=CPU_Config=()
+319:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=d98330d8f9f03556036d103fb4ca3f8436be42fa4f0b21b185aaad3abb2fb53c_Device=CPU_Config=()
+319:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=d98330d8f9f03556036d103fb4ca3f8436be42fa4f0b21b185aaad3abb2fb53c_Device=CPU_Config=()
+319:conformance_GroupNormalization/ReadIRTest.ImportExport/Op=GroupNormalization.12_Type=f32_Shape=static_IR=139730a541ba475f22b71d8bbe850f280751594db3560e15590939b2f017fc02_Device=CPU_Config=()
+319:conformance_BatchNormInference/ReadIRTest.QueryModel/Op=BatchNormInference.5_Type=f32_Shape=dynamic_IR=694ab408745deafb90f8515e002a393e790a8b1f83e58519081b983067d76967_Device=CPU_Config=()
+319:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=cee58d2e3f2d6ef0061c5b245a15c60f0a26a58474c015f71dbdbc0c171b2a8b_Device=CPU_Config=()
+319:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=525408cae199f0936f7552165ba12d61ced6b675d75d56f1d69be8281feec5d5_Device=CPU_Config=()
+318:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=35d15ad61ee34c17abe50c4a67e568c2e253712c2d63cb828b0bccdb2175a6bf_Device=CPU_Config=()
+318:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=i64_Shape=static_IR=6590ae34a784f81de25c016454fcc919ae1f9eab672c78c9da0daf83dcdaf1bc_Device=CPU_Config=()
+318:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=c1c38223834d99f4481cb74db2bc302710629de5807b4f08381fd01655b9d44a_Device=CPU_Config=()
+318:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=dynamic_IR=85a35059512fed9e0c70cdcbd5e73c1e247ef97821d5193cbc4f7f7c3ebbaef8_Device=CPU_Config=()
+318:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=72c58b462f61521af4eab9c890e568b5676c7a3194c4e35f8e04f98596013c47_Device=CPU_Config=()
+318:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=674e2446a2f5929d12d36f14451d68e7b55ad61d2d8df755e85c27c4a52943e3_Device=CPU_Config=()
+318:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=0cc60631ab50733ce6b7a2256c0db1f9d9338505ae85b30fee02026c28511383_Device=CPU_Config=()
+318:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=bc52d884c8eb9ffc1a5c6af9467b8f285933b715def03c4e5cadf426ba186c3a_Device=CPU_Config=()
+318:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=9b9521ed73100b54a3c5920107db944380157eea1b72f4e4d94f8e2ced1f2e4f_Device=CPU_Config=()
+318:conformance/OpImplCheckTest.checkPluginImplementation/Function=TensorIterator_opset1_Device=CPU_Config=()
+317:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=6e1207753b08d53b18c551ad07a245243197370051be78218db028f3d3b835a5_Device=CPU_Config=()
+317:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=d7fbbe9f8f446b009ea2de8594e4cfaad46432734cba27596e3fa721f04c04ee_Device=CPU_Config=()
+317:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=6e8dbb054c99609e5aedd642130e867c22091118e0bb7ddd870a66dcfd11452f_Device=CPU_Config=()
+317:conformance_NonZero/ReadIRTest.ImportExport/Op=NonZero.3_Type=i64_Shape=dynamic_IR=7d0265450b8fc92464273ac05d685952ea3877be45b4d745959f2f373fef1431_Device=CPU_Config=()
+317:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=84f6f3544adcc7c68df5ca411844cf36c2232c1b6c820094e5693a444faa143d_Device=CPU_Config=()
+317:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=fe615eeceb735b046b190d844931c56223d45439021da3b6b23227a1f9cb73c7_Device=CPU_Config=()
+317:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=static_IR=488c8e933df63c1368e021869a92fd48929ac252863ed4c2acfab7174b449581_Device=CPU_Config=()
+316:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=20450a7796284bbdcb011ce027d5c7260ed7dcdf07e4d39e48d99a2162eaae51_Device=CPU_Config=()
+316:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=178677f6c6e3857b2c3aa8765c8e3186bd25b73154ba6463ff33a9e1c911e6bf_Device=CPU_Config=()
+316:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=9aba412b059ee77c603bebe3e49240d6f2183168002d25bb7bfe62f1224be2fd_Device=CPU_Config=()
+316:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=2d6f6b955cd431e0f4786aae35f5a1f7f69a6b627e88c42643ded0477f1cfef7_Device=CPU_Config=()
+316:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=dynamic_IR=0f5965e2daa2a1f6b050813850956d9a4bbd771cb234ec814617099e1541ea0c_Device=CPU_Config=()
+316:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=i64_Shape=static_IR=1c06ff77487507dddcddf290d75d4812bfc8a7b2c9bc78176da5212eab029966_Device=CPU_Config=()
+316:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=21a3318957d920b39d8b3d84c76cfd2a5ad98515824f88145326deead0961486_Device=CPU_Config=()
+316:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=2e70eb484f4bac4cd11e9f643d2531cd0e78994af07c015183edf9d62a709d47_Device=CPU_Config=()
+316:conformance/OpImplCheckTest.checkPluginImplementation/Function=LogicalXor_opset2_Device=CPU_Config=()
+316:conformance/OpImplCheckTest.checkPluginImplementation/Function=LessEqual_opset1_Device=CPU_Config=()
+316:conformance/OpImplCheckTest.checkPluginImplementation/Function=GRN_opset1_Device=CPU_Config=()
+315:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=97f6fd9998be395222e6878ccaab47f5d50561d1ab8f988987f7f292e784fe2d_Device=CPU_Config=()
+315:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=53828d433bfa231cac709949db0e4ff72010e5cf9df167ecda7ac72bd5a69e10_Device=CPU_Config=()
+315:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=2027d5da17dab73d23b4984fe88696fb770ba2fa479a194b3531d30ac75dc840_Device=CPU_Config=()
+315:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6a8fb5f2948de2436a33999ee2a01e239193c268f61634f1e80692b0c45aa3da_Device=CPU_Config=()
+315:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=99820651f05bae979a287a8644f1b739637d684efad288b48044c2a664e43a3f_Device=CPU_Config=()
+315:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a8ca0b23e0a0f66247fc693c6a8982e4f7daa11e14da296db0dbc9277fcad4df_Device=CPU_Config=()
+315:conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=f32_Shape=static_IR=35c61b2251b78ad9f9804bd3f9e301e1f974c6dc138ce0466b8b940d106ddd72_Device=CPU_Config=()
+315:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f55c473236715e5c4e6ec21a9e07d1c73b14d529b57fae0cb38ef9d6cd383b53_Device=CPU_Config=()
+315:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=750599c4cdfcbe7468328647a8760c7249a9f5dba8bc33ebd00c151d9f3b13f6_Device=CPU_Config=()
+315:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=dynamic_IR=6b70264ed3eb3831e0e034230813ce1a1e71c157a302822b56335e587bd200b3_Device=CPU_Config=()
+315:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=7b2199d0ea56102a7c6737be2334b9717ee292c13cdb692d07fddfd173ea5b82_Device=CPU_Config=()
+315:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=5d68272f8318c073e481b5353e6e4350e6b3b5e120f389a98859dbd5af43db9d_Device=CPU_Config=()
+315:conformance/OpImplCheckTest.checkPluginImplementation/Function=ShuffleChannels_opset1_Device=CPU_Config=()
+314:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=07849f3111a0f12a712cb0deb7ec9c4778e70120385bdff7f17c1af30e31062c_Device=CPU_Config=()
+314:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=9feb072b58552898ff80a05dffe8f39c880b4f2a2382d56cb24a78e278ea1756_Device=CPU_Config=()
+314:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=e9539332df9388555564db1da36679acc7b505b8c1fa687731f2052999bfe1fd_Device=CPU_Config=()
+314:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c87c002bc627f4adfa58547da4c2b1f270e07e9961a1b4ae99dda72d88980550_Device=CPU_Config=()
+314:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=2d6f6b955cd431e0f4786aae35f5a1f7f69a6b627e88c42643ded0477f1cfef7_Device=CPU_Config=()
+314:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=5aa10dbbcee8d7434796180d5fbe8f0a954b772c441c8d6046439c615d3b9011_Device=CPU_Config=()
+314:conformance_Proposal/ReadIRTest.QueryModel/Op=Proposal.4_Type=f32_Shape=static_IR=ea8cc682a9a36cc61498573e967ec64d289af84a9e3da1911085b1de4fea4c82_Device=CPU_Config=()
+314:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=6b0185f2e61c010924a76c5f136ed90d0e154f507028c500ee78bdc5a7ed65ac_Device=CPU_Config=()
+314:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1245c8dbd9027cc56d2eeb58e1bd23774ce945522f66a17ecc3c03ca1ca163b0_Device=CPU_Config=()
+314:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=447c546ed54e81edcfea77cafa8d18261923bf25c050666029828ea72e3a875c_Device=CPU_Config=()
+314:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=dda009a1f3191e35286b7515f5741905e303f27287041248e2ce15f6954af810_Device=CPU_Config=()
+314:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=b93daedfdba7331025c12a5eb4b881bd7df445d80bd4fac34833087fe6d65bf5_Device=CPU_Config=()
+314:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=00d8728cd472d05b1eebf4b4d0ffa4a5d7c7dd34b3a99055b0f8ff5b0173af53_Device=CPU_Config=()
+314:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=2ce56cfc77884dfc61f7e9fab9a0ce04a4b016f9b3d13465cde1576b9374a2a6_Device=CPU_Config=()
+314:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=c808434d1d2cbd9ea66373f22c7e635c5bb2e3a6294f93421d1d9d34ac62515d_Device=CPU_Config=()
+314:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=9b9521ed73100b54a3c5920107db944380157eea1b72f4e4d94f8e2ced1f2e4f_Device=CPU_Config=()
+313:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a4b3740eda9e6bbd3968dd39e6abb33b22a90a811298df6a761958216acb389f_Device=CPU_Config=()
+313:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=static_IR=36b9b7be1407243aad0792e7a49ef25f7c3e3791dc1ff93cad40480837ba87cf_Device=CPU_Config=()
+313:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=f32_Shape=static_IR=ca5d2626f2066e0c806addc4b6ffb4b3a71f1183b93783b92f44de62d82faaf8_Device=CPU_Config=()
+313:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=7685da6dcf91a208b72d5961c2c248d816de501366163d61b1ee3c148787fe77_Device=CPU_Config=()
+313:conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=acdcf37615b571d8a1275b71cfe0c43a6410e56f5f18db8e9d795e46aac73d0c_Device=CPU_Config=()
+313:conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=d12f2033cdee7e244afad462ca1d9295c314836b593b2a30730861c2a3c8e9f2_Device=CPU_Config=()
+313:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=c39d76c89bb03fe251dfffdd9b8eb85c0585904ed9c5bb4660c3dedfdc451efb_Device=CPU_Config=()
+313:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=a7b7ec75e1b343acfa06ea53d7d5b631c06d44c68b1fc92555d7168c77aeadb3_Device=CPU_Config=()
+313:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ee1f9348ff09a058dc09cd63581663590521d463d14b785a23ccd3cd28110b5b_Device=CPU_Config=()
+313:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=315fa20f952b6c7678cc93dbfd340097847826fea7928eabcec46d7ccacdb224_Device=CPU_Config=()
+313:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=5df86bdcc255998a0b5b18e64e3059afb2c80e37b5695208d04a6fc0f1410b50_Device=CPU_Config=()
+313:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a815b68b6a8d36546d3ac0112c60283bd69ae1059e8deeb98b21f538c8089beb_Device=CPU_Config=()
+313:conformance/OpImplCheckTest.checkPluginImplementation/Function=Select_opset1_Device=CPU_Config=()
+313:conformance/OpImplCheckTest.checkPluginImplementation/Function=Gelu_opset7_Device=CPU_Config=()
+312:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ede6f5f8a1d9bcfd1979965f575c8f267870e0d6a5d3a62d229ea029893525b6_Device=CPU_Config=()
+312:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2758266e894d04cd7283921f678a468cc1fced81d1a09a3c95add3ed9e5d6719_Device=CPU_Config=()
+312:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=125ec4e4ba4158d3a6d1a7725cda9a18a220926d5ad6ed623a1433688c79b579_Device=CPU_Config=()
+312:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=edb5dc5a42b36879d5ced77fc2db7d8b331c888534602893ffb277f742da1005_Device=CPU_Config=()
+312:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=dynamic_IR=2c47f1ee19359a486a72bdafc2614159d48fffc80ddabe0f897212a454a75b18_Device=CPU_Config=()
+312:conformance_Sigmoid/ReadIRTest.QueryModel/Op=Sigmoid.1_Type=f32_Shape=static_IR=697bdfc59094203ea1616203d64759a40193f1a23a4a51f11340a7912e355cd1_Device=CPU_Config=()
+312:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fbd54c37e1db9cd3cd3fc7c571117f65c26d9f5ff0674711a326e02ebd3f9d57_Device=CPU_Config=()
+312:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=e22e40a4f300567612f963b17707be4de09093cb9a248aed62af594e7986f7dc_Device=CPU_Config=()
+312:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=753b524e2aad8fde7e7206fa8c3e7ca15c52c49f22f41d48cfb6b4d814cb40af_Device=CPU_Config=()
+312:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=718d6ff3b19f498cf4edeb9f7f4a7528fef578dd6fc7edb0796d476505472e46_Device=CPU_Config=()
+312:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=b4fecfa9b5d565a02a9f0d0ed19a11127ea9c8c4e70a0e5f7b920701e0665d51_Device=CPU_Config=()
+312:conformance/OpImplCheckTest.checkPluginImplementation/Function=Xor_opset1_Device=CPU_Config=()
+311:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9ce6a2f4787ef120c486a68cc02bacb95d6cb1c4cdb5e2054275cde409a39803_Device=CPU_Config=()
+311:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=static_IR=5f8b64ad8dd9ccd202ae8d5080ce166fe9f47b909e803da49546dbffdfb4ab3d_Device=CPU_Config=()
+311:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=a65e17fc28c74df4f3b1bad89635ccfc376a857f2d92ba646ca830b03eafab7c_Device=CPU_Config=()
+311:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=a05339454f3f2a599ee9b041f1f01a124bad7d7e5fc1e6d133e00e43d002a086_Device=CPU_Config=()
+311:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=ad5c5df7cea37955709ef71d9967828ce3f0011e68aa1c6085984f1422944058_Device=CPU_Config=()
+311:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=f03721e9c346ede7ba78d0a2466e38cec6d1e08b3395b38c8f47ebcbfba35d3e_Device=CPU_Config=()
+311:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=c985b086d155654f9db8470da3af5245c4fbb0139015d049b8b3b20f393c2545_Device=CPU_Config=()
+311:conformance/OpImplCheckTest.checkPluginImplementation/Function=Eye_opset9_Device=CPU_Config=()
+311:conformance/OpImplCheckTest.checkPluginImplementation/Function=DetectionOutput_opset8_Device=CPU_Config=()
+310:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=04e25f2a56de557c8da87110ba02c02ae45277d029964d932fe6837acc0f1b10_Device=CPU_Config=()
+310:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=bc90fb9a95a74efb937b6cf808584dd1e91aa6c4d774640b51f4325f0aca6b42_Device=CPU_Config=()
+310:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=5995707c0c9656ffe179147e29d03df5a35286481a4140b7ef019434d83aaa61_Device=CPU_Config=()
+310:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8fce2d85c65eb0e8b40c2923338675276902296daf8744322876552dcd68f7_Device=CPU_Config=()
+310:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=61091e18fb5e9eddcab243ec79234ef3b93a5d01d9b2611a3a0e027eed4e4b31_Device=CPU_Config=()
+310:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=31ce051edcf02344a693eb2d200fa02b53412a5707faaffc2907cadcf81192f4_Device=CPU_Config=()
+310:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d2d4814c8ab7cbe5107a556fb3e73998aafae0278b0d304fa07fc4ac9fad4559_Device=CPU_Config=()
+310:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_Shape=static_IR=c8ec200fa8fd8ec9c185d9d45ee1380be5e0e4a6f3157e5900401e9fce999553_Device=CPU_Config=()
+310:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ec60ac68ad3b748ccd56a7c91b3a2461510f05d66e4b64e12a2069483d8243ae_Device=CPU_Config=()
+310:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4a3c7edd1efc847f3d1255738c19cdaa682c9348c0b0bfc466ea9d5749d5eca4_Device=CPU_Config=()
+310:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=9d26a7c321db2d87b29b93baeca20dd25357e7777261ea6a4cbf968a203969ea_Device=CPU_Config=()
+310:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=00b85178c2e7f891c89e99a6692b94a56ab0882f4a30167997e104db1429a9c9_Device=CPU_Config=()
+310:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=dynamic_IR=79cffe28ff617b42488d33b204b0f50bcf4e304c74d2a11820c830e091c6383e_Device=CPU_Config=()
+309:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9e21c0af425c90066d92577a0b8aadb6e9fdee50c197b15eea040b89eb715a6a_Device=CPU_Config=()
+309:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=2b02493e0e09536d01441e885df61f27f2202a3e16742695bcc4d1d0d892c56d_Device=CPU_Config=()
+309:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=dynamic_IR=7cb8f8f3f3b4335221f85190d4bc29dd28a6b99133ab630a5ee04640af0843a0_Device=CPU_Config=()
+309:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c078bcf5a6a207fd76d9cddc1a35df577529e71ba0a120b28c7ed17bd12673bb_Device=CPU_Config=()
+309:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=727e029d6373e823f7500e6bdfd1c07ba87fdb3ba428fd0a089885d7a1e91552_Device=CPU_Config=()
+309:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=a1862e486a20c8de71dd94c12a157098ac5f222ba8ba3e1d3edaf9362331e185_Device=CPU_Config=()
+309:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=3cef1c65fc41c5f96e90007517fb5c911435e8d8ae7db1a1398ae63c2525d6c3_Device=CPU_Config=()
+309:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=61091e18fb5e9eddcab243ec79234ef3b93a5d01d9b2611a3a0e027eed4e4b31_Device=CPU_Config=()
+309:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=29a633b64671e28103c44b79ec5c329118c0d7c4f70466ad44482116aa2a3b6c_Device=CPU_Config=()
+309:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=78a5e7f340d63660dc0710d0e390dea2d3f68ac98f16e8dbc11b4c28ac0440e0_Device=CPU_Config=()
+309:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bbb0129fbafd6d1874ccef37a1bb60379733012c502d58326dae70f413e387f2_Device=CPU_Config=()
+309:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=755b95b2e9c5cb5da4d4cd2c46ced327e10dbfc67a0d934667177b5fab73d431_Device=CPU_Config=()
+309:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=e05af92d21ebd869cf6e9554a4aa0bfc60c8b0c64baebee798f0be5a0a01019e_Device=CPU_Config=()
+309:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=b8e37f2c8e2574b3f3554578b72e9df771c290c1bb47238fc4de9754c6e6f126_Device=CPU_Config=()
+309:conformance_Ceiling/ReadIRTest.Inference/Op=Ceiling.1_Type=f32_Shape=static_IR=fb5c74aa3b17b4a8d5e1603b9179b60bf3f0b8301c74a8fb632b6869896439d6_Device=CPU_Config=()
+308:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=dynamic_IR=c838ac42d5464130a9049a63f7020166b34e2ef974c257a4060fa02c3b70ff76_Device=CPU_Config=()
+308:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=9c32e47cd885805256c3e3053412f7d8c448762b4b509507f6e4dd78e2aeb56c_Device=CPU_Config=()
+308:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=()
+308:conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=99a80c495a8fb4626995167a3ad2efa0efed7696459f6219125414a2bd20dfc5_Device=CPU_Config=()
+308:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=570d13e19f312cf288f0f5d651f051c01f0fb65999579c3b06960c2936a18181_Device=CPU_Config=()
+308:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=cdf79cced0ed380052910c95b09b4022841474c87d06061f29791ea2ad9813a4_Device=CPU_Config=()
+308:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=b6e3f37ddee609d492f47b36b8fe937ee401d01e6d43d7e0b7c06d1a1781b501_Device=CPU_Config=()
+308:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=2c5e9a1cd59ec2d5786132697bfcb1519a7857cdfe06038bb39abed39c09e9a2_Device=CPU_Config=()
+308:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=12b6ad1cd462f676c9add533f2fb2a5d98698e72fc5d0e6dc984abb27f54475d_Device=CPU_Config=()
+308:conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=()
+308:conformance_HSigmoid/ReadIRTest.QueryModel/Op=HSigmoid.5_Type=f32_Shape=static_IR=85df90c3ae7b84d89ec4eae30556ebf4af996c318afa45d90dbb219f73033f31_Device=CPU_Config=()
+308:conformance_GRUSequence/ReadIRTest.QueryModel/Op=GRUSequence.5_Type=f32_Shape=static_IR=9f7a30c4f90df2edf8e70468ac22f325bc97e99613fa6ee2aced93e71ea5896a_Device=CPU_Config=()
+308:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=bc52d884c8eb9ffc1a5c6af9467b8f285933b715def03c4e5cadf426ba186c3a_Device=CPU_Config=()
+307:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=fcab2b4b3bf1a04070e3fd3490e6317f2d6870335d302d96c768f40da8565c8d_Device=CPU_Config=()
+307:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=797bfeedb05fe1883757101c44e78eb807ff9c3570aa58b0891172e729d4b384_Device=CPU_Config=()
+307:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5f18fb02adfd683f379dd5a15d38f01cf744e6940754f6a40e2646a1d9c97be8_Device=CPU_Config=()
+307:conformance_ScatterElementsUpdate/ReadIRTest.ImportExport/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=()
+307:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=b6984001a616b3dd3ef4b835b2dc6a48bcaf8882bfde7761b4e141733364f66a_Device=CPU_Config=()
+307:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=3ca9994321c7492af9bff158852a484636638e711ae39a6acb66d273f696906e_Device=CPU_Config=()
+307:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=41ea59b807081adea7869609c65776a42f88079ec22180807905d5c2e8ca0777_Device=CPU_Config=()
+307:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=997a090766babacae10464bab19af5db238eb28704c6d463cfcba48767a90c8b_Device=CPU_Config=()
+307:conformance_Einsum/ReadIRTest.Inference/Op=Einsum.7_Type=f32_Shape=static_IR=810f13adb3f7342c7d514bec2aa3f20d7a59527b54c7f6954b038efb194c5ceb_Device=CPU_Config=()
+307:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=d661093ec9006177e5d47e7f666d7c98353f9c3d5290ba6284145f60822f2573_Device=CPU_Config=()
+307:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=c56cf3dc39ed0072f3e5a8cadd1502fef904b32de3b7760ee4c6964c0e505ac9_Device=CPU_Config=()
+307:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=8337ad383956ad96ca95f4aeb967e05c694fe586b4ed6e46547e3ffa0217c59b_Device=CPU_Config=()
+307:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=f3d84b4cb7f301c6b64c64927dd1e8c20e144671419843ed3d20692f0773445c_Device=CPU_Config=()
+307:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=a3e2f08143425d4c6ed46ee301de31c5942694f79af0d297e4d1801e9a6a0ff8_Device=CPU_Config=()
+307:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=d673fdf688abaeaf4cc6239ff762f8df557ab445bf9f031ab3bd87782717f2ef_Device=CPU_Config=()
+307:conformance/OpImplCheckTest.checkPluginImplementation/Function=Reshape_opset1_Device=CPU_Config=()
+306:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d99c03088bad009d9be7f29ec5bad7e3b6c7534fe2649f9670b6f713bf017e7e_Device=CPU_Config=()
+306:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=6095afd484c177267854bcab902c3057a2a1bbf37b2188d3a31fd2cec48de2fe_Device=CPU_Config=()
+306:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=13dad9a80315de728323f8d84534389c4840a92e74073be42c312c46107fd964_Device=CPU_Config=()
+306:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c219261f655fdb1bcfbcc367ca8f6c4bdf0dc1fbeb7413343a3f0bdd74a70857_Device=CPU_Config=()
+306:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=bc90fb9a95a74efb937b6cf808584dd1e91aa6c4d774640b51f4325f0aca6b42_Device=CPU_Config=()
+306:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=ba1a7c4cca6d39b8bc7be7d52a0680d055e33a776f4048ecf38335a2ccdd8d51_Device=CPU_Config=()
+306:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d56533ce961113b2ca0baf02f3ff9f8ff210264343f6bebf26418a35ecf36b02_Device=CPU_Config=()
+306:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=e3a5a7f1a73793457fae9520ae122c6bbbfa92f1daac0ef214e47a2ec7ea18e2_Device=CPU_Config=()
+306:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=49b05f6b6a636d84beca451fdc1fc81e3411a100ea105fbcd49ef72ef1fa0934_Device=CPU_Config=()
+306:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=7293f99e38d76387b64632d06503c539c369e1ab78d9388e1af42d7071d8230e_Device=CPU_Config=()
+306:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=33297e2649e2f0c53b0bfb5e349d83ede580471764202480855e3f1efc8017a5_Device=CPU_Config=()
+305:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6513dbb80f00e325d6dfc953d1208c5834199f75a60430fc85925ed6eb0d9bb5_Device=CPU_Config=()
+305:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7c43bd989494b4ef0f2ca40c3b0c57b471d58b21491456e9588938f702721be0_Device=CPU_Config=()
+305:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f84bcf4f549ca0d6e75c7905f1463fbace4f3b955032fcae627e46e353b2aee9_Device=CPU_Config=()
+305:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=99820651f05bae979a287a8644f1b739637d684efad288b48044c2a664e43a3f_Device=CPU_Config=()
+305:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=0d62db1843ef7e470a613f9f4d4999ce0e6c94365bd667b78c283cb9406e915d_Device=CPU_Config=()
+305:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=dynamic_IR=33e67497d576ce6af4a214d55862646d034effd328ef5beed8d7b0f380b6b689_Device=CPU_Config=()
+305:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=0a7b1efc8d314c5e37062e482a9398f718082ba0528c6ca2d2f6c88e7a4a2bb0_Device=CPU_Config=()
+305:conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_Shape=static_IR=5b9cbac8797158a77d5616e8b7e5d8132360e23e26d31d845f0d129df7bfd7b5_Device=CPU_Config=()
+305:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fdb744ee1deeced50395d992d949989a5e8bac5d4f73a6d4b51a56f22359f4f1_Device=CPU_Config=()
+305:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=57d49137431cc7fe4364cc2fef13111fb9f7a5a908b2d7b6f5663100ba5d636c_Device=CPU_Config=()
+305:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=c70693ee2f825a40f3e1fc8dd2ce9355690bc33ff27030f674d082a0cb343cc9_Device=CPU_Config=()
+305:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=efeea353bf41d0aac1f5400e451346d6cb407610566018f368726328cafca221_Device=CPU_Config=()
+304:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a0b3d7813e380f287a758c35e56e8e8edbb72b8c64fab6194a8890dacd5e2f16_Device=CPU_Config=()
+304:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=92f5c3aa4427a89ad6ef275c0beb2139cbd0c6ce2eb71205117448adf592ad20_Device=CPU_Config=()
+304:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=776ce5493890837f137a7abc7851ff04164468d7c13ef1022f73f1f68e058c1c_Device=CPU_Config=()
+304:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a7242174afe3f7c2e95d31cd14d56ceb0a566e2e8d65ba97e07d004200f4f517_Device=CPU_Config=()
+304:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=876a77d1e2efb758a87bce1dd2fe35cd8e455c6f3dd7cd2bed8e10504c426de4_Device=CPU_Config=()
+304:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5beb9762188e985c9554ffb0a05fdc1608fb7d970baacebbbd7118186a324617_Device=CPU_Config=()
+304:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d2d4814c8ab7cbe5107a556fb3e73998aafae0278b0d304fa07fc4ac9fad4559_Device=CPU_Config=()
+304:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=dynamic_IR=50ebc9636f3321fe9bc87cbfe301c8ca3ea27f56cf429c983ceed6ae63bb3885_Device=CPU_Config=()
+303:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=610a8f8c44b0e133d4b5684c37017859d06bb2251482eca0cdece0a1c216b936_Device=CPU_Config=()
+303:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=20450a7796284bbdcb011ce027d5c7260ed7dcdf07e4d39e48d99a2162eaae51_Device=CPU_Config=()
+303:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=506b15e531d5a643d3276fd84af8e10eb2a62ce20fe3aeda90c50cd7442e0a88_Device=CPU_Config=()
+303:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=8fc296db9f7dd10289217cb81cdf5991c6b5f3c89369936a94c8ac484702bfa3_Device=CPU_Config=()
+303:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=22707f4dd48a39013f543e7eea951a8feb16952bb25f9dd34a0f05dcc28883f6_Device=CPU_Config=()
+303:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=00d924b3557896a41b0be32897f7b7293fcc44d79a285e91695a5fd2f29f3b8c_Device=CPU_Config=()
+303:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=6d5907929d59d1f99e85183238e29d6602c84721d099284dcb8900ae5fc3c45f_Device=CPU_Config=()
+303:conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i32_Shape=static_IR=8d3863956a8a6a5067c45d40ae0207b14b9f1736bdf2a5b8c01979fbc012a5e9_Device=CPU_Config=()
+303:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=94b08f3c309048124724d9de0d120698fed90ff0237b07c4a4a2b7ccf843d76a_Device=CPU_Config=()
+303:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=b81d993247e604272e6df01b8c4ba016be7f60263c892e8469deef67a8a6afba_Device=CPU_Config=()
+303:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=16ccecc11352f2c476db041adea21d67a96e03cf33902b37f4f6855b5113c202_Device=CPU_Config=()
+303:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5d5dd8756ccd01ee77e0c17d26f248c9e35d07aa812dc64bc39ac1ffe17ae585_Device=CPU_Config=()
+303:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=0cc60631ab50733ce6b7a2256c0db1f9d9338505ae85b30fee02026c28511383_Device=CPU_Config=()
+303:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=281f1852405ad37d0606184e81d8534d769f50b3fe99f5f17ebfda6954f4a584_Device=CPU_Config=()
+303:conformance/OpImplCheckTest.checkPluginImplementation/Function=ScaledDotProductAttention_opset13_Device=CPU_Config=()
+303:conformance/OpImplCheckTest.checkPluginImplementation/Function=BinaryConvolution_opset1_Device=CPU_Config=()
+302:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b2ca18b9d9f9e7c05f66a1f197b65ef9ca1d59319ed5f30d4eadf6f8befcd9bf_Device=CPU_Config=()
+302:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=562ad06104aa1fed1781e5e3438d71855e1ee7e0126457f2d8d8d415f9c30c03_Device=CPU_Config=()
+302:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i32_Shape=static_IR=61760c9c95110bf88cbfb8aa09378cc214d4cbbbd6c39c98feec1dcfbb7d47fb_Device=CPU_Config=()
+302:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=8368b4f6e208aa4cfbf0aeaa648e9408c281a71d98d15ee09407d26274fb349f_Device=CPU_Config=()
+302:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=1366ff72dd5b68a3faf25de8f98e4ac5500663b1aac4941af11532ea2ee769d3_Device=CPU_Config=()
+302:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=670a0d513277b4508e8edcddae6361e98fd03c2fff31293637c36f97e59a6b9c_Device=CPU_Config=()
+302:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=ba1a7c4cca6d39b8bc7be7d52a0680d055e33a776f4048ecf38335a2ccdd8d51_Device=CPU_Config=()
+302:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=92dc9b12889f441d7a93e95851a15849139787b0ecc080e70d266fe4cb6dd9c1_Device=CPU_Config=()
+302:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=dynamic_IR=c5ff38504273a230addadadf4fef517ef73154c5f9f10ef2ace961b1dc3cb794_Device=CPU_Config=()
+302:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=b459cd78b41e36a6c3823301811fd3322a77f802ffc3399eefdfd8ffa4ce6e6c_Device=CPU_Config=()
+302:conformance_GRUSequence/ReadIRTest.QueryModel/Op=GRUSequence.5_Type=f32_Shape=static_IR=98a6da6e0972a1b70caa5df788a6921d4e470565dc3880faa59e913fdc15f460_Device=CPU_Config=()
+302:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=7b8eedb1c6be0db4a0c041ec3b04498d6dc68b326c35533ae16258e750f21e3f_Device=CPU_Config=()
+302:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=f0d5131a073c03932316e3f20f40c527ddabafc926f0d10824a96158c03524b8_Device=CPU_Config=()
+302:conformance/OpImplCheckTest.checkPluginImplementation/Function=Tanh_opset1_Device=CPU_Config=()
+301:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=445a2c47e85b116d03e5f6fe43863a39778b78ca5175fba1bb0eec669f7610cf_Device=CPU_Config=()
+301:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=f32_Shape=static_IR=7f806d6c4a0ff3515dd9a092fee2ab14a5f363fd5fbc7503d64a8cec4bb1cca3_Device=CPU_Config=()
+301:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=dynamic_IR=c5ff38504273a230addadadf4fef517ef73154c5f9f10ef2ace961b1dc3cb794_Device=CPU_Config=()
+301:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=100761a180c245ecb5f949d8a3ea0d4e26d7bb15d679ab797362f695bff03be9_Device=CPU_Config=()
+301:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=fb8064c0836e50254162e2a9cab01514f76b19f78084410b6d1b69bd54f93168_Device=CPU_Config=()
+301:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=30f4b90114764377dcd8e010019eefe0ec9c21dc6f0503b52323dfe867a51df5_Device=CPU_Config=()
+301:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f069cbce6f4c3276869b6d9c4a6c843d7a1e1c9d299e8680218636b04339a9dc_Device=CPU_Config=()
+301:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d0aad85620a1b97486758b17c69043a6a9cf75a459bf6e283b28ca132e917dcb_Device=CPU_Config=()
+301:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a7d9ffa60c8d1f330ec303edf6a6c0f8d8e0fe8657c561431bfb91a94c2639e8_Device=CPU_Config=()
+301:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=d673fdf688abaeaf4cc6239ff762f8df557ab445bf9f031ab3bd87782717f2ef_Device=CPU_Config=()
+301:conformance/OpImplCheckTest.checkPluginImplementation/Function=Pad_opset12_Device=CPU_Config=()
+300:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7340b50308272b86e1b98e6962ee280e9575fc0d7042b9cc076c530268e2ca74_Device=CPU_Config=()
+300:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6d705ecceb3a026a9be0b5963705b0c3c6be0123fb7d25885d3ae21213f1716b_Device=CPU_Config=()
+300:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5e31c7022ed7bf2adff14876be4bbf6562afdc2239a08ddcdb507e3d1a20071b_Device=CPU_Config=()
+300:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=static_IR=6e53e1fedd57631f3ec70d6825d8d1029ac95905b82b6bef7fd44ba87373e9c6_Device=CPU_Config=()
+300:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_Shape=static_IR=d4acbcb1930b26610eaa33c0bb8aa7fd866d8142afda9fd007226f0ee6fa5c36_Device=CPU_Config=()
+300:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.3_Type=f32_Shape=static_IR=a56b3f758c88a5723e4a2cf04ce46c92681ed7fb0d6dd7f4d5b937dbf00b0eff_Device=CPU_Config=()
+300:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i64_Shape=static_IR=7b9883414482f3b1108e549a9c47bb8a8aa162d962813c7e99411d000e02690e_Device=CPU_Config=()
+300:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=i64_Shape=static_IR=68115f3a18f8ea201078166547e9c2a8587a5bb37646adf6f90da976f7298386_Device=CPU_Config=()
+300:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=7fb9c2cdb4c82a4b65d110fc84c03948917cc1921c372cc645cab00a3377fad8_Device=CPU_Config=()
+300:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=7988ae4f263061e530c61f5987afd5e7f1945ecef9fcded2bc9799afdcec0df6_Device=CPU_Config=()
+300:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=33d84638f606d759354e190991899e47d2f4c63b0e378aac985e5fb9132dcd01_Device=CPU_Config=()
+300:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=ae7b6a45a538bb7f65d5895f2f7941fd9048645482faa40adb1f773e282a946c_Device=CPU_Config=()
+300:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=7b8eedb1c6be0db4a0c041ec3b04498d6dc68b326c35533ae16258e750f21e3f_Device=CPU_Config=()
+300:conformance/OpImplCheckTest.checkPluginImplementation/Function=Gelu_opset2_Device=CPU_Config=()
+300:conformance/OpImplCheckTest.checkPluginImplementation/Function=EmbeddingBagPackedSum_opset3_Device=CPU_Config=()
+300:conformance/OpImplCheckTest.checkPluginImplementation/Function=DeformablePSROIPooling_opset1_Device=CPU_Config=()
+299:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=08fa156c3f25fc8836356fd1a8edb73222f9fe2b3476c0ae32a26636b5870247_Device=CPU_Config=()
+299:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=cc5e06594accd8694073f3ebe702319fe0711c3b7d4db5e06072d83eeb7cb096_Device=CPU_Config=()
+299:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=fdfd59e3d316eea2f9fc3c56664cf1a07603bb6e26d1b367987d5046526ac60e_Device=CPU_Config=()
+299:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=136768c6c28210cc47eacf6667103eac8106e3f255618e067d351cb700e62cbf_Device=CPU_Config=()
+299:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=644274eaea5cff1fa9976380a2c024a8510f88826d0c1a6036aea3b18e3ecd8e_Device=CPU_Config=()
+299:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=dda9a81656cbcb4ab5484fea52e7172baf67d46babce886726c96eaa1980766d_Device=CPU_Config=()
+299:conformance_ScatterNDUpdate/ReadIRTest.Inference/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d42cb628111ca80a33a558dcd1c2c310aa7b95d6c48549075291f49ec59c302d_Device=CPU_Config=()
+299:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=05e89f7690a9c7d235c753aa4af28229a44fab527f44ff4832ebcebf0c9debfe_Device=CPU_Config=()
+299:conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=e177da00e93cb595c18d142e92898135415f0de01a3b1ea763f3ffef3d7ce96b_Device=CPU_Config=()
+299:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=f31f6d969e04a7a1c964c02f107a7291c85067ac31d935921bc418363c2a7a46_Device=CPU_Config=()
+299:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=c4d1a1fdd0a336620be37a8ce7578ca0dd0c74f89fdb32ee86e7004792aa8445_Device=CPU_Config=()
+299:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=491b849a7ce8fdb2190df5415fe037ff02fc23814efc520c343e872f539d6e55_Device=CPU_Config=()
+299:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=58b9cf97229bd8293e747a47979c3d98261275f9da473dc942b746a06a1fa214_Device=CPU_Config=()
+299:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=922699707423c4110bf8a551eaf7dc3689fd3673fff79cca21442cda90c22dda_Device=CPU_Config=()
+299:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=55d83e2240e88295a78084f92162888c9b0beef46ae468cd7ab93a1c0a432835_Device=CPU_Config=()
+299:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=3fbff9f870428a19ed434cdf72834eec251edc3dddd149491c94319d63a8438e_Device=CPU_Config=()
+298:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=18763287c1afb7684d3f74e91fbb8a8c17a13aa52908a5d97b6ad220c5c4f633_Device=CPU_Config=()
+298:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=d51bc4204bb6079e79da8d0cf95ab8a3454c90a040aee0fc6fedb00f0795c577_Device=CPU_Config=()
+298:conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=dynamic_IR=6c91ebbae26ffbeec9778f2db476ad7ecb6eca6710cba24a86d3a2a262f68e43_Device=CPU_Config=()
+298:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=dynamic_IR=3e669c3f90fc7b2209d3d588932f8eff3827309a5928f4b27722139964e2c46f_Device=CPU_Config=()
+298:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=dynamic_IR=debf36fea706c02dc67354edf761f0dc931ebcccbed285f186164fc4b9532766_Device=CPU_Config=()
+298:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=05e9fdd5183bd179e5ef996ebcdc53f239900ca46a8122ee8bb1e885c2c091ce_Device=CPU_Config=()
+298:conformance_ReduceMin/ReadIRTest.ImportExport/Op=ReduceMin.1_Type=f32_Shape=static_IR=61bca82940fd4a54bcb587a88272b81c191b8feeab37bfafa044ef768240977c_Device=CPU_Config=()
+298:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cf02be750ce25545f7bfd694603192667eb3fdb07a186eaa7f3ecf5767547651_Device=CPU_Config=()
+298:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aeabe9639d6dcd5ab6e09f9905ffa8bdfe7cafcc7f5c8598e20e4ff39bdb50a6_Device=CPU_Config=()
+298:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=246f55d43a6e986a8ba35f711c43dd32cfb1ca097598b0a01690d4765e0d5019_Device=CPU_Config=()
+298:conformance/OpImplCheckTest.checkPluginImplementation/Function=Round_opset5_Device=CPU_Config=()
+297:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=90deb33b54746ec16cf8594f8aa0792c6aab2e27ff12ed97523da583402aad95_Device=CPU_Config=()
+297:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=37f1a0a9bb9b948ed78217a65a5a2de7f0234b1e000fe5ee11ede68767240f1b_Device=CPU_Config=()
+297:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=fe8bea06bc602fce2362c5c7671e3c8cfc63fee6bace0be9baa41e9874e86b26_Device=CPU_Config=()
+297:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=adee3d4d6728b17fb5ab17a9915c5b7c8808f949ad358e8a16a0bb12dad7c958_Device=CPU_Config=()
+297:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=f7e1aae2dbc817ca8f64a6bb0742e476055c239cc6e31a4233b7580205feeb41_Device=CPU_Config=()
+297:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6c1aeced5aaaecd99f3917a0f38e01902dbe81614ae4dc9a99fc09a379990abc_Device=CPU_Config=()
+297:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=0a5f9fad12bf5e2592c6f720232bb38d94a5fb9ac1fdc5a8f7d474ed9e9d2504_Device=CPU_Config=()
+297:conformance_LRN/ReadIRTest.QueryModel/Op=LRN.1_Type=f32_Shape=static_IR=c1a0f6661ad306b82e66063988835c1a17072608792f3423bb058fe38c4b14d1_Device=CPU_Config=()
+297:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f4b78bee713f23abfda124ca92d58828eeab6118710d93572a491cfd85cd05b4_Device=CPU_Config=()
+297:conformance/OpImplCheckTest.checkPluginImplementation/Function=ConvolutionBackpropData_opset1_Device=CPU_Config=()
+296:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8f622d39d560109549e99d37f3c9cb476f4d69e8525e7a0ad8fce6fe79a6f982_Device=CPU_Config=()
+296:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=526afcc4dff58aaa019466b0440b94dbd2d5f14c060d47b8ec40183deafecd83_Device=CPU_Config=()
+296:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4c794e0e6b27bbef5d21922537d8b23d0d2b5955622c1f5ee724a4d8faf2c86b_Device=CPU_Config=() +296:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=461dc8aa282946831fdc86d1c024a273ac0f29f5ad615cd55b879feea6d23007_Device=CPU_Config=() +296:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2c20f6aace24bf601953b848c173ad475502b91b667c903638acf41fb9a67d3a_Device=CPU_Config=() +296:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=04e25f2a56de557c8da87110ba02c02ae45277d029964d932fe6837acc0f1b10_Device=CPU_Config=() +296:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=e5092af5c0f683044b1df5a45f211f4a692436d1112181a5d613bbf335941684_Device=CPU_Config=() +296:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=b288dfcaf8fd8fefe24212a70255bb280e7e695badf6fad6538042701d77073e_Device=CPU_Config=() +296:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=i64_Shape=static_IR=6590ae34a784f81de25c016454fcc919ae1f9eab672c78c9da0daf83dcdaf1bc_Device=CPU_Config=() +296:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=2b927ee73187f1c1cbdb071ad3c0a72c9eb8a8631f2e7c6c3a8f8482c301fcf3_Device=CPU_Config=() +296:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=53108cff3836c47360380f3898c5de245a566a5d98040820d78befd46e56955b_Device=CPU_Config=() +296:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=46b077d7466eecbadbb7ceba5ed90724db3d9e216d22171f5dee02e44b9a5377_Device=CPU_Config=() +296:conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_Shape=dynamic_IR=a3f02c85607891ecc34c484b433c6a78333e13f3d8cd231e651f8bec26e7d0ce_Device=CPU_Config=() +296:conformance_Less/ReadIRTest.Inference/Op=Less.1_Type=boolean_Shape=static_IR=8cac1c4c51c2eb61b9ec75320814acf81b9ac240a88e1cc68f29541f6eb546e7_Device=CPU_Config=() +296:conformance_LSTMSequence/ReadIRTest.QueryModel/Op=LSTMSequence.5_Type=f32_Shape=static_IR=f36a3f626860d7088b33d97a5a6ce009c89609c142158b256aeb6b5e6dac02d0_Device=CPU_Config=() +296:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=246f55d43a6e986a8ba35f711c43dd32cfb1ca097598b0a01690d4765e0d5019_Device=CPU_Config=() +296:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=dynamic_IR=e46ec3487f18188d1da4c029a2981033018c1f8f273f60d3f7d1bcbdae18c2c5_Device=CPU_Config=() +296:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=dynamic_IR=2d924ba2d56e6b5c7423c6d622e7bd250ab275e0a0ab4745e232046a3223ce7d_Device=CPU_Config=() +296:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=static_IR=5b466c4e4b53a5ea739df517da47f0764f9e31197b7d30fd9dabf17d1b33a489_Device=CPU_Config=() +296:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=d435aa8d2d045d69b2d187147f90c879205f27346ac991765ba97bd47d4fe0f6_Device=CPU_Config=() +295:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=df97761393479b4c56cc923a2b89888b7c3fb949f5c3a93f4bba0ac8a44178aa_Device=CPU_Config=() +295:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f66bbeb796e4da5d462ef573e38fe52db5bdaf2367b2a07aeedae6ce33c6704f_Device=CPU_Config=() 
+295:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=9ec266d6550d7e0c9f4d6114272d7afc80ad822b0bf5078654598b3d623f356b_Device=CPU_Config=() +295:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=i64_Shape=static_IR=d106f0cba8d8311b75f6074c099f45e10400c0829fdd1826292b1310471076cb_Device=CPU_Config=() +295:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=109afa0426a29179db58e16917b829096af105f0def2375a589ea1391138ee2f_Device=CPU_Config=() +295:conformance_NotEqual/ReadIRTest.ImportExport/Op=NotEqual.1_Type=boolean_Shape=static_IR=8fe4bce2e674753d81a1516280769a06cdde538e658ae548087e4888ffa2905f_Device=CPU_Config=() +295:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=10cf1b7a4de1231ad721c9660697d6ee17bcaa2151f08eef596b41e6e3aa1b2f_Device=CPU_Config=() +295:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=static_IR=3c200607c5e2b90b5d75a439011d83643ba042c276c3033f58b3409c068faf8a_Device=CPU_Config=() +295:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=6e6c053ee1974a5d036c6d549508f6d43586d501c72db05df9930639ad745bc4_Device=CPU_Config=() +294:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=45b3506bf3dbe053fcb290dd1040a9d125c56086b37223e8480647bdd9b9372d_Device=CPU_Config=() +294:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c9352ef8b6aae01025051f9c73f023e7b5a13f8987f81bfff4ce0ff9725c21b5_Device=CPU_Config=() +294:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f9e738e5e947a25c9a0d18fe47597f10526e8a74e9d72b35fd848b73f4c80b0f_Device=CPU_Config=() +294:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=a16b5a0ea2fc8d89980db21cab743fbf776918ed2ed1f91f2e4d3ad3c304d4a4_Device=CPU_Config=() +294:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=bec81407211db6e10d7c8811bc58b53c23c8aafa0e2083f262204f345b9bcfc6_Device=CPU_Config=() +294:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=static_IR=def60f5f3fb7a0d22cb3d23253e7c8e502aa9dd2d3756c54dd4343b66c2682ca_Device=CPU_Config=() +294:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=6dae5ccb2325826167ff4ec57e51280b4e125801e6405a33f4d95fd9ab9f3fc5_Device=CPU_Config=() +294:conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=5150e1785d97b052a42873f9e9d23a511027248ff4b13ba7c269c8c3d4639e45_Device=CPU_Config=() +294:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=f03721e9c346ede7ba78d0a2466e38cec6d1e08b3395b38c8f47ebcbfba35d3e_Device=CPU_Config=() +294:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9033954b258cdfa9fa858317ee4588b8c92cc946d7eb305bf130d3ca8ee0f1fe_Device=CPU_Config=() +294:conformance_Einsum/ReadIRTest.Inference/Op=Einsum.7_Type=f32_Shape=static_IR=f3d704d4f0da6c58c39e279d727dd82fe0e59a41dbaf09a3cbaa8f591daf95f7_Device=CPU_Config=() +294:conformance/OpImplCheckTest.checkPluginImplementation/Function=Cos_opset1_Device=CPU_Config=() +293:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ba28829f211d64d6d4922682b85f1bad6a3c28cc30b4f9651186b1e8fab39fec_Device=CPU_Config=() 
+293:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=14f15558b2c7699f7877a9e04e1e0e7d2a2d7e1307aaca519a98ea5f39afc415_Device=CPU_Config=() +293:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=43d0f2c259414c3e23105e2f5a13e8faaf322904d9b70ceb8a056bdb51677ef6_Device=CPU_Config=() +293:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=72373e9c2bc4cdf2f0aa0a5d14e30ed1a5e0545d9a96f4ab675f3b9dc69d8cf4_Device=CPU_Config=() +293:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=static_IR=592176a8c97f4d759a0c6b3ef56c3610df4a0df4743f3be7ba3ed2ffb5dcfaed_Device=CPU_Config=() +293:conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=377acd11b0f7dfb4f3e57baec8a6c8a84737857b7e794614542f139982feaf73_Device=CPU_Config=() +293:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=d50dd7c19583071293118e8b98f2bc749ef3e34ab8eb0149138e6b9fe49a153c_Device=CPU_Config=() +293:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=6837cea94eff6256c3c29807532662e123ccbffde1fcb6f75875d65aa7124a4b_Device=CPU_Config=() +293:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=ad5c5df7cea37955709ef71d9967828ce3f0011e68aa1c6085984f1422944058_Device=CPU_Config=() +293:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=static_IR=424814fbe4a3ba7a49c506f11509c035212fbdf4ef44fb2bc708c5f201e4e1ec_Device=CPU_Config=() +293:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=b11ede8f1aee40577413d8bbe89704e02252e3f02805fcc0ded624857ddb8280_Device=CPU_Config=() +293:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=a815b68b6a8d36546d3ac0112c60283bd69ae1059e8deeb98b21f538c8089beb_Device=CPU_Config=() +293:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=d442b2d9df68f25f567a3e8da8d87866c200d391624cf1c339554a57a9a527a4_Device=CPU_Config=() +293:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=f8c3c9cefc3f7ab9b8e1fd3031be6eb34eba46f9c493b316439c24355a8a4978_Device=CPU_Config=() +293:conformance/OpImplCheckTest.checkPluginImplementation/Function=Selu_opset1_Device=CPU_Config=() +292:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=12e571ef61251520c35bd8c0429b1ee71277033ae88101f08dd769a300d86c5c_Device=CPU_Config=() +292:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=1e9f662cfa263a98c546e69de318268918914f2ddd0ee87cba23c2690a81ec19_Device=CPU_Config=() +292:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=15995a372d69998eb6a001f53486201fa9bbc89fb608c7d2a447203a404713ea_Device=CPU_Config=() +292:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=db87efd37ce8dcbe14286197df3b7a345fdc46ccc03d7d8bda17e3791df332aa_Device=CPU_Config=() +292:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=9031b1919c35a9df591ff64fbe4748c02cc837649899099542716f35b5c68cc5_Device=CPU_Config=() +292:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=7f37f31081476643f5c279fddc3d25eae22d909730b4aca0211aa70fdd572843_Device=CPU_Config=() +292:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=431db89311a543581d104e2a2c498fe021da2e4026323817834670bf5bee67a2_Device=CPU_Config=() 
+292:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=ba4f511cc4a0870c64cc5027fa39b2bf91a6e7f39ea36cd43a693eb59de6d836_Device=CPU_Config=() +292:conformance_NonZero/ReadIRTest.Inference/Op=NonZero.3_Type=i64_Shape=dynamic_IR=7d0265450b8fc92464273ac05d685952ea3877be45b4d745959f2f373fef1431_Device=CPU_Config=() +292:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=fb8064c0836e50254162e2a9cab01514f76b19f78084410b6d1b69bd54f93168_Device=CPU_Config=() +292:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=98526403db7eb1f67a41aed2c34fea684d99d8cb8225313136e55be7d326aaaa_Device=CPU_Config=() +292:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2fc01b66086ac5d8272dd81ab731188b62bbe8920bff1efe61bf3261a3a8b3e6_Device=CPU_Config=() +292:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6af32fc288bcbd414ea63525c4345aeda74ab21c44aab5910f85b8b7fb5d1179_Device=CPU_Config=() +292:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ff39aa885f7ecc22a06f668b79fef4ac41b3adf8dea82f428711b241c0fa6059_Device=CPU_Config=() +292:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c5c5d09465cec7f1477d5e02f3f1c4cf593c71aa090532c4e43451fedde7c2c5_Device=CPU_Config=() +292:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=1988b645a87be14c17740085aa8c4a38e88cd2111f0ba294f77ed0bf856b0561_Device=CPU_Config=() +292:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=bd3ed1b35506cb92c8e587acb102c70abbe02bdaa75f76e5792d48d8e1f2f33f_Device=CPU_Config=() +292:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=c202ffc0e1805a36e48ee4b06d06b68a9f179eef00dc353a092a13818e8ebbe9_Device=CPU_Config=() +292:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=bd99ad9e1d756435cca9c6309caf45043f34c6c3c844f60e17deb8dfef4234f4_Device=CPU_Config=() +292:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=87523dfccb2a9c8334d6810e33c2a2d3b6bc09db7623e7ae93ba4cea89b66a06_Device=CPU_Config=() +292:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=static_IR=489201dc4d1a937b4387f1b7d01f75fa42ff02d7035d39ac6a7f56536b0d3a20_Device=CPU_Config=() +292:conformance/OpImplCheckTest.checkPluginImplementation/Function=IRDFT_opset9_Device=CPU_Config=() +291:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=37337436d0d481c689caabec3bbc8f21ecec65560c70de4dd1f5b0ed9e444bf9_Device=CPU_Config=() +291:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=fb8283ecd8934dfc5340a41e9889a0a760b39869e4873efed4ef85606c162ce7_Device=CPU_Config=() +291:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.11_Type=f32_Shape=dynamic_IR=6c91ebbae26ffbeec9778f2db476ad7ecb6eca6710cba24a86d3a2a262f68e43_Device=CPU_Config=() +291:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=f32_Shape=static_IR=9f4964a8b6440cdec94781121b408df16f0ef2283b0355583eb934b3cd2bcb66_Device=CPU_Config=() +291:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=b288dfcaf8fd8fefe24212a70255bb280e7e695badf6fad6538042701d77073e_Device=CPU_Config=() 
+291:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=dynamic_IR=f550a37ab884668f47ed232e7119c2a2baa814c98fbbcfa3129e7a00feebde0b_Device=CPU_Config=() +291:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=56fb4fb30ec6fd9ddd0ff2e394434eb87546ac7de273f47b663252efa2a380be_Device=CPU_Config=() +291:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=0db5765bcfeb7716699abd0cee850918cf5ef18e2cfdf1614b463734ca35a20f_Device=CPU_Config=() +291:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=923032e47821636c4c8098a7a9afa97b331a47d47357c780b7bced2e46ea9921_Device=CPU_Config=() +291:conformance_LogicalNot/ReadIRTest.Inference/Op=LogicalNot.1_Type=boolean_Shape=static_IR=66b8769b499fa31cfd7545411d16a17b04e1a336bb63a7e907707cd170a30fc9_Device=CPU_Config=() +291:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=474e4bfe52232239280bbe4e2d2aed15cf69c7ec8db86b010084c6e68a8d0e1d_Device=CPU_Config=() +291:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=596d0b6cfe8b39e0ceaa665f1fa82aeeeff78d09315fca7cef031b6dc210a1f3_Device=CPU_Config=() +291:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=03ebf297344daffba82d04292a767fcd7c959f56788ede32ff0d7c5af06ea504_Device=CPU_Config=() +291:conformance/OpImplCheckTest.checkPluginImplementation/Function=RegionYolo_opset1_Device=CPU_Config=() +291:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonMaxSuppression_opset1_Device=CPU_Config=() +291:conformance/OpImplCheckTest.checkPluginImplementation/Function=NV12toRGB_opset8_Device=CPU_Config=() +291:conformance/OpImplCheckTest.checkPluginImplementation/Function=FakeConvert_opset13_Device=CPU_Config=() +290:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f0af28fe49c157f5f62f72f0ab209c50aa07d97c65477217fde6e3a3d0dc98ef_Device=CPU_Config=() +290:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=bf802641cd9b20a23b73202f401f4b32903ac7083d0ac7026098cfb4311b35c5_Device=CPU_Config=() +290:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7895fea00309326a052d47dbd2f9e562b86bb9d0501f2a2fd8843a0340359b67_Device=CPU_Config=() +290:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=71010d034cbc059af32ae6066fff1f27834db480e76042d1ef7bd1e7bc426a08_Device=CPU_Config=() +290:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=66abbc2c605a0f866880bd4730865ae6b5401a1f4beb242f346bf6f2f8138eb6_Device=CPU_Config=() +290:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c90b6f528b750f144ddd29be0059c202d46b3bac799c0d70893f2f4f9f05f64c_Device=CPU_Config=() +290:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=boolean_Shape=static_IR=6d34694c9c8e71415be894a80a8ededc6a83657c6e7ce3aaf66dcd6f9ab99226_Device=CPU_Config=() +290:conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=707190f77491e910ce61476cd3a9d5dc275e7c5ad820cd3894a37c348c2995ff_Device=CPU_Config=() +290:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=a50bcc7d92264c02627cb62bd0cac349b895311cef54b60a957a6366619e82f3_Device=CPU_Config=() 
+290:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=cda3b9bda63d065b5c27e6bce5ffe20968024d77efe5e174a9f4395db56a30c0_Device=CPU_Config=() +290:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=dynamic_IR=e306da3fedc4369302fb21159f2bbbe65849661eabe5bb83efdad3e83f64fd68_Device=CPU_Config=() +290:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=cc13aaec2a2bbe9b760651d358622114b4b0a20cb106472bd8519f0fade61dcd_Device=CPU_Config=() +290:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=9b9521ed73100b54a3c5920107db944380157eea1b72f4e4d94f8e2ced1f2e4f_Device=CPU_Config=() +290:conformance/OpImplCheckTest.checkPluginImplementation/Function=ROIAlign_opset9_Device=CPU_Config=() +289:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=40c74727a381659b1343c4083d7f903ac2519d5297703fd15979a32f820adfcb_Device=CPU_Config=() +289:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=dynamic_IR=3e669c3f90fc7b2209d3d588932f8eff3827309a5928f4b27722139964e2c46f_Device=CPU_Config=() +289:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=c7ce41820be10f17c8d48c005703d536d18e4f49b1d2022ac58f77b7b9afadec_Device=CPU_Config=() +289:conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=baa256d53878b528f6bdba95bf1837cc570dd83b577220f95d9c24cb26d37c35_Device=CPU_Config=() +289:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=4212a754718adff414309fb1da18c4361792b5478366bfdc0994490c7bc716e3_Device=CPU_Config=() +289:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=43c8e8300f01242788a8cfdc37b48779f51f7ee7aef5b28e8de542320ba86e4e_Device=CPU_Config=() +289:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=32537f045cce3d13cb28dd292a0ebe06e13002877d9ed2e5b25d3ebdf5afcb58_Device=CPU_Config=() +289:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=3c200607c5e2b90b5d75a439011d83643ba042c276c3033f58b3409c068faf8a_Device=CPU_Config=() +289:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=64bd2f48b3326db083653b5993c9a75d21be515cbc5af67c62c981e9744e2f0b_Device=CPU_Config=() +289:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=static_IR=2001ebb8291c8bc8cd1db17c172f216cfb3994c57e344eef65565ea9f9cda1d7_Device=CPU_Config=() +289:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=f8c3c9cefc3f7ab9b8e1fd3031be6eb34eba46f9c493b316439c24355a8a4978_Device=CPU_Config=() +289:conformance/OpImplCheckTest.checkPluginImplementation/Function=Convolution_opset1_Device=CPU_Config=() +288:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=4d2e12e00779d116e2192ca77f2be233d76bdd5ce366ddabcf436cc205a9f811_Device=CPU_Config=() +288:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=e2da6d928938b6445170cd69fd4a7aab40130a560cef3ffa2d268a428f56fcec_Device=CPU_Config=() +288:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c6b8f476c9b5cf1a102cb33d5e68033bb074a520d01e360ff46b3e479addf407_Device=CPU_Config=() +288:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9730f247ba4a13fb03274850f295de500156107d33db957188846fe49c2f4566_Device=CPU_Config=() 
+288:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=978c6fe274296020718998393e7fe94bbe0a0856fc377aa474df0454534824a6_Device=CPU_Config=() +288:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=63ba45deb14e56e09574bd3694e3d94caf6ab09f67f5278e6c299c6c924a3cf2_Device=CPU_Config=() +288:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i32_Shape=static_IR=61760c9c95110bf88cbfb8aa09378cc214d4cbbbd6c39c98feec1dcfbb7d47fb_Device=CPU_Config=() +288:conformance_Sigmoid/ReadIRTest.Inference/Op=Sigmoid.1_Type=f32_Shape=static_IR=936ac30f388261cb12776b5e94062a9b5f7b81aa16c9aa5d8f994b8d69231c40_Device=CPU_Config=() +288:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=6167830634e0b253aa78e883453d45bb737cd5df33c849e4b16b99164fd49d5e_Device=CPU_Config=() +288:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=8ea778d7d98fd08efe4b2efa501ef3599df00ca9bd036980ce86e0d6dc454b96_Device=CPU_Config=() +288:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=a58fb7847e59bb119656b143af0c6f65e29f8211034fe7aab03666cdb95d7fe1_Device=CPU_Config=() +288:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=360205b273a323d2cea16c9ac98847c904ed6cabb2412d3b49c27fd2eec52ab1_Device=CPU_Config=() +288:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4819c2459dd2bf875545cc912152c6751ed5db8ef07aba31d3eae6c3dedc7aca_Device=CPU_Config=() +288:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=120b0e6b0c1f7bda754d62ac7c88e7c8bd9e96ddb85e7e5f29decdaa7c1cde96_Device=CPU_Config=() +288:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=023f3573ef77fb592345c68ee5e6a79191b120f9cb68f81194381da2cf68f21a_Device=CPU_Config=() +288:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=66cff13148d62809cced5a381c251525486476f7178eddd3c8e45eeed40afd06_Device=CPU_Config=() +288:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=29624e785b9377dbf03b9aae46e7d0049e93a94655059ec37a0fe308ff7cb9a3_Device=CPU_Config=() +287:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=53828d433bfa231cac709949db0e4ff72010e5cf9df167ecda7ac72bd5a69e10_Device=CPU_Config=() +287:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=c20603ac895226554bc910680f6be82589e053503b3067b3074bcc210f4d0ef2_Device=CPU_Config=() +287:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=582f7347a93cb2c9e51ade6c405ff25b23d009bdcd3d7a3c49902e627a041252_Device=CPU_Config=() +287:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=19f9f95d2205816fc002d8eaea7cfb19f19218fbc3528e4932b99f1486b62827_Device=CPU_Config=() +287:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=1e5127a9c21ad1ccabe67dd1f1e28a3730c09ba294ef1f9fc001c6dcd723ec62_Device=CPU_Config=() +287:conformance_ScatterUpdate/ReadIRTest.QueryModel/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=f5ad439e65ed1e090d3d5744e9e5bcd9b8fed6ac6a191735cbb1cdd9af8bccf4_Device=CPU_Config=() +287:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=7744b2351d112ed761ebe0f43945f7dfd58fd2bfbd94bc5a4737549923caf4ed_Device=CPU_Config=() 
+287:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=18697d690af0de3ff8365a5aafa6ebc7d8e14418c3ab5dd55b3b505d2445ac86_Device=CPU_Config=() +287:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=b6669eb568f36e5d649ae67afdecaa481064561d7a71f1aab592968aca7d8bb0_Device=CPU_Config=() +287:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=fbb85f74ecfa0ffc50b9e6ce637911b406f1fd6ad054a886b9c6ddc6bc898739_Device=CPU_Config=() +287:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=f32_Shape=static_IR=8919e05ab2b0d545cabc2e2732828fa693c8f364e9d4d03faf7097f787d4f628_Device=CPU_Config=() +287:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=0df360e6d6391827cd65ceefd3201c263a829e50ed375522d4e8700f3879de09_Device=CPU_Config=() +287:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=30680a7972de02e47d59c768730b8a64a06b011dc8b5be4fd25f190662cf1c1d_Device=CPU_Config=() +287:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=d81ef130a76622c79592b0b42acf5cd6dd357ccec28958dec6eb02a654beb9ab_Device=CPU_Config=() +287:conformance/OpImplCheckTest.checkPluginImplementation/Function=Atan_opset1_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e6aa73efa73e8b557d46457037aea3d6ba037b67ac1b52437354c2823abf2be8_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=37a75b89894d8a024fe6d1808e0674b4fb59534cd319f4bcd07c6d9caaaf97a5_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=fd10c1c5d33aef77d3428fb5c9789f3c2c2463ab9f6cb51184ad37951578320a_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a5e5b588f6223da1508413c42c21c3945994f492b039511b7ba2e576a052a52a_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=6a05cd292e71af9d96e456cbc515097d5224a9e41cd9c3d48cc73f1a4e6e2164_Device=CPU_Config=() +286:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=13dad9a80315de728323f8d84534389c4840a92e74073be42c312c46107fd964_Device=CPU_Config=() +286:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=static_IR=fe80951a0a44625457a6106d8613c9813c9c0b8fe3606fa5ac1c064217c8a0e6_Device=CPU_Config=() +286:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=20c2030cdd180dbbfad1e5b8a4f865d1757a9d427c3d5ff21651a429369f4341_Device=CPU_Config=() +286:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1ab723c2a389a999b3b01158b82719358d802c6d62767d6dcd91b5d7fe5531fe_Device=CPU_Config=() +286:conformance_Less/ReadIRTest.ImportExport/Op=Less.1_Type=boolean_Shape=static_IR=953b15e350d9a27c4d048cbae41a278c732f3b3a6e8debd7fd2e75e99a015966_Device=CPU_Config=() +286:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e1f0357795d5676c5e4a38b6639cc90c924880ab961eb73e407b5ad0142ac0b4_Device=CPU_Config=() +286:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=3d5c8f5c1545419050f982e8555a6ef9e5dcc06545b1a8573d710e8bc2375a6b_Device=CPU_Config=() +286:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i32_Shape=static_IR=0add7fb1bf1500ea125aa6d245bad577d6dea18d038c020d18c2dcd56704c671_Device=CPU_Config=() 
+286:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=3fec5c6f9e39d8a15d58c5800a889e1660adb375cb7660af1526cd31e69f7cdc_Device=CPU_Config=() +286:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=fced0ff647e4ea9a4b1673016b017f68ed75cdc778cad156dbd6cc379bb815f9_Device=CPU_Config=() +286:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=dynamic_IR=c3d754fe46cacaaf519f39fdc6feb9df6b23d92f6271f6e731c2a8ddc24a948e_Device=CPU_Config=() +286:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=eed21ac7c17920ba437332691e231037113367ee3d256b4db1380c8d2e0db84f_Device=CPU_Config=() +285:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=14c8a8bb712c40d63edf76de9a75dd1dcd53a2df8c6098c80ee760119966f364_Device=CPU_Config=() +285:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=c20603ac895226554bc910680f6be82589e053503b3067b3074bcc210f4d0ef2_Device=CPU_Config=() +285:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=bbf75e5f7aa9f20f890a8eb204ddb5f159ca5eae0616fb99ee0b5169b165d595_Device=CPU_Config=() +285:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=dbc3b2f724614a68d750ae4adfd7d8239c77ced05d30f89deabe272f104a5e75_Device=CPU_Config=() +285:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=f32_Shape=dynamic_IR=fc75aba0dd172d6628de0b473569c672b52f070ac3c446cc3342cb1184ef076a_Device=CPU_Config=() +285:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=dda9a81656cbcb4ab5484fea52e7172baf67d46babce886726c96eaa1980766d_Device=CPU_Config=() +285:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=18697d690af0de3ff8365a5aafa6ebc7d8e14418c3ab5dd55b3b505d2445ac86_Device=CPU_Config=() +285:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=5038017e90f931327d5159938d422b2afc229aa4d776a4ac80a946724fee357d_Device=CPU_Config=() +285:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=065b3de2617f318d1376e9610f9fa1a2f2fc04292f9a7cc949780ae41d3539b4_Device=CPU_Config=() +285:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=58b9cf97229bd8293e747a47979c3d98261275f9da473dc942b746a06a1fa214_Device=CPU_Config=() +285:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=e4388b1379e224ea4849e6052827ef17b490cab3718159195ea2b2986719bb4a_Device=CPU_Config=() +285:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=269ec3789c76e21789e01e31f13f0f1a4895905b3f131e710e663ed2a0d8f632_Device=CPU_Config=() +285:conformance/OpImplCheckTest.checkPluginImplementation/Function=Abs_opset1_Device=CPU_Config=() +284:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=92ed2f40e1ecbb9a90904cfe8e8ceda94f73154a44ac28a50c0d7acb221e8835_Device=CPU_Config=() +284:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=69c68c20edefc8789e62a7cc8a0f8fe7e649f884649ac30833fb5a2ce43c4098_Device=CPU_Config=() +284:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=35525421f74fa15c49098ff1c7faed4fe65763d72ed13add33c6fe8d4dcfb0ed_Device=CPU_Config=() +284:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=97f8a2367c5590d5fe7e405d32ec48e5318a6cb3c0862f2b0e8705a7842e8105_Device=CPU_Config=() 
+284:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9403397dde8b4f6a240bdc928d0f080dfb42f6442f281d6b3fe8b6e348ccacfd_Device=CPU_Config=() +284:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=2233a83397f11ea3c674c4845409c4f27f8bffbb8d0295712a2525c9e93d6041_Device=CPU_Config=() +284:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=24e44f75d91fe4e7e28db6c93870a47d536abeb87240841ff5b7e74b40189e42_Device=CPU_Config=() +284:conformance_ROIAlign/ReadIRTest.QueryModel/Op=ROIAlign.9_Type=f32_Shape=dynamic_IR=7260d5fcecb95f9632da5784702239161bdcab6bee60e0c1296a46e5120d5ca0_Device=CPU_Config=() +284:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=a72b942dc1915ccee8af871c00b16647db7c8935100b012f91ebd799bbe8d416_Device=CPU_Config=() +284:conformance_Mish/ReadIRTest.QueryModel/Op=Mish.4_Type=f32_Shape=static_IR=64374638dfe8bed8e9432c51d92d23b807172fc490c0dfc76428f2c49be92400_Device=CPU_Config=() +284:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=313740a93070bb3cb89143685b7521ea0ace30c3f6d510a4d83ed809808caeac_Device=CPU_Config=() +284:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=bc1a7618e707ddd2c4773d1a2234e6dfb39954ad872abdf38a18d653ec35b26f_Device=CPU_Config=() +284:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=25f55a7cb5f72689bff67eb95af15c64b31c2d29bcde97611e74917fa6724ff3_Device=CPU_Config=() +284:conformance_IDFT/ReadIRTest.ImportExport/Op=IDFT.7_Type=f32_Shape=static_IR=cf47311b142dabf10271ebf5c2e359455d9bcea82d95ad2a1a2d58915c77bb16_Device=CPU_Config=() +284:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=66a4d902b67742a95e2d41d79b9d2434e57a55c168a88049624a0ccb62df9ca2_Device=CPU_Config=() +284:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e1f0357795d5676c5e4a38b6639cc90c924880ab961eb73e407b5ad0142ac0b4_Device=CPU_Config=() +284:conformance_BatchToSpace/ReadIRTest.ImportExport/Op=BatchToSpace.2_Type=f32_Shape=static_IR=f118f5911730937f9dab91ad5eb6f78cb1af6de7bae1dc745dab2d4f02257fff_Device=CPU_Config=() +284:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i32_Shape=static_IR=cd4d566c041357cdd7f8539933888956fff5cfd15e3c42872df59d9890c169b3_Device=CPU_Config=() +284:conformance/OpImplCheckTest.checkPluginImplementation/Function=Reverse_opset1_Device=CPU_Config=() +284:conformance/OpImplCheckTest.checkPluginImplementation/Function=CTCGreedyDecoderSeqLen_opset6_Device=CPU_Config=() +283:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d7a96943c0264427eb83ab413f6e7b0f15f09f83525de581fba582655d0fa4af_Device=CPU_Config=() +283:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=static_IR=bb6a76dcb7d086a6f8dc96d3e0b17573b6dc2775ff9d0f19060947deda586bde_Device=CPU_Config=() +283:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=41c94561e79611e27aaf339205962d4967188b385d68c169b2bf4557173005d7_Device=CPU_Config=() +283:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=44dceb7343477ff50d3de4be1567a57a97d2e3c6f92b48fc93d20eea80487862_Device=CPU_Config=() +283:conformance_Relu/ReadIRTest.QueryModel/Op=Relu.1_Type=f32_Shape=static_IR=707190f77491e910ce61476cd3a9d5dc275e7c5ad820cd3894a37c348c2995ff_Device=CPU_Config=() 
+283:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=25f55a7cb5f72689bff67eb95af15c64b31c2d29bcde97611e74917fa6724ff3_Device=CPU_Config=() +283:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=2737751bcc195e4aaa63ab6d86d803741817287d78fc864e18a31c328078940d_Device=CPU_Config=() +283:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=09dd33f661a07095dc47e3e5205c9fc6dceda72526e79be0751c34823c7e7cf1_Device=CPU_Config=() +283:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=1da672197f2c962a6cdfb059e9d09c10a03c3b082838f53d2faf6a761fee0637_Device=CPU_Config=() +283:conformance/OpImplCheckTest.checkPluginImplementation/Function=Erf_opset1_Device=CPU_Config=() +283:conformance/OpImplCheckTest.checkPluginImplementation/Function=CTCGreedyDecoder_opset1_Device=CPU_Config=() +282:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=7dcfe3f43645f6b9f3290b524024a1a3d48efa3ce346eacc2330be7e27a046fd_Device=CPU_Config=() +282:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=9aba412b059ee77c603bebe3e49240d6f2183168002d25bb7bfe62f1224be2fd_Device=CPU_Config=() +282:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_Shape=dynamic_IR=81bbb9658ad214babb825fa4b576aa83a9ceaae7dc0b878a84e42ea194f3ec13_Device=CPU_Config=() +282:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=6cf01dbf95872b3fc0c914e73415ed8e4dd52cb355031002a65e3e974559d6d6_Device=CPU_Config=() +282:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=af272d91ad67b0c830585f82cd83729fd832744707be8a2be800f76f3faadf6f_Device=CPU_Config=() +282:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=075342290aa43542c81f7ed4e804c905f110edc23440452c6d0c0f0c312b65c1_Device=CPU_Config=() +282:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=7293f99e38d76387b64632d06503c539c369e1ab78d9388e1af42d7071d8230e_Device=CPU_Config=() +282:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=dynamic_IR=85a35059512fed9e0c70cdcbd5e73c1e247ef97821d5193cbc4f7f7c3ebbaef8_Device=CPU_Config=() +282:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d8574c324ded923f1ea3ab0d8e09c626f3e8a04efe08258b665539c639b7958b_Device=CPU_Config=() +282:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=c808434d1d2cbd9ea66373f22c7e635c5bb2e3a6294f93421d1d9d34ac62515d_Device=CPU_Config=() +282:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=b005a58abf8192face35451602a847d378849223e4d433924581d28ef8141303_Device=CPU_Config=() +282:conformance/OpImplCheckTest.checkPluginImplementation/Function=ScatterElementsUpdate_opset12_Device=CPU_Config=() +281:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=db5c391cca1031cb4ec32def18ce3a4776c53f71e861c39b350fe5856da4fa43_Device=CPU_Config=() +281:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=f32_Shape=static_IR=c14da825d470c9141af0ea87eb82edd0866a415cb5ac59f1014c2ded35340201_Device=CPU_Config=() +281:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=8973f2f4c2be5d0ed57c94e1aed24bf809e51854c03c2abd73ea37ef7221d328_Device=CPU_Config=() 
+281:conformance_Sigmoid/ReadIRTest.QueryModel/Op=Sigmoid.1_Type=f32_Shape=static_IR=e939c4d2a27e1d7dba93827ab807881c32e47d48b726fec701712bc85c3404a8_Device=CPU_Config=() +281:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=2b927ee73187f1c1cbdb071ad3c0a72c9eb8a8631f2e7c6c3a8f8482c301fcf3_Device=CPU_Config=() +281:conformance_ReduceMin/ReadIRTest.QueryModel/Op=ReduceMin.1_Type=i32_Shape=static_IR=a2b9f0b4c044e23f536d137b6e157d1357df657d1af119cb8f71294d7dc098cd_Device=CPU_Config=() +281:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=990cce0ce92df99ae74ad8840f7b89d1c48c0044deb9cb71619b44a565eed911_Device=CPU_Config=() +281:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=d84c7cd2094853de1602906a47c4265442c727a532d85199772fdfaaaf7007dc_Device=CPU_Config=() +281:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=a72b942dc1915ccee8af871c00b16647db7c8935100b012f91ebd799bbe8d416_Device=CPU_Config=() +281:conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=c307ba8fc5f5d81037e40e46cb8ce1057d0bab7433138943596e5b21bb84221e_Device=CPU_Config=() +281:conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=9f7a30c4f90df2edf8e70468ac22f325bc97e99613fa6ee2aced93e71ea5896a_Device=CPU_Config=() +281:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9b64733aa0a8994cb3695a7c26f905f4d2b86c2e157edbd8a9970d33970a4015_Device=CPU_Config=() +281:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=boolean_Shape=static_IR=3c200607c5e2b90b5d75a439011d83643ba042c276c3033f58b3409c068faf8a_Device=CPU_Config=() +281:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=0df360e6d6391827cd65ceefd3201c263a829e50ed375522d4e8700f3879de09_Device=CPU_Config=() +281:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=e61665bc5590265246ab882bb55b9487e81412012ed98ac9cb16154bc8eddd17_Device=CPU_Config=() +281:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=769e7bb56fd0d0fa75fed14765279f68841e300b1450909cdcc802d347446b52_Device=CPU_Config=() +280:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=22dc864b06ef0c7deb8aecd74a26c7bcf75eee316288284413fb61381d79425f_Device=CPU_Config=() +280:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=42afa027ada245d36900a89c54a870ba5fc7fe3cc3bc0fc7dbda23af3e5111d8_Device=CPU_Config=() +280:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ba15b8b85609531d91c7809eb90c3a0079d19d36b83c8767306cb276c9d67ace_Device=CPU_Config=() +280:conformance_ScatterUpdate/ReadIRTest.QueryModel/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=b5f5ffd783aa251498c2011f19a63c1d68991e426384ef9728bc0b46587faa2f_Device=CPU_Config=() +280:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=0bbbd97c4428b9565666e9a1e56acc70035b378e16abafc54559a155583d9e6b_Device=CPU_Config=() +280:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=8c5831a53b504e86ce404e5a521921ef86bf4e130e79819c1abdb0e88a6543c5_Device=CPU_Config=() +280:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=7744b2351d112ed761ebe0f43945f7dfd58fd2bfbd94bc5a4737549923caf4ed_Device=CPU_Config=() 
+280:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=ed75de35729f20a3285506937672f78d2d5137851a3043d15f4eafc040768fc8_Device=CPU_Config=() +280:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=64d3761db7bdfd0de19878c66fa4465d084f7462c332fd978de458e328f97875_Device=CPU_Config=() +280:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=0f670e49f962b0a7abc6b4f1fbf9592db592a6a78eb3e083dd4027b9f9607430_Device=CPU_Config=() +280:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=5c05bbc013fc857a8f2b340df778f3ad5bdbc1b7273cf41b23d6da410205c612_Device=CPU_Config=() +280:conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=1c6cbe8477d09b0b193ddf9a453c1b6a8a79e3d1adcdf1c096709cee7a4866db_Device=CPU_Config=() +280:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=3d5c8f5c1545419050f982e8555a6ef9e5dcc06545b1a8573d710e8bc2375a6b_Device=CPU_Config=() +280:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=b8e37f2c8e2574b3f3554578b72e9df771c290c1bb47238fc4de9754c6e6f126_Device=CPU_Config=() +280:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=e8a26a33d6dbe0bb560820295fb6b8aafc3da0d2b78e29199d2f09e952722efe_Device=CPU_Config=() +280:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=c70693ee2f825a40f3e1fc8dd2ce9355690bc33ff27030f674d082a0cb343cc9_Device=CPU_Config=() +279:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3421ca968a9f4061cea0492ac3920fe1a29fb35093314cbb56a78bbb136d8fc7_Device=CPU_Config=() +279:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=5d738cba54fcfd696b0cb7e808dd466b4510900ccba26c728b5eb272a55d6bab_Device=CPU_Config=() +279:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=3e669c3f90fc7b2209d3d588932f8eff3827309a5928f4b27722139964e2c46f_Device=CPU_Config=() +279:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=i32_Shape=static_IR=a142d6fb0ae0c0decec2ebeba376ed65852e1c60b1c1abee7bc574d5ef3a6a3e_Device=CPU_Config=() +279:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=469d09b261b88011c82288ea622dde06d63805eb41dc256c901b0d206ac5780b_Device=CPU_Config=() +279:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=4212a754718adff414309fb1da18c4361792b5478366bfdc0994490c7bc716e3_Device=CPU_Config=() +279:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=08cdbd5ea904a12dde32bce43e6c512aacd0ff990d5df3a90ff625226c936edd_Device=CPU_Config=() +279:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i64_Shape=static_IR=d46d4fc3e7b3b2cea07f7ba710f77f7d99b4799e7fb0d3127ea6862f3f731ae9_Device=CPU_Config=() +279:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=899cf50d8feefa9c5e02f6fe88b79e66b59c4a53478755d51b3e82570683613b_Device=CPU_Config=() +279:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=46a3135a1078cd8732e84754fa66872648997791d16caa379a179e1a90960608_Device=CPU_Config=() +279:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=c0c33bc628fffda062b4f013c7d41d0f9080f14f41e084ac547099384a9b3d20_Device=CPU_Config=() 
+278:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ab760f0d90b0fef133a0555cb2a5d40fb525aef88e6568c5387a87d7e82f67f8_Device=CPU_Config=()
+278:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=45d612bd5bc0895879f727cffcc13c978977a0aa10dfc726d00d6450faeff068_Device=CPU_Config=()
+278:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=017d4b1dac18731e05634414942698ecbc750e306eb86e773ffe5007bfa9feee_Device=CPU_Config=()
+278:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6bbd8d7f90e7c210514c28d527eb33bf0889b1fafbd5cf7d9660532f5d6bd940_Device=CPU_Config=()
+278:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f5d63cfc40e19fff35078633a3354fe5e3a8b6dbadbc89e20747398d87e02176_Device=CPU_Config=()
+278:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6c1aeced5aaaecd99f3917a0f38e01902dbe81614ae4dc9a99fc09a379990abc_Device=CPU_Config=()
+278:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=c3ef1d1e09e7c0917298070d6909b455d5962c4bf3adf8d2d4c04f0741141f1f_Device=CPU_Config=()
+278:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=5aaa81d6f07ed880b1e93a0fce7b6aab4c3c88bfb1b4b6cda4ead15eb145af63_Device=CPU_Config=()
+278:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=f097978a7f18dafc7577a9dcf2306d82d397faf1bedb106ca3de70b3d9ada557_Device=CPU_Config=()
+278:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=704412b992d55bf9ff00d823458e5d3b3a369e47b3eca3429fed94b87c8da554_Device=CPU_Config=()
+278:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i64_Shape=dynamic_IR=08776190d0fddfcb15ad75cdbf6892de03f79e89d57e02b7c3e80b4a7a125d35_Device=CPU_Config=()
+278:conformance_GroupConvolutionBackpropData/ReadIRTest.QueryModel/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=3e893f54d0ed092823ca8e256e66c367f53e466f30573a7b5911a432d88299a2_Device=CPU_Config=()
+278:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4a80814933ec1c6198745b1caa4d5b7c9171395b6d8a53cd791dcdf64fa6c91b_Device=CPU_Config=()
+278:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=be59de0f93d8a22736d98d0aab618839905eb9a04f79c8d88d7ef08c7267f4ec_Device=CPU_Config=()
+278:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f49b212b59261888a5ea4652f9a4cdfe25657c7a0e4d3b6ecc16255e8d2e8cd5_Device=CPU_Config=()
+278:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=3e4364d93433ea741efe178b0c83cfb13c46259888aec468f59f77cd3f1bb39f_Device=CPU_Config=()
+278:conformance/OpImplCheckTest.checkPluginImplementation/Function=Range_opset1_Device=CPU_Config=()
+277:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6d705ecceb3a026a9be0b5963705b0c3c6be0123fb7d25885d3ae21213f1716b_Device=CPU_Config=()
+277:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=259cf71b937e6d184948130afa5684d7539769988cee7a74b06138ad4d09c689_Device=CPU_Config=()
+277:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=aa2c77112641e46dd617356a9cae765813b93353cd8a0f0508b915e0b03eede4_Device=CPU_Config=()
+277:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=1837f66989053233e19b617ab462b5c608981c0be175b57a2366fd41ca1a9fdb_Device=CPU_Config=()
+277:conformance_Relu/ReadIRTest.Inference/Op=Relu.1_Type=f32_Shape=static_IR=707190f77491e910ce61476cd3a9d5dc275e7c5ad820cd3894a37c348c2995ff_Device=CPU_Config=()
+277:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=6b0185f2e61c010924a76c5f136ed90d0e154f507028c500ee78bdc5a7ed65ac_Device=CPU_Config=()
+277:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=i64_Shape=static_IR=75c36f65570966e7f975e5c839036e0e13fe30e6d24ce4be8e6a0e8449173951_Device=CPU_Config=()
+277:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=1f7f2d40b938416773b13282d8ac09d81a50e4d5d7548f42fc5fd575f84e1385_Device=CPU_Config=()
+277:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=95884fb8d74cae609a67146ef94a84eadda8f3bd6369a9cb465bc413264a1d0a_Device=CPU_Config=()
+277:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=03ebf297344daffba82d04292a767fcd7c959f56788ede32ff0d7c5af06ea504_Device=CPU_Config=()
+277:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=0add7fb1bf1500ea125aa6d245bad577d6dea18d038c020d18c2dcd56704c671_Device=CPU_Config=()
+277:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=256f748d7b98b0eb70cc659403910bac929d62a2b153e63438f8746f602a83fa_Device=CPU_Config=()
+277:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=c0c1a43608279d8870258be63005b38e23fe5501876c87840cc16a0bb2cf8dfe_Device=CPU_Config=()
+276:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=ccef47523d9134720116dbd4a37d5038c9d15e2c393ccf1a6d24c3790529c282_Device=CPU_Config=()
+276:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=aa2c77112641e46dd617356a9cae765813b93353cd8a0f0508b915e0b03eede4_Device=CPU_Config=()
+276:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=01d609bdfca9f2a499a564f66ab9dd71b394310593d27b8739283b19980e2dc2_Device=CPU_Config=()
+276:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=2f23f1158754aa494abbf61ab15118173a7ccfe90523b2b9ab7cc3a6fdaa0e37_Device=CPU_Config=()
+276:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=ddacee38f2bf3dd45ddd36ba236440ae28b9737487e0fb186c2b9777c0b557e9_Device=CPU_Config=()
+276:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=683b86794b415f893e4d426a8c68aa38f46c250e4c31bc5f5807a86c20ffb34b_Device=CPU_Config=()
+276:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=a05339454f3f2a599ee9b041f1f01a124bad7d7e5fc1e6d133e00e43d002a086_Device=CPU_Config=()
+276:conformance_NonZero/ReadIRTest.QueryModel/Op=NonZero.3_Type=i64_Shape=dynamic_IR=31f428e60ddfdb3cb3c98c2cc858d0409fd35c5e6e97f9dcdfbb20a876c475a6_Device=CPU_Config=()
+276:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e0293184207036f6016f557f8df813c6536b18332f589245c5c606a3b36df1e4_Device=CPU_Config=()
+276:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=2737751bcc195e4aaa63ab6d86d803741817287d78fc864e18a31c328078940d_Device=CPU_Config=()
+276:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b91f26a0b7b56224c507de772631016119cd0bc3fd49527013f571e2db477402_Device=CPU_Config=()
+276:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=50a0e83d438a3220ed14dd8ae783e92c96381f645b10719669054ea944297244_Device=CPU_Config=()
+276:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=00709ceadeb9692263607310765b0957f34a8af1ebd17a13cc28d9587d360465_Device=CPU_Config=()
+276:conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=785551399ba4bb8eb76271bf698b3ca795b8388338f110843d5c78c03009625d_Device=CPU_Config=()
+276:conformance/OpImplCheckTest.checkPluginImplementation/Function=Mish_opset4_Device=CPU_Config=()
+275:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=93dee798d72e36c04cf60499e95f84cd6b63d84226d7dd1dc0edcf0875cf301f_Device=CPU_Config=()
+275:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3147f462ceda9b383de633ac08d6014a7779e74b169d3745990fa2b2799b1dbd_Device=CPU_Config=()
+275:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f7cf7cbc88dec99af8d35e65e926745ad318706c454b90740a19589285733fe9_Device=CPU_Config=()
+275:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a29be1e2e5f78c12657221f33e5309470a7a4dbb9061a8100d7c454215198f7c_Device=CPU_Config=()
+275:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=5f43b4d027388fff204c9c64df9f62bd2a72034143bd655e45121ca886c5d15a_Device=CPU_Config=()
+275:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=bff490cc95cf384b15409e96ee7d0995aa91640e23409cda381b85b2fef69e01_Device=CPU_Config=()
+275:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=5c5e10f28ed3a8d4ee0d3c8af982df5f383a4a1a713baba556dd17ee52e9ef32_Device=CPU_Config=()
+275:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=1bde2f2a7294810531e23de80f25a451b3033487b5919c949b708b273dc3973c_Device=CPU_Config=()
+275:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=56fb4fb30ec6fd9ddd0ff2e394434eb87546ac7de273f47b663252efa2a380be_Device=CPU_Config=()
+275:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=aed658319c31cdb1d3a47a2a93c7a4f524d9af8540e2019af10e8e1cebc3c2bc_Device=CPU_Config=()
+275:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=c4e2668f98d5c21fc085695c9b6037f08a1e6710e1854fa73b7465a618e99b95_Device=CPU_Config=()
+275:conformance_HSigmoid/ReadIRTest.QueryModel/Op=HSigmoid.5_Type=f32_Shape=static_IR=4a55e1cc1410675b7789f083f2cd3f6ff851f49c8a0818f5bf0dd27280b197f9_Device=CPU_Config=()
+275:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b12c40f6d576482396a94e28e0814488b87eb6844583bc87384ed385d45bd6e0_Device=CPU_Config=()
+275:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=57ba21d45369359487dc3b6a8feb0aa2b6fb21ffa328dc8e8eed58ee2896fdad_Device=CPU_Config=()
+275:conformance_Einsum/ReadIRTest.QueryModel/Op=Einsum.7_Type=f32_Shape=static_IR=282e24ea7ef9130becb8db8f0251c907b02a534119d08162e07091212d67f290_Device=CPU_Config=()
+275:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=a9fdcbd778622e442a42d8d2a1a12a1be0cf7e9d79c4d7ad56d5802c7a84d337_Device=CPU_Config=()
+275:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=362638bf065f1917d2b4dac3008a8f46f8f8d64a80d2442c1ad98f4fb943cff9_Device=CPU_Config=()
+275:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=941fa6fdaa34b8082171350da966423497232e44077f333cf3a46488bf237aeb_Device=CPU_Config=()
+275:conformance/OpImplCheckTest.checkPluginImplementation/Function=Sign_opset1_Device=CPU_Config=()
+274:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6289210c93bab9199850c9aef5ac3144ad0a900007dbca3e889a9f875318e9b5_Device=CPU_Config=()
+274:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=dynamic_IR=bc8918b82285bb58c2cf1b4b60b023262426de4044e0c2d50ae07f4b22ae0eb0_Device=CPU_Config=()
+274:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=f2df871f255156043f03f34333d59d9213fd52ea24f69dda1b04888ed269acad_Device=CPU_Config=()
+274:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=727e029d6373e823f7500e6bdfd1c07ba87fdb3ba428fd0a089885d7a1e91552_Device=CPU_Config=()
+274:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=6fefc3626ba6ef60433d3635bd5abeb3e7025277a86e2fd9d92234ff099c303e_Device=CPU_Config=()
+274:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=67a5010efb429e6dedf35481443b40a77cb01c1b4fb51ec5890fcfcb010fd6f7_Device=CPU_Config=()
+274:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=fbb85f74ecfa0ffc50b9e6ce637911b406f1fd6ad054a886b9c6ddc6bc898739_Device=CPU_Config=()
+274:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=86cd07b4db06e4210732553cace1797b55c19f590e2d9b7814eb30485d8599ef_Device=CPU_Config=()
+274:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i32_Shape=static_IR=22a8f509c3f76bc2dd6bc9a26ec4ab92a5b9ae4678532c886c1438669d627323_Device=CPU_Config=()
+274:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=a29bdaa31edbcf7b3dc392625c0aa0a27e827e1363d52519858c93defbf9ebac_Device=CPU_Config=()
+274:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=5fd7b424cb32653589798a45526ac4b3f3aafd29a58e5ed1cef16a958fd4a859_Device=CPU_Config=()
+274:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=bf7feb979b2eab03afc780965804a3f6b8471b574c36125654fcaf3ebc2c30f5_Device=CPU_Config=()
+274:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a46f51b7498c921515a53b67480ec4d413ed43ff809e1fa6a4deb7365f4a0460_Device=CPU_Config=()
+274:conformance_Elu/ReadIRTest.Inference/Op=Elu.1_Type=f32_Shape=static_IR=1cb500b61fe11278cc50fca509be3e7b654190294dd581c7862ea3f108e0c192_Device=CPU_Config=()
+274:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=346617ba1990b67ca1fec8ec219645b16aafa6c94a4a0f752c2f3633b85df679_Device=CPU_Config=()
+274:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0df360e6d6391827cd65ceefd3201c263a829e50ed375522d4e8700f3879de09_Device=CPU_Config=()
+274:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReadValue_opset6_Device=CPU_Config=()
+274:conformance/OpImplCheckTest.checkPluginImplementation/Function=LRN_opset1_Device=CPU_Config=()
+273:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=104a69286d09ab8a5a88403ce6b421979659231fe5c5f973393216607a995dcf_Device=CPU_Config=()
+273:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=7d73fec5a605ca6fc06cb014fb723236fd2ddfa1820648acb7fdae8530866f45_Device=CPU_Config=()
+273:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=u8_Shape=dynamic_IR=b12ccd794c23494b994608015d049eec0f2ca30dc319bd35c1adddb3e4b8e631_Device=CPU_Config=()
+273:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=d4e6cfc9844e29087dc5bb222a1822c26ec71f2e751575790add7c9b98a5a23f_Device=CPU_Config=()
+273:conformance_ReduceMin/ReadIRTest.Inference/Op=ReduceMin.1_Type=i32_Shape=static_IR=a2b9f0b4c044e23f536d137b6e157d1357df657d1af119cb8f71294d7dc098cd_Device=CPU_Config=()
+273:conformance_Equal/ReadIRTest.Inference/Op=Equal.1_Type=boolean_Shape=static_IR=857447d7e14c7516667094409cf5ef351000344fe170570671be0f71834d04f9_Device=CPU_Config=()
+273:conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=dynamic_IR=0723b6d683bc65225624112929bd8f7a0adde9e9c2265a2ec1a54b10c4433735_Device=CPU_Config=()
+273:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=d34bccebe88a4093c9810d56088e4bf07b55bdab1801d7d830360aea1be22499_Device=CPU_Config=()
+273:conformance_BatchNormInference/ReadIRTest.QueryModel/Op=BatchNormInference.5_Type=f32_Shape=static_IR=8f1629e9b003409304f12c3e315e8ae8246b3bc80208c3f612d5c5c179082a7b_Device=CPU_Config=()
+273:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=a3e2f08143425d4c6ed46ee301de31c5942694f79af0d297e4d1801e9a6a0ff8_Device=CPU_Config=()
+273:conformance/OpImplCheckTest.checkPluginImplementation/Function=Result_opset1_Device=CPU_Config=()
+273:conformance/OpImplCheckTest.checkPluginImplementation/Function=Elu_opset1_Device=CPU_Config=()
+273:conformance/OpImplCheckTest.checkPluginImplementation/Function=DeformableConvolution_opset1_Device=CPU_Config=()
+272:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=bbe05f014b2e4602f4e44d9c07795321404d2459bf782d2dd406de14bd2bd523_Device=CPU_Config=()
+272:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=bee11d430236dcbd0fb5efbae712d8d89d84beeb89e0ee60e0ba3ba9512079f8_Device=CPU_Config=()
+272:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e4b374c3afdeb45605c3ac745c03fc9eb938cf3f3828c119917ca92a6e9135f0_Device=CPU_Config=()
+272:conformance_Sqrt/ReadIRTest.Inference/Op=Sqrt.1_Type=f32_Shape=static_IR=8952b1ce6fc7bfd900e669e12b520b624c02026b458bae41afe28e1f76058315_Device=CPU_Config=()
+272:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=b94d2ed6a2b113922805a69578ec5ba2ba3d8f0ea46ca37f095b4ccc94d76b77_Device=CPU_Config=()
+272:conformance_ROIPooling/ReadIRTest.QueryModel/Op=ROIPooling.2_Type=f32_Shape=static_IR=556c6863ca3b12d255c4c81d92b4573203f02c5588e064fb22dd4aa23c8283c6_Device=CPU_Config=()
+272:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=fbdf008803736374dd213f1d7e0a041fc0e9b3f025c212a588fa05842ee5ee56_Device=CPU_Config=()
+272:conformance_NormalizeL2/ReadIRTest.Inference/Op=NormalizeL2.1_Type=f32_Shape=static_IR=e177da00e93cb595c18d142e92898135415f0de01a3b1ea763f3ffef3d7ce96b_Device=CPU_Config=()
+272:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f69e74dc680137ec5ef0b63e38d451da7bf1b61d2acabab77df46b76c9777402_Device=CPU_Config=()
+272:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f13dcb47235a9516298088a0c45ff56fdb7f95144da257a3dfa1c618c7373ce9_Device=CPU_Config=()
+272:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=0da39d97a2f46fcbdf524727d0283243d3bf0c3fab75f76f529b6480c84f67c1_Device=CPU_Config=()
+272:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=03e7b025285b1369ca39bcf887783a843fe06ea29f7f394efc8201d1b7ad3a09_Device=CPU_Config=()
+272:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i32_Shape=static_IR=d5cd3fb647dd4a57feb28366d922a151a3ffb1707864f2ac85595fcc30f222be_Device=CPU_Config=()
+272:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=e2d1f4fde3dc1889d4f86004173ea34a9d9836f645730727f5cdf90bc0738361_Device=CPU_Config=()
+271:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=af1f864a9f4bc94bdb713b0fed3f4c39dbd290cf7464f3cee8f1aded11981d4d_Device=CPU_Config=()
+271:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=f0ae8e6b136d1db7e5e7748c03eeaed6907460d3d3941fcb1a6651cff61be113_Device=CPU_Config=()
+271:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i32_Shape=static_IR=98932a2171e1c93b2bec3991892faaac027e1c319e91b9008ef0d0f469bcb0e7_Device=CPU_Config=()
+271:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=static_IR=9c57b92a55a929edae54a9705d80d730f7682ef015aa6923bd4658e244e9ca89_Device=CPU_Config=()
+271:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=e9539332df9388555564db1da36679acc7b505b8c1fa687731f2052999bfe1fd_Device=CPU_Config=()
+271:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=9031b1919c35a9df591ff64fbe4748c02cc837649899099542716f35b5c68cc5_Device=CPU_Config=()
+271:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=8b759b2f1999be207aeb39763bde3eba4aee028e9369a86a87493ff86f3fa014_Device=CPU_Config=()
+271:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=static_IR=2233a83397f11ea3c674c4845409c4f27f8bffbb8d0295712a2525c9e93d6041_Device=CPU_Config=()
+271:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=dynamic_IR=b94b5361ee75b3684455c2b871b656a50c72e325564787c302a714f222845b26_Device=CPU_Config=()
+271:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=d04bc06efa76ef2937aa1539893ec9c79ac61c765cb50cd4a26dbf5586bfc904_Device=CPU_Config=()
+271:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=7cfae687d408da17a0405d88f47e2b6623a608861114dc76018b8a2142453139_Device=CPU_Config=()
+271:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=933c6450f6856b32e879034662cf60eca53970c10106f8a11eb925e5621042e9_Device=CPU_Config=()
+271:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=8834a8881c2da907f6ae38d4c45100dde754e653f3e4994cf9add141c217c781_Device=CPU_Config=()
+271:conformance_Minimum/ReadIRTest.QueryModel/Op=Minimum.1_Type=f32_Shape=static_IR=c307ba8fc5f5d81037e40e46cb8ce1057d0bab7433138943596e5b21bb84221e_Device=CPU_Config=()
+271:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=28dbc474828462a812108c43a47aa4e70fa0d2e8e814bef5916092f3e8c7a2fd_Device=CPU_Config=()
+271:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=a50bcc7d92264c02627cb62bd0cac349b895311cef54b60a957a6366619e82f3_Device=CPU_Config=()
+271:conformance_LSTMSequence/ReadIRTest.QueryModel/Op=LSTMSequence.5_Type=f32_Shape=static_IR=b8e32896d2ab304fb4fdca3924e0110852da92be25307f30709cd7d897c2f038_Device=CPU_Config=()
+271:conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=29c89ebfa45163b40be304d7bfc96f3068cd96175db94e6ebda942d3c4af538f_Device=CPU_Config=()
+271:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=3d24c272ca88d4ee24f437a310abc05340e110f8596beb6a1ef96dd18818ebbe_Device=CPU_Config=()
+271:conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=556de70b55386fc9a264a24a9000d075a07636de6461cc5f4cd41af639b0597e_Device=CPU_Config=()
+271:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=4520f02da2bc674bf781c84ea3cca92375a1eeaa77f4f4f7e4cfc3ef75fb2964_Device=CPU_Config=()
+271:conformance/OpImplCheckTest.checkPluginImplementation/Function=Parameter_opset1_Device=CPU_Config=()
+271:conformance/OpImplCheckTest.checkPluginImplementation/Function=LogSoftmax_opset5_Device=CPU_Config=()
+271:conformance/OpImplCheckTest.checkPluginImplementation/Function=DeformableConvolution_opset8_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=80bc3dff7b0901463ccc52bd8e4a8e7522b1e9768421de45e63bdf8db601b9d6_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7b904365e0652437dcb59aef3b84da17f4205a821586224e41db1409d96e910b_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f8408a1d4e8c11ebbda01e0431217a5ff4ac6a869cc4cd3208cc9adc59d227fa_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a9c40d7a1ada834400ffbdff779b9970c83bd576891dfa7f637182cadf9e9681_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=243d5b44a22822e90c2f6b7c2817f8110bd6a060331e686c1fde1869f3392db1_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=15197edec77da431c491f42f64e86a811d89a337bf44615824226425b1c64d28_Device=CPU_Config=()
+270:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ca72f78cc4db6d46ce969f61c5bf707507ed8204785159e1ac5130e7aa251858_Device=CPU_Config=()
+270:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=83b83dd13b1733a50ec728ca6e7f09eb75641a573178816d1d33f30390464d87_Device=CPU_Config=()
+270:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=0d413b2d40036984ce2b85933c4b5ffda416e8779a20b602095d2654db296d58_Device=CPU_Config=()
+270:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=6d5907929d59d1f99e85183238e29d6602c84721d099284dcb8900ae5fc3c45f_Device=CPU_Config=()
+270:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=9bb8728e6e9f68cf68a9e39d1aa4c618c4aca4187d4262f735c0647d680c0506_Device=CPU_Config=()
+270:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=f9377788ac0fd1ad0a7f51d16543722cb5acb69640745df17d9f41f5d1d0b544_Device=CPU_Config=()
+270:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=927c151867c504f57aea681772afe32ec9c67cdaa4a0dcbc9055a8725c0296dd_Device=CPU_Config=()
+270:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=i32_Shape=static_IR=201b881bba09ed67334d9489a1a8971e483120bd1cc75a1aa1c9f015f760e002_Device=CPU_Config=()
+270:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=41bcf70f8013164bdfeb7e348c05e6d43d9a1afc49087c49745679bc3aaf1e10_Device=CPU_Config=()
+270:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=486675b6412030beffb4209c326672af07d343d5e1bbca31b9bfeed3cc339e3d_Device=CPU_Config=()
+270:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=static_IR=3f0c39b97aeab67748bd4039950e926a9d9f33b6d3261c4d65d048500adb5b7f_Device=CPU_Config=()
+270:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=4520f02da2bc674bf781c84ea3cca92375a1eeaa77f4f4f7e4cfc3ef75fb2964_Device=CPU_Config=()
+270:conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMCell_opset1_Device=CPU_Config=()
+269:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=57b104f3a1796c31d59d676d9f6d65789ed72fb21beb382bf418c452b8452d27_Device=CPU_Config=()
+269:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_Shape=static_IR=5224ffd078708e8917b14b4118bc4a42317c123dc0a5dca8234ad73d44daf845_Device=CPU_Config=()
+269:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=dda9a81656cbcb4ab5484fea52e7172baf67d46babce886726c96eaa1980766d_Device=CPU_Config=()
+269:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=e1ea320702cf8065ce85c325507a199b95dc9ffce3fa715b4d8195ca67a5a374_Device=CPU_Config=()
+269:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=b6984001a616b3dd3ef4b835b2dc6a48bcaf8882bfde7761b4e141733364f66a_Device=CPU_Config=()
+269:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=c39d76c89bb03fe251dfffdd9b8eb85c0585904ed9c5bb4660c3dedfdc451efb_Device=CPU_Config=()
+269:conformance_Einsum/ReadIRTest.Inference/Op=Einsum.7_Type=f32_Shape=static_IR=282e24ea7ef9130becb8db8f0251c907b02a534119d08162e07091212d67f290_Device=CPU_Config=()
+269:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=5980eb1b6c7a44c7812f89f10f0741e5925abda9ad07e1a82ae2a3310abae74a_Device=CPU_Config=()
+269:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=b5a1e5c47a0597ee9c9d0c0aca9909c596cbe71ebb069254460c2e97acfc1c0c_Device=CPU_Config=()
+269:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=3e016da7faeea7044ea204d1c3a2f1729d3d7ef0be27f5b769484bc7aebea5ab_Device=CPU_Config=()
+269:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i32_Shape=static_IR=a7f6c704686f1b0e6fd4ab522930aa3fb5b4cd4683b204aa31e5c73b427e7058_Device=CPU_Config=()
+269:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=2c2cec03b3ec1da29ad4d5fbb3530ee7343a436e27be923ee1f9dd97d29731a3_Device=CPU_Config=()
+269:conformance/OpImplCheckTest.checkPluginImplementation/Function=Gather_opset1_Device=CPU_Config=()
+268:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f208ab78a0ef0497856952f499578a17818269d066f4281183ef92ac2f9ce449_Device=CPU_Config=()
+268:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=41c1e13447cce632ccd478ec2bf36f09e510942449b0bffd3271f3b1f0b48d54_Device=CPU_Config=()
+268:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=0f3e035b6571da159267ff1f89b5f2b2d3bbd599760dc5d5721a1fb2ab2ea75d_Device=CPU_Config=()
+268:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=static_IR=7798cef9c8734d0908103b3c42fd7fc791806ad61d35dc680dc43d9597c6f1fb_Device=CPU_Config=()
+268:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=dcd71a51a6682c9bc461a6cb72d59082352ab8a020e1f79e64c3cc44a37b55ba_Device=CPU_Config=()
+268:conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=697bdfc59094203ea1616203d64759a40193f1a23a4a51f11340a7912e355cd1_Device=CPU_Config=()
+268:conformance_ReverseSequence/ReadIRTest.Inference/Op=ReverseSequence.1_Type=f32_Shape=static_IR=1ff07d9b87513cab5bbcf5059507b8c998fdb25d2802b267bb6c0b90eb3e231d_Device=CPU_Config=()
+268:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=static_IR=725aaeceedd7eba9be6ba4203e31cead733ed80dbafc33e902465d4338dc8f4c_Device=CPU_Config=()
+268:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=99820651f05bae979a287a8644f1b739637d684efad288b48044c2a664e43a3f_Device=CPU_Config=()
+268:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0e78ae14fcef33de9637ac99e87f672b3247ea32c221a4b555b2e5bbdff88788_Device=CPU_Config=()
+268:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=i32_Shape=static_IR=eabe482de99e120ef1260cc91a746df95f8db04fa1cf6832dc45b3ee1b38f9c5_Device=CPU_Config=()
+268:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=7147d6ead151efc24a53c196b63fc441e240c34b41ad2226a535580eb2a3f3d2_Device=CPU_Config=()
+267:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e2734d3e803c031e2fd56d0c9f7a72818227bc7981d9f7d9d1148f1cf07135fa_Device=CPU_Config=()
+267:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=28bb0064e4cb56c497227ec69899b08dc09cccbf7d390555416aff617a393f81_Device=CPU_Config=()
+267:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=564cd54b2564c7e39fda0c5e580c274b7bf99603760f6c66f03b4450f23cc4bf_Device=CPU_Config=()
+267:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.11_Type=f32_Shape=static_IR=8c82cead166c3db4616f034b66c4795cb4bed653de41d2b6dc71b48ce76a296e_Device=CPU_Config=()
+267:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_Shape=dynamic_IR=fb3cc70d8993f96508516aa7a36cdcb9973edd563c78a7d6d5ac5ca9f816e3fd_Device=CPU_Config=()
+267:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=36783f31e83ed0f978f00a1cdd87a25b4b881c251fe059e5d2829be3d0b45c5c_Device=CPU_Config=()
+267:conformance_ShuffleChannels/ReadIRTest.QueryModel/Op=ShuffleChannels.1_Type=f32_Shape=static_IR=46e851dee1f7bead1a6e2459157df33266c45559375a1caff90a2732cacaf881_Device=CPU_Config=()
+267:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=223a34e46344a0dff7f35a637c9bd08e2a76a552ca87e5bf0134c9fc6d6be41d_Device=CPU_Config=()
+267:conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_Shape=static_IR=a3b350b1516cb0391e088317ea67433757a08847710c4a4bff143922873208df_Device=CPU_Config=()
+267:conformance_Less/ReadIRTest.Inference/Op=Less.1_Type=boolean_Shape=static_IR=953b15e350d9a27c4d048cbae41a278c732f3b3a6e8debd7fd2e75e99a015966_Device=CPU_Config=()
+267:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=57921f181e48af2b294b923633e457650e5ab2a9ac7f5d4d07930974ad5e03e1_Device=CPU_Config=()
+267:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=360205b273a323d2cea16c9ac98847c904ed6cabb2412d3b49c27fd2eec52ab1_Device=CPU_Config=()
+267:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=2538d525d8f11b9f4961c2a4a8cc36fd27d8b3d97271ef7db4f7eac9732b71f4_Device=CPU_Config=()
+267:conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_Shape=static_IR=4ee688aa25b818f6e6986c7070e544d0eef9ce888124d85c0e5e126802213a46_Device=CPU_Config=()
+267:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=c98e1e2347c7b6939804dfcfcebbbd57d4c05e8d13b35b2611912290d06107ff_Device=CPU_Config=()
+267:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=0d782801290370c7c390ad549171ec3500ab344b8b34ce4b8fd8b05339fe5557_Device=CPU_Config=()
+267:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=fe80b80ced0033aef6f7f97abd22de1271430f700d7dc9aad9a2a819f91e11a5_Device=CPU_Config=()
+266:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ef702f626a20bec33a58f2596e4e6e15f105860ebfff1d6f42116a514d853c4a_Device=CPU_Config=()
+266:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=22dc864b06ef0c7deb8aecd74a26c7bcf75eee316288284413fb61381d79425f_Device=CPU_Config=()
+266:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=i32_Shape=static_IR=75f0349e33d0151c276e3f5ce34f7c1a71f5572331157b2e34f889773d7d8754_Device=CPU_Config=()
+266:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=4d10da0860e049587221c12f55c3bca9fc587b74dd3fec194c8ba5854a736d93_Device=CPU_Config=()
+266:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=64186bbc89d54f073554e029b8972fbbfba2abce8026a379b7ac3833f84ac9d4_Device=CPU_Config=()
+266:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=static_IR=2055c46f29a25953e331656839e227b0472b10695ea23410b64428d14232345a_Device=CPU_Config=()
+266:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=8ea778d7d98fd08efe4b2efa501ef3599df00ca9bd036980ce86e0d6dc454b96_Device=CPU_Config=()
+266:conformance_Minimum/ReadIRTest.QueryModel/Op=Minimum.1_Type=f32_Shape=static_IR=a5a2ba7fff85401feb05248462e85d334440769790e7e6ba1a75ffb413f7fc64_Device=CPU_Config=()
+266:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=static_IR=78239cbf0f8d473af2209ad3d9297e02208c110efa7af981f8c09ea7d7290032_Device=CPU_Config=()
+266:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=eeeaf32688af20dbc39dd3705dc09fc804c0636d4d5807b003c002eaab1e79dd_Device=CPU_Config=()
+266:conformance_Gelu/ReadIRTest.Inference/Op=Gelu.7_Type=f32_Shape=static_IR=4ee688aa25b818f6e6986c7070e544d0eef9ce888124d85c0e5e126802213a46_Device=CPU_Config=()
+266:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i64_Shape=static_IR=57d49137431cc7fe4364cc2fef13111fb9f7a5a908b2d7b6f5663100ba5d636c_Device=CPU_Config=()
+266:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=dynamic_IR=c3d754fe46cacaaf519f39fdc6feb9df6b23d92f6271f6e731c2a8ddc24a948e_Device=CPU_Config=()
+266:conformance/OpImplCheckTest.checkPluginImplementation/Function=Acosh_opset4_Device=CPU_Config=()
+265:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=05690f7225eecae70805d45641cd02c02c46bc61f9fa4cf91d3ec7ce94f6fd3f_Device=CPU_Config=()
+265:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=deec30214c79ceb43a503bf521937a2bd554588775195d0e6302c521cd2b55ab_Device=CPU_Config=()
+265:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b01e9e819c2e610a4fdedcb693f536f99b9dbdeccfcf9b0e70dc37c19c365421_Device=CPU_Config=()
+265:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d0bade0811581a6fae53c343866f1bdb63acfe07776fd60b7e791f8edd3f88b2_Device=CPU_Config=()
+265:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=41d80c815a196293f7d22af59f5f602f7e4f11e06208a693b19743fb796b98a8_Device=CPU_Config=()
+265:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=f32_Shape=static_IR=9f4d316675c933ea5d6511324e3d664440a8ba287cb2ffe768517f9cbfb613e7_Device=CPU_Config=()
+265:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=static_IR=461bf15d226b7ee3cbdcbc8cf1806e98267c5f14f0aef49dfb9de094f56347b7_Device=CPU_Config=()
+265:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=109afa0426a29179db58e16917b829096af105f0def2375a589ea1391138ee2f_Device=CPU_Config=()
+265:conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=206184d6fe0a3ab9fe71914c66d3804e145caed7cf3ac09cb1d50183144d6ac7_Device=CPU_Config=()
+265:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=7244cd4799e0eab987f823edc7d6038b76afa7585e4663278be826124c5596ed_Device=CPU_Config=()
+265:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=6b69e46c11a2a82ac7ad6697cd768d88da6e870e75f489779bbd1714bad23450_Device=CPU_Config=()
+265:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=5e7e2adae49fae3a376e9a5a971513a9b23b5fe4008ce51814e0fa1fd91f1f22_Device=CPU_Config=()
+265:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=941fa6fdaa34b8082171350da966423497232e44077f333cf3a46488bf237aeb_Device=CPU_Config=()
+265:conformance/OpImplCheckTest.checkPluginImplementation/Function=SoftSign_opset9_Device=CPU_Config=()
+264:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=42062545b0991e415aad8d29d47de2a278e5791996ea55974411694aa821b54c_Device=CPU_Config=()
+264:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=14c8a8bb712c40d63edf76de9a75dd1dcd53a2df8c6098c80ee760119966f364_Device=CPU_Config=()
+264:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f89e84d6fb931cf0cd074acd01a50e50daa47ad88b1b74e4b3671d63bd7889f2_Device=CPU_Config=()
+264:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=db87efd37ce8dcbe14286197df3b7a345fdc46ccc03d7d8bda17e3791df332aa_Device=CPU_Config=()
+264:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=f5807b455d9031305e8225f2b65fd5cc289f61785d762f19a275280085a2e5e8_Device=CPU_Config=()
+264:conformance_Sin/ReadIRTest.ImportExport/Op=Sin.1_Type=f32_Shape=static_IR=54a909996c38d86ec830295e37f0fc0070260101390dbaae2cc6eaabea82a7b5_Device=CPU_Config=()
+264:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=bff490cc95cf384b15409e96ee7d0995aa91640e23409cda381b85b2fef69e01_Device=CPU_Config=()
+264:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=30897cde05f349bface3d90a8d730da4c4c3e5133c59495d59258224dcc29ae6_Device=CPU_Config=()
+264:conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=802164adc9e651b0a3ec0b5f96341fc3cbd098042412236b65e0c8f77b5153f2_Device=CPU_Config=()
+264:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=139cc84777f1e0d489245d058877303e72a93eba3cffbf5f919de21b4514bb0d_Device=CPU_Config=()
+264:conformance_GroupConvolutionBackpropData/ReadIRTest.QueryModel/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=74f34c8b7abfe0f7afe021ba5d4861e29f9f3915beba5cdb2af936f1f2409fb6_Device=CPU_Config=()
+264:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d12581f68d14d140f4b982b47b97000f6b666cd115483247d369fed87267556e_Device=CPU_Config=()
+264:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=dynamic_IR=8029d5dae7f4721807eb717310512bad44630efdd0a64962496a0fd802a12325_Device=CPU_Config=()
+264:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=33297e2649e2f0c53b0bfb5e349d83ede580471764202480855e3f1efc8017a5_Device=CPU_Config=()
+264:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=33199e062b7b59c8244477fd2682a08876e72126701842265efc0c9fb4a90c94_Device=CPU_Config=()
+264:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=29eeefa6ea54ff2530e2e17153db324026e85d4e45432c053ca066699187bbc5_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=65493d18baa6530c757031b74c5fbd51757e2b04bb79149d3acbf6c40bac11c1_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=ade98724a678a73bf789fc539dfa277031242ea3a694227dae29c11b45cdfb9e_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=b706dc1dbca4cc6c308f2cadf799fec41a8b3f08251de3a58444f0d760994cbb_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=348254d0e2b145f9e5443b4d4470b2ab29487acbb34a71285a5c0e1bd29cb942_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2f96ff03126561aa475067ad88e454b2da78fc8f0b816dc6c01ec5c81568288d_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=cb7ad9dd22a7bccd73ade4d4aa78f9a25cc2bb7f0c08a01064491200089b3718_Device=CPU_Config=()
+263:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ad640e033c11cf7d81ab237630f0ba656968f620eb4ed77f38cd79c6cbac42f6_Device=CPU_Config=()
+263:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=2b02493e0e09536d01441e885df61f27f2202a3e16742695bcc4d1d0d892c56d_Device=CPU_Config=()
+263:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=c55846f7a08af86fb1c914c925433852fd4bc735f671c87e965a6db9b6971708_Device=CPU_Config=()
+263:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=7df296e0e156bb36cb643a292802f9db374c77035c6a05ee4a865fbe2c6ef92b_Device=CPU_Config=()
+263:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=a7b7ec75e1b343acfa06ea53d7d5b631c06d44c68b1fc92555d7168c77aeadb3_Device=CPU_Config=()
+263:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=789949951bc3afd20fdff943ca2a706f79eb4f95be60086ddf632b43c3e401e6_Device=CPU_Config=()
+263:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=dynamic_IR=e306da3fedc4369302fb21159f2bbbe65849661eabe5bb83efdad3e83f64fd68_Device=CPU_Config=()
+263:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=09dd33f661a07095dc47e3e5205c9fc6dceda72526e79be0751c34823c7e7cf1_Device=CPU_Config=()
+262:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=e6aef819fecf54f7c98b15f330074d33ea0ca6c8fc3edd895b362f04449b12a7_Device=CPU_Config=()
+262:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=d5f5f2d39bfe4ccc6f12f76e5eca8e2e40ac7ac6c5f38a7cac21970df213d4cc_Device=CPU_Config=()
+262:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=7e386c35d9d397e043876a23a2b9e5885964cee59bf46f1ae0660e6a84641ea4_Device=CPU_Config=()
+262:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=5d791fd5b82a74a42073567349728035c4ac52ea64c1a154a73bd4e61d1b42dd_Device=CPU_Config=()
+262:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=dynamic_IR=debf36fea706c02dc67354edf761f0dc931ebcccbed285f186164fc4b9532766_Device=CPU_Config=()
+262:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=static_IR=4892263cb1ea7a434b5771aa16f07885c39710f67fa1411dd9235653a6b8622c_Device=CPU_Config=()
+262:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=dbabd4c2992053ca70e9d6a489b437cf8d1f13807220adb5054204e9bede00e1_Device=CPU_Config=()
+262:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=ea63b1a277de19e725624c4d57d7decf2a01f9764510b0849e0b9dc49ad24fbe_Device=CPU_Config=()
+262:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=3801fd5b86bf772977c131734d8356c8dfa41b9056091937473be600e332fbee_Device=CPU_Config=()
+262:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=62b8aaf25e8c93387362b0c657886c31c39a7330cf3455486b8943a1e375ef5c_Device=CPU_Config=()
+262:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=5ae2e8ce34957ac812bd04943714d0b0ca6e2098c46caccfd775620d7f373cbf_Device=CPU_Config=()
+262:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a4e797de860d6e4dcec00062050168ba9745d3da953b9c644de654f4d2818b77_Device=CPU_Config=()
+262:conformance_FakeQuantize/ReadIRTest.Inference/Op=FakeQuantize.1_Type=f32_Shape=static_IR=935369702948a57e71d169e75218162f370b48f344fe819f11112c011b6626fc_Device=CPU_Config=()
+262:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=b38f11a07d752c83a5e4fc709d5b78fe9a40ef3394f4b617a30df29c21640338_Device=CPU_Config=()
+262:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_Shape=static_IR=a3f2389f6a8a495885efa87742d53e1e154f58f8fd6e83df89bddf5922247095_Device=CPU_Config=()
+261:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e27f0bcb3118a7cdb488f4685707bec982ae54ff8bf7e97aff9ea6ecedd66714_Device=CPU_Config=()
+261:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=fd97b6aab7b86b0dd2f8c0ce622601e80f3b864d23d7d4f61d2dfa42195936b1_Device=CPU_Config=()
+261:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=3c0b9fab07568e0eebb5e5d068cfccdd617ee6e98e4253a0461ea8d3f0f582e8_Device=CPU_Config=()
+261:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=8fc5ce53f1f6b924371ab2cf156ddbf7aea234b17befdcb6206ba51a7ad988c9_Device=CPU_Config=()
+261:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=745c0804609863998b4bcc6956b1e78fc221e0e4f1535ab09b89a9c966a16995_Device=CPU_Config=()
+261:conformance_SpaceToDepth/ReadIRTest.Inference/Op=SpaceToDepth.1_Type=f32_Shape=static_IR=9296c80cc93d8ab7448140ad2f31b3b47a0759c383d1bc045704985503732195_Device=CPU_Config=()
+261:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=7fb9c2cdb4c82a4b65d110fc84c03948917cc1921c372cc645cab00a3377fad8_Device=CPU_Config=()
+261:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=70d4da84623a0af3bc8362a828bac5ef13285498b420a3df6bf2e88bf05311db_Device=CPU_Config=()
+261:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=8de81b7de05bdd151427e1b5b03a8b4222284dafd31f9d4b1c3d0917995e9310_Device=CPU_Config=()
+261:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=96117baf3ff208c696a9796404eec467b613c37977067ff0cc62e39355856d30_Device=CPU_Config=()
+261:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=e1a894c49294c6930cb8f8c857ec745fa2c6d18cc3607389c89af4d13df4e411_Device=CPU_Config=()
+261:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=i32_Shape=dynamic_IR=6a0218ea2e7eb0329e4915f2f6a7c215742d2469e868a4a8e43c683c2dddc01d_Device=CPU_Config=()
+261:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=c7998d9fa7e16dedd52f8cbe3d0814f2f3b30ee6d728881d64c4743e0ff6fae0_Device=CPU_Config=()
+261:conformance/OpImplCheckTest.checkPluginImplementation/Function=BatchToSpace_opset2_Device=CPU_Config=()
+260:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=557b6184296452c25e420a307a2021cfb0eedcb73e42bb4bc247c34c15b18447_Device=CPU_Config=()
+260:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=30eb0edc699f72085fb77a6cc31ad4aa9e62cf97befb64273493d234494fc64c_Device=CPU_Config=()
+260:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=a110c620d27325938e9febcd9d757a5525c421bc29450fea960403fbca3507f4_Device=CPU_Config=()
+260:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=i32_Shape=static_IR=75f0349e33d0151c276e3f5ce34f7c1a71f5572331157b2e34f889773d7d8754_Device=CPU_Config=()
+260:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=81313f6065af987d98f37a1709f149d804bc1a36bb0a5c4a11223b29c6ccc3d2_Device=CPU_Config=()
+260:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=7e88dcf638caa6058b01dd6c31ba40efb0fca8077cc295ca63c2ebe4c7298926_Device=CPU_Config=()
+260:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=6b0b123bc93e799aed7bee84e55ed9def25af4f11d27958d8368983eee9c527b_Device=CPU_Config=()
+260:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=69f4c39c3fb2dfc55714893e1e45761238e74bf28ecfadbee3f4965b5a379888_Device=CPU_Config=()
+260:conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=03c3e6567da3c139c19e0ce0d301a6076b2e2446d191216c7bf38bc030ea7855_Device=CPU_Config=()
+260:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=05e89f7690a9c7d235c753aa4af28229a44fab527f44ff4832ebcebf0c9debfe_Device=CPU_Config=()
+260:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=5a1aa66136ca2be83f714067139e11fcbf672d73f8b28c57d29333b885a17f83_Device=CPU_Config=()
+260:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de46537615051a46fea66871c5fc6ef3417b577ce42bd1f7e239d821e1ed5c51_Device=CPU_Config=()
+260:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=static_IR=f26c1f41ef689dde33e9d61b0a1066788b8397ba6a170f5eb1362726ba9c0868_Device=CPU_Config=()
+260:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=eeed611756b048927c290a65dd92a5833ad66d347bbc772abddaa751f2016ff1_Device=CPU_Config=()
+260:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=u64_Shape=dynamic_IR=5f87db7fc306440f807b413acb7eb175932f29f59d1b5eb4a9df8945b9aef9d4_Device=CPU_Config=()
+260:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=269ec3789c76e21789e01e31f13f0f1a4895905b3f131e710e663ed2a0d8f632_Device=CPU_Config=()
+260:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=static_IR=3f0c39b97aeab67748bd4039950e926a9d9f33b6d3261c4d65d048500adb5b7f_Device=CPU_Config=()
+260:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=dabed23c3f30d92c6fcca7a6845160022837de8cbfa1077c222e6f1224b745e1_Device=CPU_Config=()
+259:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=eb98c3593d72ffaa01de42caf4832854d9486b4148c57742c6dd72a251f8cb45_Device=CPU_Config=()
+259:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=static_IR=bcb10a9124f9b0471012f9e22d4aed5c2a47a55e652312e8a8382dc0d809a23e_Device=CPU_Config=()
+259:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=d7fbbe9f8f446b009ea2de8594e4cfaad46432734cba27596e3fa721f04c04ee_Device=CPU_Config=()
+259:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=d6250086b712a16042ee74438bb61b89fbfaa5bae433049207402d1da4cffaef_Device=CPU_Config=()
+259:conformance_NormalizeL2/ReadIRTest.QueryModel/Op=NormalizeL2.1_Type=f32_Shape=static_IR=e177da00e93cb595c18d142e92898135415f0de01a3b1ea763f3ffef3d7ce96b_Device=CPU_Config=()
+259:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=ee49657e646466b0c22aff01740a48c1cc271a828a8c3e10a21d75b04f511cb1_Device=CPU_Config=()
+259:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=i32_Shape=static_IR=201b881bba09ed67334d9489a1a8971e483120bd1cc75a1aa1c9f015f760e002_Device=CPU_Config=()
+259:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=d575b00d2b6e155319fe7120133d8e0c3dcb5c79bda710b0650fa48543dc5c84_Device=CPU_Config=()
+259:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=165dc8f683138c4d731ee850aa6212a70851b91630cc42e2b4e9d46e0ab15b57_Device=CPU_Config=()
+259:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=e0422b2fb57587a85d9ce1532f7fc28a6bd01e72a325d42d9045419dda4bbba5_Device=CPU_Config=()
+259:conformance_Erf/ReadIRTest.Inference/Op=Erf.1_Type=f32_Shape=static_IR=2e5aed1612da0f720adb051e22460983a3911c38cb09184d812ceb949870f450_Device=CPU_Config=()
+259:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=1988b645a87be14c17740085aa8c4a38e88cd2111f0ba294f77ed0bf856b0561_Device=CPU_Config=()
+259:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=3c1c8bc7ce009c03509ca9d6a86f3d5cff89be49439e7513edcde4e62fbfb8ce_Device=CPU_Config=()
+259:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=64bd2f48b3326db083653b5993c9a75d21be515cbc5af67c62c981e9744e2f0b_Device=CPU_Config=()
+258:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=59d132b45e2ac60a670eb43efafa43f065bb43d492208ac670fc8234b4f618c9_Device=CPU_Config=()
+258:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=952ad9af4561d61157cc5e73bbc5608bf8cbea1473c52a566ad1ae7252bcb35f_Device=CPU_Config=()
+258:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0dafd9117cb3fba3a335f7cd28aaa3fbd9276878383657b357210e135a93d916_Device=CPU_Config=()
+258:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d1b4dff28b71e41d8106d3730f2705e537487aafe0dd53ae7dfba9ec21724287_Device=CPU_Config=()
+258:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=4d2c49ebbc46b60233510b63e280442319496782da33185f7c2d6003611f937e_Device=CPU_Config=()
+258:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=f32_Shape=static_IR=2055c46f29a25953e331656839e227b0472b10695ea23410b64428d14232345a_Device=CPU_Config=()
+258:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=4e14d87b7667a7900d4427ec46c72eb3c7bfd2e3d86e5bdf92eb2485059b4951_Device=CPU_Config=()
+258:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=static_IR=5fd7b424cb32653589798a45526ac4b3f3aafd29a58e5ed1cef16a958fd4a859_Device=CPU_Config=()
+258:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=a6f5b58b1d85e5a99389748ae14e507302127e583c436dd9e6015d3c33ab0536_Device=CPU_Config=()
+257:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c90ac17f02f16c647a0a206326f24ac348a0f8a7787037486e52ecc8c091818e_Device=CPU_Config=()
+257:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=945bd465761a4d9b013b0a5e88a3a9e041d8bd8bfa8df8044f28d71ba26f224b_Device=CPU_Config=()
+257:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=fd97b6aab7b86b0dd2f8c0ce622601e80f3b864d23d7d4f61d2dfa42195936b1_Device=CPU_Config=()
+257:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=4479acfb061c41832bd1f2ff0de0141dde3a3c496ee4471523fac0a37451311d_Device=CPU_Config=()
+257:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=69c87ccfa0080f65ed28b9a088343db5ceef524ae917b8e259b1865a017df22f_Device=CPU_Config=()
+257:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=525ed9b2af76610bf0ee3d11cb1dcfd46059335968359c143d0da7465736ac2e_Device=CPU_Config=()
+257:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=cedd3bc0f0a8e20fe947135bd6ab9515283275867e1b837d36f2fac72363f449_Device=CPU_Config=()
+257:conformance_NotEqual/ReadIRTest.Inference/Op=NotEqual.1_Type=boolean_Shape=static_IR=8fe4bce2e674753d81a1516280769a06cdde538e658ae548087e4888ffa2905f_Device=CPU_Config=()
+257:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=a1b6d340122e8e3a7a665c69fb11b3c7b460eae79ec81ed3c32e878d10d5c3eb_Device=CPU_Config=()
+257:conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=98a6da6e0972a1b70caa5df788a6921d4e470565dc3880faa59e913fdc15f460_Device=CPU_Config=()
+257:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=static_IR=66cff13148d62809cced5a381c251525486476f7178eddd3c8e45eeed40afd06_Device=CPU_Config=()
+257:conformance/OpImplCheckTest.checkPluginImplementation/Function=SoftPlus_opset4_Device=CPU_Config=()
+256:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=4479acfb061c41832bd1f2ff0de0141dde3a3c496ee4471523fac0a37451311d_Device=CPU_Config=()
+256:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=557b6184296452c25e420a307a2021cfb0eedcb73e42bb4bc247c34c15b18447_Device=CPU_Config=()
+256:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=37ed85c113d481da6d55c0a820d49090a8b256694e0f1b111feded60fe708279_Device=CPU_Config=()
+256:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=9aba412b059ee77c603bebe3e49240d6f2183168002d25bb7bfe62f1224be2fd_Device=CPU_Config=()
+256:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=dbabd4c2992053ca70e9d6a489b437cf8d1f13807220adb5054204e9bede00e1_Device=CPU_Config=()
+256:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=27e8804992c0d74c18c958f0876c06be6c7eda2b36fe7de3ab616b577dce13c6_Device=CPU_Config=()
+256:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=i64_Shape=static_IR=056c07f9ad8e27e01b269b5136ee29b4cb4d1229a009cda07e4fd32c45d4e97f_Device=CPU_Config=()
+256:conformance_Loop/ReadIRTest.QueryModel/Op=Loop.5_Type=i32_Shape=static_IR=5b9cbac8797158a77d5616e8b7e5d8132360e23e26d31d845f0d129df7bfd7b5_Device=CPU_Config=()
+256:conformance_Log/ReadIRTest.QueryModel/Op=Log.1_Type=f32_Shape=static_IR=038bd1e152575a3b8ca28bfe18fdcc9cbf19c9489e7bb831b9d5f56f7499cb7c_Device=CPU_Config=()
+256:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3d73edb68da4aee1c052b79ffce030b368f204c04bffd9a9dc01a9b54de932e7_Device=CPU_Config=()
+256:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=effa926dbd9beaa9b2b7b660288ceab99da8cfb440c4b01b7779d1bc25be336f_Device=CPU_Config=()
+256:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=1409169e395a3eb90f9235b74f2f8c94e0e27a63fae33cda153d991ae1cbb68d_Device=CPU_Config=()
+256:conformance/OpImplCheckTest.checkPluginImplementation/Function=Pad_opset1_Device=CPU_Config=()
+256:conformance/OpImplCheckTest.checkPluginImplementation/Function=Convert_opset1_Device=CPU_Config=()
+255:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=686b6d84e29d87a91c8177396d2aa5a1fbb88656c79e41af9a0b30b42805f477_Device=CPU_Config=()
+255:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=()
+255:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=984e628a0090ff9d04bf8f41b795f0682dd3083fb78b71397a51cc2efacee247_Device=CPU_Config=()
+255:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=138e0258106faf2065b52655adfb8b45d49b677f9cd04850bc5ac9335a9d16d7_Device=CPU_Config=()
+255:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i32_Shape=static_IR=e6ee69f681f9388da19dc9c17781710c5622ecda436aa2d4b018578548acebc7_Device=CPU_Config=()
+255:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=19f9f95d2205816fc002d8eaea7cfb19f19218fbc3528e4932b99f1486b62827_Device=CPU_Config=()
+255:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=f32_Shape=static_IR=4d10da0860e049587221c12f55c3bca9fc587b74dd3fec194c8ba5854a736d93_Device=CPU_Config=()
+255:conformance_Relu/ReadIRTest.Inference/Op=Relu.1_Type=f32_Shape=static_IR=99a80c495a8fb4626995167a3ad2efa0efed7696459f6219125414a2bd20dfc5_Device=CPU_Config=()
+255:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=d4e6cfc9844e29087dc5bb222a1822c26ec71f2e751575790add7c9b98a5a23f_Device=CPU_Config=()
+255:conformance_PRelu/ReadIRTest.Inference/Op=PRelu.1_Type=f32_Shape=static_IR=86cd07b4db06e4210732553cace1797b55c19f590e2d9b7814eb30485d8599ef_Device=CPU_Config=()
+255:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=ae817dcac1ed2395cc4098f67bf6d2bcbecd8b7e91ef7592622d1ee75ed4a3cc_Device=CPU_Config=()
+255:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=14f550bd7e83223ffbf501918141376e6a144484865f03c9768fe9da49a9f06f_Device=CPU_Config=()
+255:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=120b0e6b0c1f7bda754d62ac7c88e7c8bd9e96ddb85e7e5f29decdaa7c1cde96_Device=CPU_Config=()
+255:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_Shape=static_IR=6eac2d4e0df77b93f566f0d226ce4972da143d2b3fd794f7d316faacce442035_Device=CPU_Config=()
+255:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=922699707423c4110bf8a551eaf7dc3689fd3673fff79cca21442cda90c22dda_Device=CPU_Config=()
+255:conformance_AvgPool/ReadIRTest.Inference/Op=AvgPool.1_Type=f32_Shape=static_IR=cee58d2e3f2d6ef0061c5b245a15c60f0a26a58474c015f71dbdbc0c171b2a8b_Device=CPU_Config=()
+255:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=525408cae199f0936f7552165ba12d61ced6b675d75d56f1d69be8281feec5d5_Device=CPU_Config=()
+255:conformance/OpImplCheckTest.checkPluginImplementation/Function=NormalizeL2_opset1_Device=CPU_Config=()
+255:conformance/OpImplCheckTest.checkPluginImplementation/Function=Exp_opset1_Device=CPU_Config=()
+254:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=516b04726c16c5c01fbeb1c97f8f9d9376b80e9341d2029c634f7fe4975cc4be_Device=CPU_Config=()
+254:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=2606bb36cbc87d845c9f678ac84e47c0893f0b86a3b675e70018d1e535234875_Device=CPU_Config=()
+254:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=21950c433f50ded0f662b9e0591e756a8dd685bc11a8296bcacc57ca1a4968b4_Device=CPU_Config=()
+254:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=683b86794b415f893e4d426a8c68aa38f46c250e4c31bc5f5807a86c20ffb34b_Device=CPU_Config=()
+254:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=2463ef4b8684fd6b391fca0b123328e1d695b47017fe94ffe5a419a3c22ce93e_Device=CPU_Config=()
+254:conformance_NormalizeL2/ReadIRTest.Inference/Op=NormalizeL2.1_Type=f32_Shape=static_IR=3bfa35b53e4bb74a9e450b1220a5d3c061f050e498cf86c8f72118052b7fa252_Device=CPU_Config=()
+254:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=bc1a7618e707ddd2c4773d1a2234e6dfb39954ad872abdf38a18d653ec35b26f_Device=CPU_Config=()
+254:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=60ab42bb613fe785777ed45bc99044f41dae00316065ed5e5f07e69f5c861fc4_Device=CPU_Config=()
+254:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b46a2ee4f7042328b2496382ed2bb9cf39621c3e3e27fd1d355c9682543effc2_Device=CPU_Config=()
+254:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c54189129febdb864ceaa5447a7a0011c8ccdf3711fcfd87424feca61b44c0b6_Device=CPU_Config=()
+254:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i32_Shape=static_IR=a3f2389f6a8a495885efa87742d53e1e154f58f8fd6e83df89bddf5922247095_Device=CPU_Config=()
+254:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=1da672197f2c962a6cdfb059e9d09c10a03c3b082838f53d2faf6a761fee0637_Device=CPU_Config=()
+254:conformance/OpImplCheckTest.checkPluginImplementation/Function=Subtract_opset1_Device=CPU_Config=()
+254:conformance/OpImplCheckTest.checkPluginImplementation/Function=Broadcast_opset3_Device=CPU_Config=()
+253:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6d71ec3285f12c65001e4396546f6c8c02215560675397656d85777f0c9c2644_Device=CPU_Config=()
+253:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=7cfdc0a97fd79a5d272b29850c24dad4a0a8f147ea89b7683c98fa203a448c52_Device=CPU_Config=()
+253:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=80cdfe1797800671134d77fa9c7032cdc1b19b4905fcefb11399610216f6e623_Device=CPU_Config=()
+253:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dd181ad2875cd08679b8554d2a85ea0fd15d7f09f733a8290f677fed6c757_Device=CPU_Config=()
+253:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=effa926dbd9beaa9b2b7b660288ceab99da8cfb440c4b01b7779d1bc25be336f_Device=CPU_Config=()
+253:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=b1477d38842775944964d18c13278454256d9610e0ef880fbce0cc87e5977556_Device=CPU_Config=()
+253:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f28013382ca254b4538a5527896cdfcd9d404aa854af83ef1d417abcdd781ef5_Device=CPU_Config=()
+253:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=be4d557c62a3a43e7f309d2276cd7549bf1976ca8593bf2be752e60c42237a19_Device=CPU_Config=()
+253:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=a0336bba08291ea34d6271c83816fb349d163fc5989171b07fe1bce50a2f3ea9_Device=CPU_Config=()
+253:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReadValue_opset3_Device=CPU_Config=()
+252:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c6e38c3297ab303b166e2a613203a1f09f4ba5a15659c8d2b233febd8fd09d9d_Device=CPU_Config=()
+252:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=7b1df1422bfecf1fdf9c25f72d938950cb1492ee1c7223d9c0d771f93b1fbdb8_Device=CPU_Config=()
+252:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5bf1e9348ae0ec7106a2231d8940acc74464f5ecf0cbc6a682defc3a9bc5c2c2_Device=CPU_Config=()
+252:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6ddd9bc0333c5b1ebae36dafbc24444ffcd34d29bdb58a9b20d4e584c2cc63eb_Device=CPU_Config=()
+252:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=63de0838ea26e3575f49700f73fffb0d3415ab68b29b1a1da690b84f7a034822_Device=CPU_Config=()
+252:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=static_IR=461bf15d226b7ee3cbdcbc8cf1806e98267c5f14f0aef49dfb9de094f56347b7_Device=CPU_Config=()
+252:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.1_Type=i64_Shape=static_IR=36b9b7be1407243aad0792e7a49ef25f7c3e3791dc1ff93cad40480837ba87cf_Device=CPU_Config=()
+252:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=58961039245de875e98bf9501f24065bd4b7fd4bd962ffb488c93a61eaa401f7_Device=CPU_Config=()
+252:conformance_Relu/ReadIRTest.Inference/Op=Relu.1_Type=f32_Shape=static_IR=03c3e6567da3c139c19e0ce0d301a6076b2e2446d191216c7bf38bc030ea7855_Device=CPU_Config=()
+252:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=d84c7cd2094853de1602906a47c4265442c727a532d85199772fdfaaaf7007dc_Device=CPU_Config=()
+252:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=f31f6d969e04a7a1c964c02f107a7291c85067ac31d935921bc418363c2a7a46_Device=CPU_Config=()
+252:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=98526403db7eb1f67a41aed2c34fea684d99d8cb8225313136e55be7d326aaaa_Device=CPU_Config=()
+252:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=cbb80f496fd705f24fdb25f6de3734bb2a2b7f49c984bdb32c4f62ec4640797a_Device=CPU_Config=()
+252:conformance/OpImplCheckTest.checkPluginImplementation/Function=Softmax_opset1_Device=CPU_Config=()
+251:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=68c3856ae6a30752004a5ebfabb93bd0d98446a91ba7fd84e686226f45d326b9_Device=CPU_Config=()
+251:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=7d73fec5a605ca6fc06cb014fb723236fd2ddfa1820648acb7fdae8530866f45_Device=CPU_Config=()
+251:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=09d4b4ea324f91ba6006bad4c82ca08e723c83c1b862d8075475e986696220da_Device=CPU_Config=()
+251:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=dynamic_IR=516ad610612780fdaf83c5dc151316e83772eda4700882f934c97b2a2bd86dac_Device=CPU_Config=()
+251:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=aa757ffed4165beb3074da6ad09422d7823a1d0d6c8a654adc56343d0e43dc66_Device=CPU_Config=()
+251:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=c55846f7a08af86fb1c914c925433852fd4bc735f671c87e965a6db9b6971708_Device=CPU_Config=()
+251:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=()
+251:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=3bfc4cff938f4386af23d87ce10f8680a62a25ce1fa9178874f212edf45ee045_Device=CPU_Config=()
+251:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=i64_Shape=static_IR=75c36f65570966e7f975e5c839036e0e13fe30e6d24ce4be8e6a0e8449173951_Device=CPU_Config=()
+251:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=19a94fc5cfe3ab1b4e169b342ec8d9f0fdc4ef19484c8c34d6ab938c6e7bf5fd_Device=CPU_Config=()
+251:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=95afe52c888afd5c641ad2d6d0c3f8491f039af2c6938b91fe6fca613ec0b6ab_Device=CPU_Config=()
+251:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=e8c2981885674129fedb6fc6a376f3fd3db7bf6f9867ee8a3f4e5aede63ee168_Device=CPU_Config=()
+251:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=ce2bcc21fba106cc8be4846179a73cb30f650e7ec48d443fed591f6b479fa9d1_Device=CPU_Config=()
+251:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=29a544bbefe85bdabe1d5d36d83d8ee1d80c71f8b98ff6e898e1062671daa8ad_Device=CPU_Config=()
+251:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=75bf24e3b7a4c4374c5c92331d9e48423d734d35b5cafb951222e39ea4c29613_Device=CPU_Config=()
+251:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=dynamic_IR=a937747c04b70351d3632aab91189200e2c0a69b6467ed856b7075885c54d83a_Device=CPU_Config=()
+251:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=6e614b9877c6dd3bf1ebd731443e5a1e0b7492edbc3a4683adcff53c965ca1bb_Device=CPU_Config=()
+251:conformance/OpImplCheckTest.checkPluginImplementation/Function=Proposal_opset4_Device=CPU_Config=()
+250:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3e1e1cd684c1bcfcf06febedcb4eb0f4f62b5c0920098fa0715c828e9a9761a7_Device=CPU_Config=()
+250:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0e9ccd2a8aded784ff21758802648777721176f1d112ff60aaf3f150d6292156_Device=CPU_Config=()
+250:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=07b4c5d05754987a0524385690d79f74988302f437597b7477770e8d062d72a0_Device=CPU_Config=()
+250:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=i64_Shape=static_IR=7adee81cf21b942334c25378325f61e13e9ee3ac95ae004d4d9efceaab6c0949_Device=CPU_Config=()
+250:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=a5dc3f8dd6385eb7f6d4052af82e27b7af7e8a58bdcb6092ec79ea3087f141c6_Device=CPU_Config=() +250:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=5840000517cf6690e3b881e338ab887afae81129e22bb0e3c71b049e42ccd68e_Device=CPU_Config=() +250:conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=b6a75c5d2a686eae53cc25c6b107630b31a8a4d8c6514980ed1a97754f33bdcd_Device=CPU_Config=() +250:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=427900d25144ee6b8cd4b35cd53c6e9335375018f6328dd01ae4db304846d991_Device=CPU_Config=() +250:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=1bde2f2a7294810531e23de80f25a451b3033487b5919c949b708b273dc3973c_Device=CPU_Config=() +250:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=3141ed71fe3efbd7fb026a006824ec24e4673d8b97d23dce275548e92eedad91_Device=CPU_Config=() +250:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=static_IR=62b8aaf25e8c93387362b0c657886c31c39a7330cf3455486b8943a1e375ef5c_Device=CPU_Config=() +250:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=d066432a0ddac020441582a98f139d063cf5f4e9f34deaa0be5ab9b9f048aa0b_Device=CPU_Config=() +250:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=d8432e7d31bcf4d95ff7ab845a6858ea67cf751c7ef0fca60a9bab1d187fe3cf_Device=CPU_Config=() +250:conformance_Less/ReadIRTest.QueryModel/Op=Less.1_Type=boolean_Shape=static_IR=8cac1c4c51c2eb61b9ec75320814acf81b9ac240a88e1cc68f29541f6eb546e7_Device=CPU_Config=() +250:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7373e7e64fbb2fabed337c09be0d6b42c5cfad39b26d92c6dd74810499863448_Device=CPU_Config=() +250:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f3b3afbedffce0d70b40d78f882a0061ba05e26e385c37cf902aec88ea43a649_Device=CPU_Config=() +250:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=33199e062b7b59c8244477fd2682a08876e72126701842265efc0c9fb4a90c94_Device=CPU_Config=() +250:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=b16650eec74ddd46ff3bffc9eedb340b6bad99a338fbe6b11f7eca3098a324d2_Device=CPU_Config=() +249:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6ea8e16cab0d6f60ef13562706c941f5ba3c90d3a65447ab3844e100cec5a0ad_Device=CPU_Config=() +249:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6ddd9bc0333c5b1ebae36dafbc24444ffcd34d29bdb58a9b20d4e584c2cc63eb_Device=CPU_Config=() +249:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=45bae87afb2c7e7f0b7315334e33b8a9baf42d81b95b844cb4987dd3540f1dff_Device=CPU_Config=() +249:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=1c6447222d58a16595cfdd8b216fac2cb3f005d2b236a6526ef5de8e272e4847_Device=CPU_Config=() +249:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=static_IR=da15c9ddbf446de00565c83e95b8a554d400b8b925481e56eb3df41f7efe26d9_Device=CPU_Config=() +249:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=dynamic_IR=a9636e6e43bc01f8b1cfcfcd8e60e4ffba20837d0d3b80429c93f23cd8da89e0_Device=CPU_Config=() 
+249:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=2620e86e1e6ce8f0ecb3eebce969f3e7df11f7f86c6f97309aa24993f9036033_Device=CPU_Config=() +249:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b46a2ee4f7042328b2496382ed2bb9cf39621c3e3e27fd1d355c9682543effc2_Device=CPU_Config=() +249:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6eb80b60b2162fc469f652535ee11822ae34c903ca44191dc95ad7f9678b9337_Device=CPU_Config=() +249:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=046798a0cf8d4c3fd8f1dc12bd0363a669628e748a6c964385eb50bb783924fd_Device=CPU_Config=() +249:conformance_Einsum/ReadIRTest.QueryModel/Op=Einsum.7_Type=f32_Shape=static_IR=1c6cbe8477d09b0b193ddf9a453c1b6a8a79e3d1adcdf1c096709cee7a4866db_Device=CPU_Config=() +249:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=8f731757a7c32fa8e4d602d7197af81a1a82ea228ec05f4baeae7c59eba11f2b_Device=CPU_Config=() +249:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=7147d6ead151efc24a53c196b63fc441e240c34b41ad2226a535580eb2a3f3d2_Device=CPU_Config=() +249:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=c70693ee2f825a40f3e1fc8dd2ce9355690bc33ff27030f674d082a0cb343cc9_Device=CPU_Config=() +248:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=acc81187b83e3de7c3d0903f40daadcadff63455905c00ff2f98498f21bd68ea_Device=CPU_Config=() +248:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8710c3eaa10d25119059f4e15970d8a6381f978cd905fc8eb1b4d43a36d1d5f6_Device=CPU_Config=() +248:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=9fc3d18a9496df4681f38d330d3d1ff7b83b29b8f4e08e19c26a0107c4b69157_Device=CPU_Config=() +248:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i64_Shape=static_IR=5840000517cf6690e3b881e338ab887afae81129e22bb0e3c71b049e42ccd68e_Device=CPU_Config=() +248:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=cbd1111f323b8e6d78b59b531708defef64b90463f973f64f52251795ac5a7dc_Device=CPU_Config=() +248:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=8e098b9c129ab30efc257d55cfbc737d990d2ff0f7931039d3335c42d5f286eb_Device=CPU_Config=() +248:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=eeeaf32688af20dbc39dd3705dc09fc804c0636d4d5807b003c002eaab1e79dd_Device=CPU_Config=() +248:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=dc4bcacb769fc4d8f1ef4ff20ca7ba6b3b369d69ea3b1c65733d4cbd2cb0762c_Device=CPU_Config=() +248:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=739517c4c613063fc5ef734443f0a599400dec31cd5a56686735f3165b2dc2d0_Device=CPU_Config=() +248:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_Shape=dynamic_IR=60bd170e816e0c2345a1658fd88459775fe8b7cce5de31a16e4e6cdea199f264_Device=CPU_Config=() +247:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e62245706f4242ff86bcb70d4d221bf49aa31db3807698d574125166bff5f8aa_Device=CPU_Config=() +247:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1bae1a851b5bf95921ad7666e48803dae416315a20a3ddbcc1c81243cb5bdede_Device=CPU_Config=() 
+247:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=83b83dd13b1733a50ec728ca6e7f09eb75641a573178816d1d33f30390464d87_Device=CPU_Config=() +247:conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=8952b1ce6fc7bfd900e669e12b520b624c02026b458bae41afe28e1f76058315_Device=CPU_Config=() +247:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c87c002bc627f4adfa58547da4c2b1f270e07e9961a1b4ae99dda72d88980550_Device=CPU_Config=() +247:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=f76da5edfb7a9e3fa7cec034fa43307bce74eeb0629176ae5dd40d154baf858f_Device=CPU_Config=() +247:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=e1d727df48a0a74d8b9865c00e5c39c9d53a5023d83da3c58f281b6b1411b696_Device=CPU_Config=() +247:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=7d3d30fa9e2a8a839cf42249de3eb8228681229e8b302ff7f290cc0d00c10a1a_Device=CPU_Config=() +247:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=fe615eeceb735b046b190d844931c56223d45439021da3b6b23227a1f9cb73c7_Device=CPU_Config=() +247:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6af32fc288bcbd414ea63525c4345aeda74ab21c44aab5910f85b8b7fb5d1179_Device=CPU_Config=() +247:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=453c1f5bb6c2e9c81a04475c49696c6e9e94f77853ef961e1839b541de7c7e21_Device=CPU_Config=() +247:conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=028177a440f430edc5dfd7a7f0f2c0dded422876a98b6da66a647ad9aca10e57_Device=CPU_Config=() +247:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=f8c3c9cefc3f7ab9b8e1fd3031be6eb34eba46f9c493b316439c24355a8a4978_Device=CPU_Config=() +247:conformance/OpImplCheckTest.checkPluginImplementation/Function=NotEqual_opset1_Device=CPU_Config=() +246:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=e2da6d928938b6445170cd69fd4a7aab40130a560cef3ffa2d268a428f56fcec_Device=CPU_Config=() +246:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ffc3cad64b8bf82ffa4d189a247a9434e71886cacd3582956c5dd98921fd2141_Device=CPU_Config=() +246:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9b4725171957a420a98f908742f18062fbcee198871d527ab5b4d939005ac4e6_Device=CPU_Config=() +246:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d9937a6c3eb62ad6328d7367f15e45758ce5f2ebc0488931855a5b1925574d36_Device=CPU_Config=() +246:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=05c2f891e743416ad510bf0ebf713738bd41258123cc4bbdc5cf067f251e35d8_Device=CPU_Config=() +246:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=c7ce41820be10f17c8d48c005703d536d18e4f49b1d2022ac58f77b7b9afadec_Device=CPU_Config=() +246:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=08393711dca608a5beec54493fa162068673eb746a6223b6dab2640d411570c0_Device=CPU_Config=() +246:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=7df296e0e156bb36cb643a292802f9db374c77035c6a05ee4a865fbe2c6ef92b_Device=CPU_Config=() 
+246:conformance_ROIPooling/ReadIRTest.QueryModel/Op=ROIPooling.2_Type=f32_Shape=static_IR=1a0e3f63698678d2e6bb8968fbadc98227d9ce548e77c53021412d80d7711753_Device=CPU_Config=() +246:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=21f786ad25c17eff66f16501d160439b96636a7d5d8512c1bd3db5fb5d5e6987_Device=CPU_Config=() +246:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=c0eaf7f2465de396f92db5829a30b7d887dc26bc8d49b86f0fd0d688c7129e18_Device=CPU_Config=() +246:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=4d9f16ede014da56824607d45502439f71b57275c332fbf15c6ba2ec1496466f_Device=CPU_Config=() +246:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=64d3761db7bdfd0de19878c66fa4465d084f7462c332fd978de458e328f97875_Device=CPU_Config=() +246:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=08cdbd5ea904a12dde32bce43e6c512aacd0ff990d5df3a90ff625226c936edd_Device=CPU_Config=() +246:conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=static_IR=2e5aed1612da0f720adb051e22460983a3911c38cb09184d812ceb949870f450_Device=CPU_Config=() +246:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a9fdcbd778622e442a42d8d2a1a12a1be0cf7e9d79c4d7ad56d5802c7a84d337_Device=CPU_Config=() +246:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=11477a6d571446d4e895d1cc6b0155c36606963d5c4a3a0a516802063a60906f_Device=CPU_Config=() +246:conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=static_IR=fb9febc1b0984c7d6887460d058a75a9444bd1ade793c5b945c9b79ad2c63e46_Device=CPU_Config=() +246:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=053d601716750db93af5ae01d67213086ed987370f9ff59723824dcd0a6c2462_Device=CPU_Config=() +246:conformance/OpImplCheckTest.checkPluginImplementation/Function=PriorBox_opset1_Device=CPU_Config=() +246:conformance/OpImplCheckTest.checkPluginImplementation/Function=Multinomial_opset13_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=10c19142631a9ac6d8026ec82820aa75ba1e14605fe5ea1e017fa4bde4a90c44_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=ccef47523d9134720116dbd4a37d5038c9d15e2c393ccf1a6d24c3790529c282_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=969c6a651dc204576d68d7d893ad2dbff1f7c74803b1763857d41aabdd19a72a_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=2d153f841ff4b6825fe5b8399105916112addb79300aa00df85409c88fdd70ec_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ea8fff2db5032f5015f68d53904354d4bdfbe5288224c7f549a1573794455d80_Device=CPU_Config=() +245:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=2cc50ee05a039bf65fd7be2282284790d9d2e1fabb4cfec509f5bed121152d93_Device=CPU_Config=() +245:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6ddd9bc0333c5b1ebae36dafbc24444ffcd34d29bdb58a9b20d4e584c2cc63eb_Device=CPU_Config=() +245:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=85d1eaa250a32acf89b675cc50f513ef3c7df50ed9d68f2cff2fc89db41b63f2_Device=CPU_Config=() 
+245:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=static_IR=a4fe57973b0bba01e6038a8050f07b8ad1bf6871c1ad86270920f9084dc84905_Device=CPU_Config=() +245:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=28675c37d06426cf6895e7ffc15d6c212ef8be1b278fd199d1bfbd0678f825fa_Device=CPU_Config=() +245:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=92dc9b12889f441d7a93e95851a15849139787b0ecc080e70d266fe4cb6dd9c1_Device=CPU_Config=() +245:conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=3bfa35b53e4bb74a9e450b1220a5d3c061f050e498cf86c8f72118052b7fa252_Device=CPU_Config=() +245:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=9bb8728e6e9f68cf68a9e39d1aa4c618c4aca4187d4262f735c0647d680c0506_Device=CPU_Config=() +245:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=15dd996f113d962d9bb21424d1006af0aa28376a2af63d791a80f0ab95a604fb_Device=CPU_Config=() +245:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=77b3b21d35d3742f7abc1097b99d510453f42ebe921681685fbc457d2fa9912a_Device=CPU_Config=() +245:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=a714d51739b3d420cf27f476e338bacbeabb40d0ced1e1527587756265253d8a_Device=CPU_Config=() +245:conformance_Less/ReadIRTest.QueryModel/Op=Less.1_Type=boolean_Shape=static_IR=953b15e350d9a27c4d048cbae41a278c732f3b3a6e8debd7fd2e75e99a015966_Device=CPU_Config=() +245:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=86fb2ad636e51f682c83919d64217835cd9ab458695e3bdab295c4107516e733_Device=CPU_Config=() +245:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=b7b0a0b3921a1e1434a3fef630e32b124c810e8bd15a3e861fe7da79158947b2_Device=CPU_Config=() +245:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=45959eb5eb391b2bc86455cb1e86aca76799c6b082437e72b15c171037a6206d_Device=CPU_Config=() +245:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=225aaa01462e6e43c0c12cff65f96e7d9c07d368a820ff3c1b2939fefe86d492_Device=CPU_Config=() +245:conformance/OpImplCheckTest.checkPluginImplementation/Function=Tan_opset1_Device=CPU_Config=() +245:conformance/OpImplCheckTest.checkPluginImplementation/Function=OneHot_opset1_Device=CPU_Config=() +245:conformance/OpImplCheckTest.checkPluginImplementation/Function=GRUCell_opset3_Device=CPU_Config=() +244:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d69304b651805edf18138147ec5a4c16e883ad5e5d9828db849a35249c28b263_Device=CPU_Config=() +244:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f2685b40efb789012e69252fa0fe30803c68be724a52dbcda9b2cb796138ea57_Device=CPU_Config=() +244:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i64_Shape=static_IR=c7a696f3217515ef4ff5eb46fbd15af6533f0fcd268398fbd434f105c0a11328_Device=CPU_Config=() +244:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=66abbc2c605a0f866880bd4730865ae6b5401a1f4beb242f346bf6f2f8138eb6_Device=CPU_Config=() +244:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=a99a5ab2de2d408c2e40ad5734c9bd5ab4d1d221f4dd24572e05538b134ef88c_Device=CPU_Config=() 
+244:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=i64_Shape=dynamic_IR=2c47f1ee19359a486a72bdafc2614159d48fffc80ddabe0f897212a454a75b18_Device=CPU_Config=() +244:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=223a34e46344a0dff7f35a637c9bd08e2a76a552ca87e5bf0134c9fc6d6be41d_Device=CPU_Config=() +244:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=static_IR=2233a83397f11ea3c674c4845409c4f27f8bffbb8d0295712a2525c9e93d6041_Device=CPU_Config=() +244:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=b434cd386e4c5e688aac8da3425d2ed0d72961223eaaa1cf2ff951a88a5fa001_Device=CPU_Config=() +244:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=49b05f6b6a636d84beca451fdc1fc81e3411a100ea105fbcd49ef72ef1fa0934_Device=CPU_Config=() +244:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=a714d51739b3d420cf27f476e338bacbeabb40d0ced1e1527587756265253d8a_Device=CPU_Config=() +244:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=static_IR=c4d8543f8e0b375407e428ef119ba4049d44cc273a10661b57645bcd1d36f5cf_Device=CPU_Config=() +244:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=60ab42bb613fe785777ed45bc99044f41dae00316065ed5e5f07e69f5c861fc4_Device=CPU_Config=() +244:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a1e0bbe02c433cb144b4825a9f1b2c30c03743f210830db5462736850b6db383_Device=CPU_Config=() +244:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=f26c1f41ef689dde33e9d61b0a1066788b8397ba6a170f5eb1362726ba9c0868_Device=CPU_Config=() +244:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=717ea579a24839ee9c5ba7c59a07af667fea4fd44ee18bf60e8970264852bde7_Device=CPU_Config=() +244:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d141b35e277394511f5635b2e395039c986ac392e6f49c2415da6a5071bee96a_Device=CPU_Config=() +244:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=fda1f84f5e911136f8daaf4fcebfb989f3216c066ddc1cae578882a41ca0f5bf_Device=CPU_Config=() +244:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=e8a26a33d6dbe0bb560820295fb6b8aafc3da0d2b78e29199d2f09e952722efe_Device=CPU_Config=() +244:conformance/OpImplCheckTest.checkPluginImplementation/Function=Split_opset1_Device=CPU_Config=() +244:conformance/OpImplCheckTest.checkPluginImplementation/Function=ShapeOf_opset3_Device=CPU_Config=() +244:conformance/OpImplCheckTest.checkPluginImplementation/Function=MulticlassNms_opset9_Device=CPU_Config=() +243:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ba28829f211d64d6d4922682b85f1bad6a3c28cc30b4f9651186b1e8fab39fec_Device=CPU_Config=() +243:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7a3cae38e357ee1e5b0400c7e1256cc8a2d78da81911fbbb3ae6d9e510d78aac_Device=CPU_Config=() +243:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2d886a31e22f61d30c33ddd300ba7d8ba1cd9796ee1a4f688db9126b1d8d9c83_Device=CPU_Config=() +243:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=64186bbc89d54f073554e029b8972fbbfba2abce8026a379b7ac3833f84ac9d4_Device=CPU_Config=() 
+243:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=cd389fc4a9417c7136f75474e42dfb43d1f9cb35fa0e104632ffa69fce2b7e57_Device=CPU_Config=() +243:conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_Shape=static_IR=67632b67a0834136cf2f3bcd6b3fbaf0d2f2bbffc1da6c33fd5fce0d0b8a763c_Device=CPU_Config=() +243:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=a3add607f5e37633f3298794f8e32e409e3403666af3c0fc57c7d4427b714eca_Device=CPU_Config=() +243:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=08ba7fbf736896f373ea81dd727940aefae22a39e217e84dfc5617ed62133d10_Device=CPU_Config=() +243:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=ff39aa885f7ecc22a06f668b79fef4ac41b3adf8dea82f428711b241c0fa6059_Device=CPU_Config=() +243:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=07b257862a62290d7e8ae939147bb7422992528bf54209b8d1bff500b99b6f4b_Device=CPU_Config=() +243:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=14f4dcbc8e714fdb791d15b62646db0da2cf647d431dd6ea044ca6976ef51753_Device=CPU_Config=() +242:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=aa14d6e18f8580015dd7d32b167fba6ee137133b87fd617eab4599f407a51b69_Device=CPU_Config=() +242:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a0f8789f0f95beb6f28efc829bdf2f99d34a3e9397ad1a80d7831aaaf125b5eb_Device=CPU_Config=() +242:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=8a5bf21112b4a458a3323e615dfce41a8627c89ac692e1d568786634667849ab_Device=CPU_Config=() +242:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=43d871d4b2b3346c08f8582b892ba0c0017d77688e16fd6d69f83f8101e12a69_Device=CPU_Config=() +242:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=c662eb0004f431152ddc69e12826a6c0e7aa66b24be0169acf10ca95f2a63f52_Device=CPU_Config=() +242:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=33d8f6d258ae8dfd09b8e6fd39f0e74384eabfb685e0e72a3c798101ea56a1d2_Device=CPU_Config=() +242:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=bd927dd60e7b65e84d03c2c01d29c6932961f801bed1312124c2212b5e22a921_Device=CPU_Config=() +242:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=f8b14e90b051624d56678dbe68f15e6db94e22878b22914d0be241047d1a3783_Device=CPU_Config=() +242:conformance_Minimum/ReadIRTest.QueryModel/Op=Minimum.1_Type=f32_Shape=static_IR=206184d6fe0a3ab9fe71914c66d3804e145caed7cf3ac09cb1d50183144d6ac7_Device=CPU_Config=() +242:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=a3370e3b46f385ea6e46137d49d5f1b4158fe08d0a3e9feb47a162f6b3640951_Device=CPU_Config=() +242:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a7b2c196b6ae12252522b2571af40b540eae94513bfbd88e15708fee816869f8_Device=CPU_Config=() +242:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3a17c045930ed967b45d1606b78fdc92e736731b198465e95ed7268d99eed246_Device=CPU_Config=() +242:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=static_IR=0d6cc305ea05df2178e3b4ea61ba2f296655e77af08556491e0dc8dfd46bdc6f_Device=CPU_Config=() 
+242:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=00d6c2465c4fa7ddab80d30c2fd8099a684bcc47cf9bdba89a39560beed737f6_Device=CPU_Config=() +242:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=7b8eedb1c6be0db4a0c041ec3b04498d6dc68b326c35533ae16258e750f21e3f_Device=CPU_Config=() +242:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=681b1f284fb69c16681d3efd2081d7f812496e3a027baef35a75bb0aeb9c003b_Device=CPU_Config=() +242:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=b005a58abf8192face35451602a847d378849223e4d433924581d28ef8141303_Device=CPU_Config=() +242:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceProd_opset1_Device=CPU_Config=() +242:conformance/OpImplCheckTest.checkPluginImplementation/Function=Greater_opset1_Device=CPU_Config=() +242:conformance/OpImplCheckTest.checkPluginImplementation/Function=Equal_opset1_Device=CPU_Config=() +241:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0d74ee98934e32799620ac90fd3ae8335bca026b9225782458949c64139d89c3_Device=CPU_Config=() +241:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f9f701a7d26d77a2b1eb3cc822efb5da95f1edbe614469f725a381ce892d8d91_Device=CPU_Config=() +241:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4746fb4d92aab20d21eeb0885d35c88abd50aa250298473f5bd143658eef2316_Device=CPU_Config=() +241:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i64_Shape=static_IR=9fa81cf001e6c48dfcf4e75aa77f95b3dce4e8d48b6ec3cfc896dcc08006c62e_Device=CPU_Config=() +241:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=055b7eb16539ce5cee62e165db9a6d51a11e0bdf90bc9f82eeca1f2faac2bf89_Device=CPU_Config=() +241:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=8b8efa859c54f9cf2200c18953de243d469d2f04bf38ba5f3efe441de23ffe45_Device=CPU_Config=() +241:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i64_Shape=static_IR=5840000517cf6690e3b881e338ab887afae81129e22bb0e3c71b049e42ccd68e_Device=CPU_Config=() +241:conformance_ScatterNDUpdate/ReadIRTest.QueryModel/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=d42cb628111ca80a33a558dcd1c2c310aa7b95d6c48549075291f49ec59c302d_Device=CPU_Config=() +241:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=58961039245de875e98bf9501f24065bd4b7fd4bd962ffb488c93a61eaa401f7_Device=CPU_Config=() +241:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=static_IR=b91a183b8c36d6e8358dad7056638b8091005393dd1ee6813728f25cd3e6a9f5_Device=CPU_Config=() +241:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=5a1aa66136ca2be83f714067139e11fcbf672d73f8b28c57d29333b885a17f83_Device=CPU_Config=() +241:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=f5d3b4ec51e032e4df5dae00ecba1a3198c29cba96c72b8c89126c4638b715d3_Device=CPU_Config=() +241:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f13dcb47235a9516298088a0c45ff56fdb7f95144da257a3dfa1c618c7373ce9_Device=CPU_Config=() +241:conformance/OpImplCheckTest.checkPluginImplementation/Function=SquaredDifference_opset1_Device=CPU_Config=() +241:conformance/OpImplCheckTest.checkPluginImplementation/Function=Asinh_opset4_Device=CPU_Config=() 
+240:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=abefab3b34ee5f7da347f3c86a1a0b7b17617de416051dc18c3aee80862c3000_Device=CPU_Config=() +240:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c4ae9be783990e398b3e8f0af76cab50d72c40c705677a3fe1c5dea592952d1e_Device=CPU_Config=() +240:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=12c56cc6ebb22e8e31d97e0ef640fecab5f93e5c5b2810c4dde56b09a7ac7f48_Device=CPU_Config=() +240:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f84bcf4f549ca0d6e75c7905f1463fbace4f3b955032fcae627e46e353b2aee9_Device=CPU_Config=() +240:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=dynamic_IR=6c91ebbae26ffbeec9778f2db476ad7ecb6eca6710cba24a86d3a2a262f68e43_Device=CPU_Config=() +240:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=81313f6065af987d98f37a1709f149d804bc1a36bb0a5c4a11223b29c6ccc3d2_Device=CPU_Config=() +240:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=c377dc784ecf97aef916740686298f47bc82c7c007326042ffe748e91ccfde1a_Device=CPU_Config=() +240:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=i64_Shape=static_IR=def60f5f3fb7a0d22cb3d23253e7c8e502aa9dd2d3756c54dd4343b66c2682ca_Device=CPU_Config=() +240:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=36f17a498b10c140f8a319d82e5c8f2cc3cdb7eb3be9f82f7ef35d9c9470231d_Device=CPU_Config=() +240:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=c721fdd5d79e702e4ac48a31d0ebacc4977f050c67d1c415b085773042c8e93b_Device=CPU_Config=() +240:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=54009010668252832a2a755d277e9f574fd2486892184caa0eb4774e753ed094_Device=CPU_Config=() +240:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=d066432a0ddac020441582a98f139d063cf5f4e9f34deaa0be5ab9b9f048aa0b_Device=CPU_Config=() +240:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=7ad5da9c461223f21afd023e08220eaed788598f50e144e45fcdf3466c0810a3_Device=CPU_Config=() +240:conformance_GroupConvolutionBackpropData/ReadIRTest.QueryModel/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=29c89ebfa45163b40be304d7bfc96f3068cd96175db94e6ebda942d3c4af538f_Device=CPU_Config=() +240:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f9b090cbcb19663630a1490fe18357b752e430ad793c0e3aaabedcb74ab64934_Device=CPU_Config=() +240:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b7983ae70a4e7868ccbf4b25a5d8e795620182c29817ad1151d89f2e932d770b_Device=CPU_Config=() +239:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ae0e669fbddc34e8aaaefff248959e3fe53196e68bc1b3a9e66be16a495d7cd2_Device=CPU_Config=() +239:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5e31c7022ed7bf2adff14876be4bbf6562afdc2239a08ddcdb507e3d1a20071b_Device=CPU_Config=() +239:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=8978b8e985b54cc12e2cefa8d9097f4a3a03d477129230b6c7e3daf8112e2c0e_Device=CPU_Config=() +239:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=138e0258106faf2065b52655adfb8b45d49b677f9cd04850bc5ac9335a9d16d7_Device=CPU_Config=() 
+239:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a9311932565e68fff052e15c1a0522e1c09270d06521541ca28b67c34184b1c5_Device=CPU_Config=() +239:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=41a35ec8a58f581cb2558464a66077408e961b57821db604fe525d492d4f4fbb_Device=CPU_Config=() +239:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=f32_Shape=static_IR=bcb10a9124f9b0471012f9e22d4aed5c2a47a55e652312e8a8382dc0d809a23e_Device=CPU_Config=() +239:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=70d4da84623a0af3bc8362a828bac5ef13285498b420a3df6bf2e88bf05311db_Device=CPU_Config=() +239:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=5c5e10f28ed3a8d4ee0d3c8af982df5f383a4a1a713baba556dd17ee52e9ef32_Device=CPU_Config=() +239:conformance_NonMaxSuppression/ReadIRTest.QueryModel/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=d12f2033cdee7e244afad462ca1d9295c314836b593b2a30730861c2a3c8e9f2_Device=CPU_Config=() +239:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i32_Shape=static_IR=22a8f509c3f76bc2dd6bc9a26ec4ab92a5b9ae4678532c886c1438669d627323_Device=CPU_Config=() +239:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=a50bcc7d92264c02627cb62bd0cac349b895311cef54b60a957a6366619e82f3_Device=CPU_Config=() +239:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=046798a0cf8d4c3fd8f1dc12bd0363a669628e748a6c964385eb50bb783924fd_Device=CPU_Config=() +239:conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=935369702948a57e71d169e75218162f370b48f344fe819f11112c011b6626fc_Device=CPU_Config=() +239:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=346617ba1990b67ca1fec8ec219645b16aafa6c94a4a0f752c2f3633b85df679_Device=CPU_Config=() +239:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=5d68272f8318c073e481b5353e6e4350e6b3b5e120f389a98859dbd5af43db9d_Device=CPU_Config=() +239:conformance/OpImplCheckTest.checkPluginImplementation/Function=MaxPool_opset1_Device=CPU_Config=() +238:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7d706b614d2b5d59c5e152bbb61a8fd558686bb3b8e9fda199c499ca49f03042_Device=CPU_Config=() +238:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=dynamic_IR=214b1d4be2a141409b6b54847c952a282d9b2d7236d3d8ada3463f7dc8554097_Device=CPU_Config=() +238:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=cd2470c72fa7d2238d2eca4d067e49a02340ad187681be2fa7e0bac6eab3500b_Device=CPU_Config=() +238:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=075342290aa43542c81f7ed4e804c905f110edc23440452c6d0c0f0c312b65c1_Device=CPU_Config=() +238:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4a80814933ec1c6198745b1caa4d5b7c9171395b6d8a53cd791dcdf64fa6c91b_Device=CPU_Config=() +238:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=fe70e0ee3f24f0bfe4391da7797647a01f66fcb109b481ca859c9f8f7dc7b411_Device=CPU_Config=() +238:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=cd5756749d3d73dc7b666f7f41dc292c73230e5d31ddbbd43aae77210b86220a_Device=CPU_Config=() 
+238:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=87523dfccb2a9c8334d6810e33c2a2d3b6bc09db7623e7ae93ba4cea89b66a06_Device=CPU_Config=() +238:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=281f1852405ad37d0606184e81d8534d769f50b3fe99f5f17ebfda6954f4a584_Device=CPU_Config=() +238:conformance/OpImplCheckTest.checkPluginImplementation/Function=SpaceToBatch_opset2_Device=CPU_Config=() +238:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonMaxSuppression_opset4_Device=CPU_Config=() +238:conformance/OpImplCheckTest.checkPluginImplementation/Function=FloorMod_opset1_Device=CPU_Config=() +237:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=92f5c3aa4427a89ad6ef275c0beb2139cbd0c6ce2eb71205117448adf592ad20_Device=CPU_Config=() +237:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=5295b6c6090a820891e5754c34d03dc3347d3436fa16fa4a701422ce8ac78b92_Device=CPU_Config=() +237:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7b904365e0652437dcb59aef3b84da17f4205a821586224e41db1409d96e910b_Device=CPU_Config=() +237:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=43e58b152a871421132d25894025e9f4e2b5294f4b22923ca549bb0f2b8ab50d_Device=CPU_Config=() +237:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0495648ac153ca7bb07160aed49b620b855a89b368d363a22fb45ff3428349eb_Device=CPU_Config=() +237:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=9281a7e3ea8124fdbe416d1f15434752a7e799fc77a63be64babddf60b6f2d8b_Device=CPU_Config=() +237:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=i64_Shape=dynamic_IR=84a8c7a897894ee6bb1c03759bced74ea6d773a2cb8335efdc8d193a534f3833_Device=CPU_Config=() +237:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=bec81407211db6e10d7c8811bc58b53c23c8aafa0e2083f262204f345b9bcfc6_Device=CPU_Config=() +237:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=9337e101d74f6d35bf81e9be895ffba9e972cdab9d79b2802f1c1ec0f4d34a83_Device=CPU_Config=() +237:conformance_ReduceMax/ReadIRTest.QueryModel/Op=ReduceMax.1_Type=f32_Shape=static_IR=590a910a27283b92d7a4650bba546a3bec08a6ded604bbe8523ab3c6d734c70b_Device=CPU_Config=() +237:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=b729ddf6b689006067cfce88ec7d9e89268dd6cd904e4596717016541632b13b_Device=CPU_Config=() +237:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cdd7ce044f231ae39fc0f7460a55473c0de6934124cd263444a5912b8cbbc0ce_Device=CPU_Config=() +237:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=acaf36c12445c608b306074ac4e2be9cfde2f5550905993d4b5bd1714dc96aaa_Device=CPU_Config=() +237:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=8f3e3716e8a1e8647454d124d7538ac1faacdc1b95873ccc1a760e09d48c30d3_Device=CPU_Config=() +237:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=2e70eb484f4bac4cd11e9f643d2531cd0e78994af07c015183edf9d62a709d47_Device=CPU_Config=() +237:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=b16650eec74ddd46ff3bffc9eedb340b6bad99a338fbe6b11f7eca3098a324d2_Device=CPU_Config=() 
+237:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=2c114b0035075d866c028f9a1168725375feac9a666a881ae6b7db6e9066bb3f_Device=CPU_Config=() +237:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=a3e2f08143425d4c6ed46ee301de31c5942694f79af0d297e4d1801e9a6a0ff8_Device=CPU_Config=() +237:conformance/OpImplCheckTest.checkPluginImplementation/Function=MaxPool_opset8_Device=CPU_Config=() +236:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=cc2f28d736d3c67fdd13fbea9b8cef7c0b075f06b37034581fc732966421802f_Device=CPU_Config=() +236:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=14c8a8bb712c40d63edf76de9a75dd1dcd53a2df8c6098c80ee760119966f364_Device=CPU_Config=() +236:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=396388d4dce8240937c39dcd24e583e775f7b4e84d6c85fa9b5930588dfb9b56_Device=CPU_Config=() +236:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4df4ab698c70278594efe8b4349a4c99c8b2ab7c4ee0182c5a4b7673da922ad6_Device=CPU_Config=() +236:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=64186bbc89d54f073554e029b8972fbbfba2abce8026a379b7ac3833f84ac9d4_Device=CPU_Config=() +236:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=87c65c520de106b146e91222609f5b25cd79e96cdd6b942c3293cddb656617ee_Device=CPU_Config=() +236:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=6cf01dbf95872b3fc0c914e73415ed8e4dd52cb355031002a65e3e974559d6d6_Device=CPU_Config=() +236:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=6e8dbb054c99609e5aedd642130e867c22091118e0bb7ddd870a66dcfd11452f_Device=CPU_Config=() +236:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=b6e3f37ddee609d492f47b36b8fe937ee401d01e6d43d7e0b7c06d1a1781b501_Device=CPU_Config=() +236:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=927c151867c504f57aea681772afe32ec9c67cdaa4a0dcbc9055a8725c0296dd_Device=CPU_Config=() +236:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=43c8e8300f01242788a8cfdc37b48779f51f7ee7aef5b28e8de542320ba86e4e_Device=CPU_Config=() +236:conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=() +236:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eace26dff7f6f0403126e78a4c93920ee5e54a721cd580b4b18c2c9989baef86_Device=CPU_Config=() +236:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b3e45847dae7906b7f320b6a751727593b35ad8659ee80a11caf445f44f392df_Device=CPU_Config=() +236:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d9db827de158568b8a10347c13216e92b37ec20d8eac92c38aabd86690114805_Device=CPU_Config=() +236:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=46a3135a1078cd8732e84754fa66872648997791d16caa379a179e1a90960608_Device=CPU_Config=() +236:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=e4388b1379e224ea4849e6052827ef17b490cab3718159195ea2b2986719bb4a_Device=CPU_Config=() 
+236:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=b077af9b63e937fc64589d3007372d5fb2e4accc392ea09889a2519e3885413d_Device=CPU_Config=() +236:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=00b85178c2e7f891c89e99a6692b94a56ab0882f4a30167997e104db1429a9c9_Device=CPU_Config=() +236:conformance/OpImplCheckTest.checkPluginImplementation/Function=PriorBoxClustered_opset1_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=37ed85c113d481da6d55c0a820d49090a8b256694e0f1b111feded60fe708279_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=a6b95dd49e84f2860b57f1f1ab6fe2baa265bb757112e53def3004a360053aa8_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=fad6766f10f7a0ffee665be437521766f5dd56b673293920d8b469bdcef8e7f8_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7895fea00309326a052d47dbd2f9e562b86bb9d0501f2a2fd8843a0340359b67_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=104a69286d09ab8a5a88403ce6b421979659231fe5c5f973393216607a995dcf_Device=CPU_Config=() +235:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=3866cad522b1a4da567b64df204a69863faf25dd6e09f85dc5806d3101689458_Device=CPU_Config=() +235:conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=static_IR=8c82cead166c3db4616f034b66c4795cb4bed653de41d2b6dc71b48ce76a296e_Device=CPU_Config=() +235:conformance_Round/ReadIRTest.ImportExport/Op=Round.5_Type=f32_Shape=static_IR=f4cc9554ddbd189f18575e3a80afe6e8f8bce613dc8852a48d4171ab6916e087_Device=CPU_Config=() +235:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=7988ae4f263061e530c61f5987afd5e7f1945ecef9fcded2bc9799afdcec0df6_Device=CPU_Config=() +235:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=3638f7714d7627d7536ec02891656e512fee1ec55d59bb4f68c7409ad82f3879_Device=CPU_Config=() +235:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c843b49e26b9be555df454d4c63f0bff72e6ce29d3ae80e9193741500b08f424_Device=CPU_Config=() +235:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0ce1ec496e5d71728fc5daaba87809c5922406a65e85823913381de0d2112e01_Device=CPU_Config=() +235:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d9db827de158568b8a10347c13216e92b37ec20d8eac92c38aabd86690114805_Device=CPU_Config=() +235:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=ea860537d420b0d1afe0ec9a10192912ec59d8f4ba01b27add362ce50fd6b380_Device=CPU_Config=() +235:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=f0d5131a073c03932316e3f20f40c527ddabafc926f0d10824a96158c03524b8_Device=CPU_Config=() +234:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7b42d3a61f732f3639d1ae7011b86158d070acc922308a18f00a01b9c6a60ead_Device=CPU_Config=() +234:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6bbd8d7f90e7c210514c28d527eb33bf0889b1fafbd5cf7d9660532f5d6bd940_Device=CPU_Config=() 
+234:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=f9f031e1fb61fcf87468eb1f4b2005e7cecc5f073eca95c161fe62fbbfc983f4_Device=CPU_Config=()
+234:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1269afc1a9f9a4f71ca2167cc59274b7a3bead8cca474162919619b810eb9c1a_Device=CPU_Config=()
+234:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f226723f90368b020cf11817ce0a39c002b9c30e07d16ac9297b7e574a010b0e_Device=CPU_Config=()
+234:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=874c0fa19029457645c4cff20769f66ba7aaa1a35ade84c948f83aaa9c1ead19_Device=CPU_Config=()
+234:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.1_Type=i64_Shape=dynamic_IR=45a9a897d75b175e3d805e74ec09322789564e0c0e8d9535724f262a9f534572_Device=CPU_Config=()
+234:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=3cef1c65fc41c5f96e90007517fb5c911435e8d8ae7db1a1398ae63c2525d6c3_Device=CPU_Config=()
+234:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=dynamic_IR=a9636e6e43bc01f8b1cfcfcd8e60e4ffba20837d0d3b80429c93f23cd8da89e0_Device=CPU_Config=()
+234:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=287a7562757ef0295cc38442e3d775cff0fb1ea9b27e6897bd456f01ce82d455_Device=CPU_Config=()
+234:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=be720054cd6d960249271114344ef2f4f36e2a2208376df70d4395a82386dd01_Device=CPU_Config=()
+234:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=72c58b462f61521af4eab9c890e568b5676c7a3194c4e35f8e04f98596013c47_Device=CPU_Config=()
+234:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=e0422b2fb57587a85d9ce1532f7fc28a6bd01e72a325d42d9045419dda4bbba5_Device=CPU_Config=()
+234:conformance_Ceiling/ReadIRTest.QueryModel/Op=Ceiling.1_Type=f32_Shape=static_IR=fb5c74aa3b17b4a8d5e1603b9179b60bf3f0b8301c74a8fb632b6869896439d6_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=50c46a070e458a716446dafab20580095bfe902eeb4ad96c39bc2c617964c1d8_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=254937408e91c70536c4f3b3f81f1a7aede93b29f142631a46fa7d962c531131_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=6095afd484c177267854bcab902c3057a2a1bbf37b2188d3a31fd2cec48de2fe_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=2e06088cb191d8d26309843b1285b9ae4a1eb0722e1370875edde7fd2783851b_Device=CPU_Config=()
+233:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=2d153f841ff4b6825fe5b8399105916112addb79300aa00df85409c88fdd70ec_Device=CPU_Config=()
+233:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=i64_Shape=static_IR=7adee81cf21b942334c25378325f61e13e9ee3ac95ae004d4d9efceaab6c0949_Device=CPU_Config=()
+233:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i32_Shape=static_IR=9fc3d18a9496df4681f38d330d3d1ff7b83b29b8f4e08e19c26a0107c4b69157_Device=CPU_Config=()
+233:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i32_Shape=static_IR=58961039245de875e98bf9501f24065bd4b7fd4bd962ffb488c93a61eaa401f7_Device=CPU_Config=()
+233:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=20c2030cdd180dbbfad1e5b8a4f865d1757a9d427c3d5ff21651a429369f4341_Device=CPU_Config=()
+233:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=0a5f9fad12bf5e2592c6f720232bb38d94a5fb9ac1fdc5a8f7d474ed9e9d2504_Device=CPU_Config=()
+233:conformance_NormalizeL2/ReadIRTest.QueryModel/Op=NormalizeL2.1_Type=f32_Shape=static_IR=acdcf37615b571d8a1275b71cfe0c43a6410e56f5f18db8e9d795e46aac73d0c_Device=CPU_Config=()
+233:conformance_NormalizeL2/ReadIRTest.QueryModel/Op=NormalizeL2.1_Type=f32_Shape=static_IR=3bfa35b53e4bb74a9e450b1220a5d3c061f050e498cf86c8f72118052b7fa252_Device=CPU_Config=()
+233:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i64_Shape=static_IR=2e3f53e7b949e1dd0ab38890b0c9fc9e770dfb68569e37fa5cdd4e3ef03d6eb0_Device=CPU_Config=()
+233:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_Shape=static_IR=22a8f509c3f76bc2dd6bc9a26ec4ab92a5b9ae4678532c886c1438669d627323_Device=CPU_Config=()
+233:conformance_Less/ReadIRTest.ImportExport/Op=Less.1_Type=boolean_Shape=static_IR=8cac1c4c51c2eb61b9ec75320814acf81b9ac240a88e1cc68f29541f6eb546e7_Device=CPU_Config=()
+233:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=a3de81c04a0e7d5cab275045415ab4c294ed3270588c2ef704ab6db5514ed0dc_Device=CPU_Config=()
+233:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8ec74565f16a2ee1e322b4549ea19aa0b30719787abd90bd957e121705edb268_Device=CPU_Config=()
+233:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=6b2c79edda9cc9cce61c98552d6a0d3a3555c9ccac3a56c6692f536a0abdb61e_Device=CPU_Config=()
+233:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=68c6351cbee22a4783b3c592f69eea3778c17594c48972d5d0d1e9d728f5b47e_Device=CPU_Config=()
+233:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=c98e1e2347c7b6939804dfcfcebbbd57d4c05e8d13b35b2611912290d06107ff_Device=CPU_Config=()
+233:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=9c6d5cdaf19c92d1f994e4ae6cfdecf5a9ff04e47a2e0e68f3a08ec8f6e74479_Device=CPU_Config=()
+233:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=55c7f63e25ddf106ebdab6f4eab66f1be6950cf7a68abdb5b7e9a395d2fa6add_Device=CPU_Config=()
+233:conformance/OpImplCheckTest.checkPluginImplementation/Function=RDFT_opset9_Device=CPU_Config=()
+233:conformance/OpImplCheckTest.checkPluginImplementation/Function=Mod_opset1_Device=CPU_Config=()
+233:conformance/OpImplCheckTest.checkPluginImplementation/Function=HardSigmoid_opset1_Device=CPU_Config=()
+232:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=43e58b152a871421132d25894025e9f4e2b5294f4b22923ca549bb0f2b8ab50d_Device=CPU_Config=()
+232:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f99a212a117855e6e2dc4a338444a8ecee441f989638f7a0700ce24e037d29e3_Device=CPU_Config=()
+232:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=134ff6b704123c583b694d7023c99cbcfd10a1afc48819ef35b46dc4d0bca500_Device=CPU_Config=()
+232:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=136768c6c28210cc47eacf6667103eac8106e3f255618e067d351cb700e62cbf_Device=CPU_Config=()
+232:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=8b8efa859c54f9cf2200c18953de243d469d2f04bf38ba5f3efe441de23ffe45_Device=CPU_Config=()
+232:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=5d791fd5b82a74a42073567349728035c4ac52ea64c1a154a73bd4e61d1b42dd_Device=CPU_Config=()
+232:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i32_Shape=static_IR=683b86794b415f893e4d426a8c68aa38f46c250e4c31bc5f5807a86c20ffb34b_Device=CPU_Config=()
+232:conformance_Relu/ReadIRTest.QueryModel/Op=Relu.1_Type=f32_Shape=static_IR=377acd11b0f7dfb4f3e57baec8a6c8a84737857b7e794614542f139982feaf73_Device=CPU_Config=()
+232:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=ed75de35729f20a3285506937672f78d2d5137851a3043d15f4eafc040768fc8_Device=CPU_Config=()
+232:conformance_Loop/ReadIRTest.Inference/Op=Loop.5_Type=i32_Shape=static_IR=5b9cbac8797158a77d5616e8b7e5d8132360e23e26d31d845f0d129df7bfd7b5_Device=CPU_Config=()
+232:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8108f6881c436dfa59a0c27d173054c885f082306ae5af1694cdede13718bde2_Device=CPU_Config=()
+232:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=a0cee5b220a433f1d76460a1f452bfc26aae12f7b84983a063605b4a8cd0a5d4_Device=CPU_Config=()
+232:conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=3d20de7392465c055c84dc20d0af64ae6d14809f5a6e4bb05e315a2654066f93_Device=CPU_Config=()
+232:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceL1_opset4_Device=CPU_Config=()
+232:conformance/OpImplCheckTest.checkPluginImplementation/Function=Maximum_opset1_Device=CPU_Config=()
+231:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6d71ec3285f12c65001e4396546f6c8c02215560675397656d85777f0c9c2644_Device=CPU_Config=()
+231:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f5a74749f6c90dccecbb5e4a7d0fee72cca6247f0084487b5ca7d94d098c9b9b_Device=CPU_Config=()
+231:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=87baad85c649084e386ca502375581e9dc47c68c076bacae5e5ac1ddbaaa7830_Device=CPU_Config=()
+231:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_Shape=dynamic_IR=84a8c7a897894ee6bb1c03759bced74ea6d773a2cb8335efdc8d193a534f3833_Device=CPU_Config=()
+231:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i32_Shape=static_IR=a142d6fb0ae0c0decec2ebeba376ed65852e1c60b1c1abee7bc574d5ef3a6a3e_Device=CPU_Config=()
+231:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=5aa10dbbcee8d7434796180d5fbe8f0a954b772c441c8d6046439c615d3b9011_Device=CPU_Config=()
+231:conformance_LogicalNot/ReadIRTest.QueryModel/Op=LogicalNot.1_Type=boolean_Shape=static_IR=66b8769b499fa31cfd7545411d16a17b04e1a336bb63a7e907707cd170a30fc9_Device=CPU_Config=()
+231:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aaafa4ff22a5fcab1e6e0f48065210ff790275fba7a5c16602aa4a00951a8cb8_Device=CPU_Config=()
+231:conformance_Abs/ReadIRTest.QueryModel/Op=Abs.1_Type=f32_Shape=static_IR=083771171646a2eadcbb3384bd457e04d74ce8ea771813cdf67c56f7bbf20c69_Device=CPU_Config=()
+231:conformance/OpImplCheckTest.checkPluginImplementation/Function=ROIAlign_opset3_Device=CPU_Config=()
+230:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4722375e4770c972f87bc89a8ca16871aa57251a9c01ab2a14adc11f885cac91_Device=CPU_Config=()
+230:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=6a9a72aca963de945d97658e484453191cf6af26cd6838c1603299aff3a49a8c_Device=CPU_Config=()
+230:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5bf1e9348ae0ec7106a2231d8940acc74464f5ecf0cbc6a682defc3a9bc5c2c2_Device=CPU_Config=()
+230:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=4892263cb1ea7a434b5771aa16f07885c39710f67fa1411dd9235653a6b8622c_Device=CPU_Config=()
+230:conformance_Relu/ReadIRTest.QueryModel/Op=Relu.1_Type=f32_Shape=static_IR=03c3e6567da3c139c19e0ce0d301a6076b2e2446d191216c7bf38bc030ea7855_Device=CPU_Config=()
+230:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=7201a55d869ac6072af38ff89dfac3cfd2e6720d25f7607c6cc5f80040a8e82a_Device=CPU_Config=()
+230:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=17be9a027c25bbfbc08cf4dd106ee25d649680b30d16c74580fb3f8fcab54baa_Device=CPU_Config=()
+230:conformance_Log/ReadIRTest.ImportExport/Op=Log.1_Type=f32_Shape=static_IR=038bd1e152575a3b8ca28bfe18fdcc9cbf19c9489e7bb831b9d5f56f7499cb7c_Device=CPU_Config=()
+230:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=57ba21d45369359487dc3b6a8feb0aa2b6fb21ffa328dc8e8eed58ee2896fdad_Device=CPU_Config=()
+230:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c161ff64d4c506fdbe44d0ee76042f958f5dfce778833653628a026de01a3f9f_Device=CPU_Config=()
+230:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=ea860537d420b0d1afe0ec9a10192912ec59d8f4ba01b27add362ce50fd6b380_Device=CPU_Config=()
+230:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=d673fdf688abaeaf4cc6239ff762f8df557ab445bf9f031ab3bd87782717f2ef_Device=CPU_Config=()
+230:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=b2dd13c363e41fef66b0dcc3e21e77b9a97e413c1c89f8c8a53179b05f01c2cd_Device=CPU_Config=()
+230:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonMaxSuppression_opset9_Device=CPU_Config=()
+229:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c777366b6b37df3f4a3b19b637f66b707fbbb113972a9eff7eb4d793731f8c9b_Device=CPU_Config=()
+229:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=7dcfe3f43645f6b9f3290b524024a1a3d48efa3ce346eacc2330be7e27a046fd_Device=CPU_Config=()
+229:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d7a96943c0264427eb83ab413f6e7b0f15f09f83525de581fba582655d0fa4af_Device=CPU_Config=()
+229:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=08b46b9b2881764fde87811d2462a361d75c30fcec74f631f116f010953daced_Device=CPU_Config=()
+229:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=a7b79789ba2466daa67ce8610753fbd89a2ca372d65e2326802c24cce03f795f_Device=CPU_Config=()
+229:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=b0e3e542180f521cfd4651ae18d3a58962751d3c6de9265240be6d4fe9745bf0_Device=CPU_Config=()
+229:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=be720054cd6d960249271114344ef2f4f36e2a2208376df70d4395a82386dd01_Device=CPU_Config=()
+229:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=99377bd11138d36797502d82ac9adddc31dfe1e4cbb8bba8684b1479f8a16f26_Device=CPU_Config=()
+229:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=c39d76c89bb03fe251dfffdd9b8eb85c0585904ed9c5bb4660c3dedfdc451efb_Device=CPU_Config=()
+229:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=7ad5da9c461223f21afd023e08220eaed788598f50e144e45fcdf3466c0810a3_Device=CPU_Config=()
+229:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=db85fabcfcf049a7225468036e29c949eb779253ba145485205596e72cb8cc7e_Device=CPU_Config=()
+229:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i32_Shape=static_IR=d246ad7201844e04821cf31a7d0650c362d6684da5e02f625d28b1afc3789127_Device=CPU_Config=()
+229:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=769e7bb56fd0d0fa75fed14765279f68841e300b1450909cdcc802d347446b52_Device=CPU_Config=()
+229:conformance/OpImplCheckTest.checkPluginImplementation/Function=Broadcast_opset1_Device=CPU_Config=()
+228:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=64358a022d0c072ff89427a2f3acd3a3afb49b8f76e57353eb95962fd2572ca9_Device=CPU_Config=()
+228:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5ba879b46e93286e4c880a726e28d6956a1c8415508733b5349079f899462679_Device=CPU_Config=()
+228:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3ebf4d995c8af299693b32b6adabb6a261a3761137ec6c5e68b35bdf0942bd85_Device=CPU_Config=()
+228:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c1923c409aa2da9da8daf339b8b26be9ec6a106e65098182015c21881b0b5379_Device=CPU_Config=()
+228:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=()
+228:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.1_Type=f32_Shape=dynamic_IR=7cb8f8f3f3b4335221f85190d4bc29dd28a6b99133ab630a5ee04640af0843a0_Device=CPU_Config=()
+228:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=0b30cc6cee9ce5400085a0e78b44763bc169eeea93357f22fd716564f20226db_Device=CPU_Config=()
+228:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=1c6447222d58a16595cfdd8b216fac2cb3f005d2b236a6526ef5de8e272e4847_Device=CPU_Config=()
+228:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=823c1bd1ce8ee0ae28410bcea9f3c33ef9f9271e8f41f0871a7d6eb6b2850757_Device=CPU_Config=()
+228:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=i64_Shape=static_IR=056c07f9ad8e27e01b269b5136ee29b4cb4d1229a009cda07e4fd32c45d4e97f_Device=CPU_Config=()
+228:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=966eae58d5c103f24a598a5143d7b3a3c40a12fa2606a65431f0d1aef855cd32_Device=CPU_Config=()
+228:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=e894ea68d25e2a0af6fe2e330929c3305710bd07aca8e099b727df78fb26cdf6_Device=CPU_Config=()
+228:conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=static_IR=d05c1b7fcf976117a23e0284998d9ce21689411ff24530175787f1512ca25879_Device=CPU_Config=()
+228:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=4fe95284f224758c29c5198a8b2e6f97e8e737435d36cb94b9cdf0bca3c89dc1_Device=CPU_Config=()
+228:conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=48256cdbf5a3d19f0b7bb6b0540cbd664a36885a88fa8f5f56da7057de97a608_Device=CPU_Config=()
+228:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=00d6c2465c4fa7ddab80d30c2fd8099a684bcc47cf9bdba89a39560beed737f6_Device=CPU_Config=()
+228:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6b87ee29001d1d3b17ec72a66638e954796b7d6ec1d6f6be86890c7d5a3bcceb_Device=CPU_Config=()
+228:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2058548f687014df36b4da1b2644f07fa117d5a1d303a13c4d913a3f979d3ed6_Device=CPU_Config=()
+228:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=2f7925a034999529ce07a5c8bed2b2c7aeeb7936f74730d9c8ca5a5086dea4cd_Device=CPU_Config=()
+227:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c5637c5151109c002830514b8b1450092dc52df14146ecee467dc54469a77718_Device=CPU_Config=()
+227:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=bea169459345470ab5d89e5ae9a8b67d6e9401caf7dc35f5060805152e20d6cf_Device=CPU_Config=()
+227:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=fa88ad79fad41544d799f0333f83b91322f2bb408689e27e53bd175786ed0979_Device=CPU_Config=()
+227:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f0af28fe49c157f5f62f72f0ab209c50aa07d97c65477217fde6e3a3d0dc98ef_Device=CPU_Config=()
+227:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=dbee34cd3b708559af1ceb5fcf89aac35add00fc1b9e3eda2beebb2d5b629fc1_Device=CPU_Config=()
+227:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=f32_Shape=static_IR=136768c6c28210cc47eacf6667103eac8106e3f255618e067d351cb700e62cbf_Device=CPU_Config=()
+227:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=45bae87afb2c7e7f0b7315334e33b8a9baf42d81b95b844cb4987dd3540f1dff_Device=CPU_Config=()
+227:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i64_Shape=static_IR=168e02701204a8f0e325fa1a2a4407612df10c3218c9431981fa6f1f8300eec2_Device=CPU_Config=()
+227:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=i64_Shape=static_IR=68115f3a18f8ea201078166547e9c2a8587a5bb37646adf6f90da976f7298386_Device=CPU_Config=()
+227:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=i32_Shape=static_IR=a142d6fb0ae0c0decec2ebeba376ed65852e1c60b1c1abee7bc574d5ef3a6a3e_Device=CPU_Config=()
+227:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=dynamic_IR=fc75aba0dd172d6628de0b473569c672b52f070ac3c446cc3342cb1184ef076a_Device=CPU_Config=()
+227:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=e4baf41ae9a77441993eb0f95c3d7335e9a719e5eac8b1ffaf60d8f515f769a1_Device=CPU_Config=()
+227:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=e14dc7fb2cf0b63fd08f616d407511ff2036109a3e105dcb87398053d1c334d0_Device=CPU_Config=()
+227:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=84f6f3544adcc7c68df5ca411844cf36c2232c1b6c820094e5693a444faa143d_Device=CPU_Config=()
+227:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=45e4a607b0964915174f6a14de049a61a5740f258a4a71817e5aae1b93be5ae7_Device=CPU_Config=()
+227:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=ff96b044b0064dcc13dc7c1d80f2b2cddde0ead8c4501d5d741034833079d47b_Device=CPU_Config=()
+227:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=08cdbd5ea904a12dde32bce43e6c512aacd0ff990d5df3a90ff625226c936edd_Device=CPU_Config=()
+227:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i32_Shape=static_IR=c8ec200fa8fd8ec9c185d9d45ee1380be5e0e4a6f3157e5900401e9fce999553_Device=CPU_Config=()
+227:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=fb83c1c4a2ce0a8860479916f23f3961a5c20481e62de79390573dd7859c09f0_Device=CPU_Config=()
+227:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=bbb0129fbafd6d1874ccef37a1bb60379733012c502d58326dae70f413e387f2_Device=CPU_Config=()
+227:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=150b1e03f5e8abf76f88e68ae56a3afc3cb3ae110fcb12af35192aaf93b20f5b_Device=CPU_Config=()
+227:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=7244cd4799e0eab987f823edc7d6038b76afa7585e4663278be826124c5596ed_Device=CPU_Config=()
+227:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=13e9472dcdeb5e6ce2928191ed13dde08b6cdd62c82c94e77469d8a3ed94e39b_Device=CPU_Config=()
+227:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=cccecd6fd3e8f3d84fb98f219b212cd2b55ae0e4e34c099a25a1028e9e2f83e7_Device=CPU_Config=()
+227:conformance/OpImplCheckTest.checkPluginImplementation/Function=ScatterElementsUpdate_opset3_Device=CPU_Config=()
+227:conformance/OpImplCheckTest.checkPluginImplementation/Function=RNNCell_opset1_Device=CPU_Config=()
+227:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExperimentalDetectronDetectionOutput_opset6_Device=CPU_Config=()
+227:conformance/OpImplCheckTest.checkPluginImplementation/Function=Concat_opset1_Device=CPU_Config=()
+226:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a2006e1eaa808a3e78550535058de54c5cd83e9a32a52e488fef1f7883c321a3_Device=CPU_Config=()
+226:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8fa841d409e36b6665e289f4963330eaff4124d5452c93b75d779937cabe14d8_Device=CPU_Config=()
+226:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=f89eecd15ff45d6929f82696f96a68adfd694043ec3f859952d80080bd140627_Device=CPU_Config=()
+226:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=3866cad522b1a4da567b64df204a69863faf25dd6e09f85dc5806d3101689458_Device=CPU_Config=()
+226:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2acd53645519bc460dcc71923563fd462ed997366cc7ae08cb5a30245302a859_Device=CPU_Config=()
+226:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=72373e9c2bc4cdf2f0aa0a5d14e30ed1a5e0545d9a96f4ab675f3b9dc69d8cf4_Device=CPU_Config=()
+226:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=8368b4f6e208aa4cfbf0aeaa648e9408c281a71d98d15ee09407d26274fb349f_Device=CPU_Config=()
+226:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=745c0804609863998b4bcc6956b1e78fc221e0e4f1535ab09b89a9c966a16995_Device=CPU_Config=()
+226:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=e08e84b17997c1b1279429161d287720e4c7deb0e6d055539149bc577ed3b104_Device=CPU_Config=()
+226:conformance_NonZero/ReadIRTest.Inference/Op=NonZero.3_Type=i64_Shape=dynamic_IR=31f428e60ddfdb3cb3c98c2cc858d0409fd35c5e6e97f9dcdfbb20a876c475a6_Device=CPU_Config=()
+226:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=df085870336c57084e22afa8b52ece7149abc21b5d1784965a7d36d5ada91e8b_Device=CPU_Config=()
+226:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=0d782801290370c7c390ad549171ec3500ab344b8b34ce4b8fd8b05339fe5557_Device=CPU_Config=()
+226:conformance/OpImplCheckTest.checkPluginImplementation/Function=IsFinite_opset10_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9403397dde8b4f6a240bdc928d0f080dfb42f6442f281d6b3fe8b6e348ccacfd_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=20af9ae4466332a072f3b04c1219146d272daabf2306b66c755980bfd31f2a76_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=978c6fe274296020718998393e7fe94bbe0a0856fc377aa474df0454534824a6_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=920aa0d732c7ace2bcfe73df0e7217e66b6388dce554ef827efa96f4e7d31a2f_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f2eb693da69b0ad1af3bcef6c4af46ba2b92897f76989c310a65aac5c2027725_Device=CPU_Config=()
+225:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=254aa036653eace9f3faddc8f2fb69e04ba0c788a2070c054b4c9fc059d33845_Device=CPU_Config=()
+225:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=dynamic_IR=2af646407076eafcc1ed2d628158fc32eac4ef2fb34fb967962c06f81376d61c_Device=CPU_Config=()
+225:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i32_Shape=static_IR=e6ee69f681f9388da19dc9c17781710c5622ecda436aa2d4b018578548acebc7_Device=CPU_Config=()
+225:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=644274eaea5cff1fa9976380a2c024a8510f88826d0c1a6036aea3b18e3ecd8e_Device=CPU_Config=()
+225:conformance_Sqrt/ReadIRTest.Inference/Op=Sqrt.1_Type=f32_Shape=static_IR=4420cfb7f4a734731dacfe5b0c27db41ccaac2ab8bbff56cac0f99ed96e976f2_Device=CPU_Config=()
+225:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=00d924b3557896a41b0be32897f7b7293fcc44d79a285e91695a5fd2f29f3b8c_Device=CPU_Config=()
+225:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=2f842d4b64513c6df5748c54a1166a3f14436dc1ca59b7a28530bcafcdcde2f6_Device=CPU_Config=()
+225:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=445aa399303e82b524cce3e0b3522cfdb57200720b3b72584c785fad157117b1_Device=CPU_Config=()
+225:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=dynamic_IR=c18d3d2fd8001cb07daaa5000258b36352807e3e81999d2d80a668e4d6add085_Device=CPU_Config=()
+225:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=e14dc7fb2cf0b63fd08f616d407511ff2036109a3e105dcb87398053d1c334d0_Device=CPU_Config=()
+225:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=776018866cd0a06171706794dcd0d7bb13b5960fd98a66b306ecfac7595feec9_Device=CPU_Config=()
+225:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=3374f930d0ffd26ccd7cb542638f2386ae5f803b5bdce4d848ba1e93b4a173a8_Device=CPU_Config=()
+225:conformance/OpImplCheckTest.checkPluginImplementation/Function=ROIPooling_opset2_Device=CPU_Config=()
+225:conformance/OpImplCheckTest.checkPluginImplementation/Function=MulticlassNms_opset8_Device=CPU_Config=()
+225:conformance/OpImplCheckTest.checkPluginImplementation/Function=Interpolate_opset1_Device=CPU_Config=()
+225:conformance/OpImplCheckTest.checkPluginImplementation/Function=GatherND_opset8_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a917525b3e5a37fc2be5f35fd5a3d50b57627cd9b985333e082b169c29f848f3_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=1837f66989053233e19b617ab462b5c608981c0be175b57a2366fd41ca1a9fdb_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=8042d30c9796e8eca03cb2e3651f84b5167204aaf186ad08ad5f74a9b0a26b9d_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=ac40c4284a523b39af21eda7394a11b9ca2f2deb5263c03c92c0e217d34bedad_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9257d329b4cc9eff8545270d1693734adac9ac4ee44dcbaa21c774287e84aadd_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=50c46a070e458a716446dafab20580095bfe902eeb4ad96c39bc2c617964c1d8_Device=CPU_Config=()
+224:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=1f6be1a43c786bfbf35baad6ff643b762e9d63c069c884a69b4ec6e89062ad7e_Device=CPU_Config=()
+224:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i32_Shape=static_IR=f777fb31e1669cd58cc77e2a04c3f9a804b654b6d710432641a3dc34504460b4_Device=CPU_Config=()
+224:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=8fc296db9f7dd10289217cb81cdf5991c6b5f3c89369936a94c8ac484702bfa3_Device=CPU_Config=()
+224:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=c3ef1d1e09e7c0917298070d6909b455d5962c4bf3adf8d2d4c04f0741141f1f_Device=CPU_Config=()
+224:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=15dd996f113d962d9bb21424d1006af0aa28376a2af63d791a80f0ab95a604fb_Device=CPU_Config=()
+224:conformance_Minimum/ReadIRTest.Inference/Op=Minimum.1_Type=f32_Shape=static_IR=206184d6fe0a3ab9fe71914c66d3804e145caed7cf3ac09cb1d50183144d6ac7_Device=CPU_Config=()
+224:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=i64_Shape=static_IR=75c36f65570966e7f975e5c839036e0e13fe30e6d24ce4be8e6a0e8449173951_Device=CPU_Config=()
+224:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=8e098b9c129ab30efc257d55cfbc737d990d2ff0f7931039d3335c42d5f286eb_Device=CPU_Config=()
+223:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3fb25dbf33700d0b8ebc3c53fe328f2ee9f45c5a090240eec120b954998d17ce_Device=CPU_Config=()
+223:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=686b6d84e29d87a91c8177396d2aa5a1fbb88656c79e41af9a0b30b42805f477_Device=CPU_Config=()
+223:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=0fcad2ddd1c7b81bf5e88ef4d4abb26a33326a37fb0cceb1205c1efd2a2d3615_Device=CPU_Config=()
+223:conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=dynamic_IR=8b79cf070ed44bdefd5afbe86a81199e189fa486c42190795419dbfc7cc26d6b_Device=CPU_Config=()
+223:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=2ef8d38ce64fd0460d641e6f3bfcb1654bbe3d2c25f9dd244ae259eaa4b6941b_Device=CPU_Config=()
+223:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=823c1bd1ce8ee0ae28410bcea9f3c33ef9f9271e8f41f0871a7d6eb6b2850757_Device=CPU_Config=()
+223:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d11097e7fa04dc0b540bf3b963cde252591b39b7dcbfae66e64ed19cd2b3b06e_Device=CPU_Config=()
+223:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=f0edc45979b98d4401eea2c345bbcb794721dd3cdbfb3963be5a2842b27ccc5b_Device=CPU_Config=()
+223:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=b06553539d6e27195623fcbce51610b5671dd70700bcf61703a1f7a8bbc7c5d8_Device=CPU_Config=()
+223:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=776018866cd0a06171706794dcd0d7bb13b5960fd98a66b306ecfac7595feec9_Device=CPU_Config=()
+223:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=78239cbf0f8d473af2209ad3d9297e02208c110efa7af981f8c09ea7d7290032_Device=CPU_Config=()
+223:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=f45b24f3bf21a2c94bc89cdc3d20c283d47f4e6ea386444897330e232bd7d90f_Device=CPU_Config=()
+223:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=99377bd11138d36797502d82ac9adddc31dfe1e4cbb8bba8684b1479f8a16f26_Device=CPU_Config=()
+223:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=2a3d6c0476c17897fd4cc6d3623519fc033ac4022a01fbebd40b461f414f6c15_Device=CPU_Config=()
+223:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e2d2eef3e776af9379eb35540d8f4c888491082d8333aeb70f58822aa5cee878_Device=CPU_Config=()
+223:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=68c6351cbee22a4783b3c592f69eea3778c17594c48972d5d0d1e9d728f5b47e_Device=CPU_Config=()
+223:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=fb9febc1b0984c7d6887460d058a75a9444bd1ade793c5b945c9b79ad2c63e46_Device=CPU_Config=()
+223:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=dynamic_IR=8b9cabc6a44ece744453092791ef63b8d6ca4d83af7e8635f2f4ad78186e5184_Device=CPU_Config=()
+223:conformance_Clamp/ReadIRTest.QueryModel/Op=Clamp.1_Type=f32_Shape=static_IR=0662f4c4f222a79755532ac9eed43118b2ebd0faf0fbb9b400f9047ca1071b5f_Device=CPU_Config=()
+223:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=5be0b1c69be525cbddd7996b695c1a4a9f380173d03f291e8570df76c050678b_Device=CPU_Config=()
+223:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=a7f6c704686f1b0e6fd4ab522930aa3fb5b4cd4683b204aa31e5c73b427e7058_Device=CPU_Config=()
+223:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=29eeefa6ea54ff2530e2e17153db324026e85d4e45432c053ca066699187bbc5_Device=CPU_Config=()
+223:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=281f1852405ad37d0606184e81d8534d769f50b3fe99f5f17ebfda6954f4a584_Device=CPU_Config=()
+223:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=1e95665a92aa6efcc7e06d24fbe4cb2afa07d75374cea3ea928658a270ef489b_Device=CPU_Config=()
+223:conformance/OpImplCheckTest.checkPluginImplementation/Function=Interpolate_opset4_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=61f6b4fbde686888b82614a5d24cac53e835377c4cfa791ace3f3cd3f8ac2dd8_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3f87262a458b0dd0a330ab0cfc48c74ee687819228d3e2e1226df3b02de26afb_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1d52baa348f922bf85866fbfaa488c1ca33e01f0b79bd6a25fb430e8b7fc8b06_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=caf20ebc8d39cb23a107a03e819e8ee5b2807fbd311fe65453446251e4b6a611_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=cc2f28d736d3c67fdd13fbea9b8cef7c0b075f06b37034581fc732966421802f_Device=CPU_Config=()
+222:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=57c57d85bad2b76d3d65d88baf2b3677dca6e5d534121e87efd618efbe5b1547_Device=CPU_Config=()
+222:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=f89e84d6fb931cf0cd074acd01a50e50daa47ad88b1b74e4b3671d63bd7889f2_Device=CPU_Config=()
+222:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=b94d2ed6a2b113922805a69578ec5ba2ba3d8f0ea46ca37f095b4ccc94d76b77_Device=CPU_Config=()
+222:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=u8_Shape=dynamic_IR=81bbb9658ad214babb825fa4b576aa83a9ceaae7dc0b878a84e42ea194f3ec13_Device=CPU_Config=()
+222:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c87c002bc627f4adfa58547da4c2b1f270e07e9961a1b4ae99dda72d88980550_Device=CPU_Config=()
+222:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=3688e2a973219245d05c5fa675cebe9036d40777809ebf583c1bae9b9f87eed6_Device=CPU_Config=()
+222:conformance_ReduceMax/ReadIRTest.QueryModel/Op=ReduceMax.1_Type=f32_Shape=static_IR=a3b350b1516cb0391e088317ea67433757a08847710c4a4bff143922873208df_Device=CPU_Config=()
+222:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=5fd7b424cb32653589798a45526ac4b3f3aafd29a58e5ed1cef16a958fd4a859_Device=CPU_Config=()
+222:conformance_LogSoftmax/ReadIRTest.QueryModel/Op=LogSoftmax.5_Type=f32_Shape=dynamic_IR=a3f02c85607891ecc34c484b433c6a78333e13f3d8cd231e651f8bec26e7d0ce_Device=CPU_Config=()
+222:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de46537615051a46fea66871c5fc6ef3417b577ce42bd1f7e239d821e1ed5c51_Device=CPU_Config=()
+222:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3c7d4160bf883d550620e8d1ceb54b3d78bf1512388b5ee57e1a380949d441e1_Device=CPU_Config=()
+222:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=static_IR=a2450d07c12669e586815e60d9a2b568f88a49c9b63730c898b9eae907b5ec4a_Device=CPU_Config=()
+222:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=86fb2ad636e51f682c83919d64217835cd9ab458695e3bdab295c4107516e733_Device=CPU_Config=()
+222:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=750599c4cdfcbe7468328647a8760c7249a9f5dba8bc33ebd00c151d9f3b13f6_Device=CPU_Config=()
+222:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4b00183255fde45d5c3b815b552e5a4279284bfe1ceb31389560260ad5546c14_Device=CPU_Config=()
+222:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=d46d4fc3e7b3b2cea07f7ba710f77f7d99b4799e7fb0d3127ea6862f3f731ae9_Device=CPU_Config=()
+222:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=b99ba096eea2f3725fa98eabc2a941fa895c0a58bcd7a8ea68d2a245ce913113_Device=CPU_Config=()
+222:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=eabe482de99e120ef1260cc91a746df95f8db04fa1cf6832dc45b3ee1b38f9c5_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e27641fd09143d736ece2166cc3156e80c190d5def706b86358f49fe980cf9b7_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d46034925bf5b01e31b5a57911fe30f5dd09a8712432312fb1efd844e69913bf_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c3e5791580edfc2b522c8a3aecd33445b3fa8d771e2b5a8387ef0f303773c848_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=9efd5749a1591709057d6e97334c9b5b89f5864d705c91774e0196d42966d1b9_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=7efae3e9c1a0419670b3967f8b2dda53fb0200f946a3d529b8da235ee14690ff_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=254aa036653eace9f3faddc8f2fb69e04ba0c788a2070c054b4c9fc059d33845_Device=CPU_Config=()
+221:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=b08690e29e0249d5a6a30f2ad886ec714067df994bc4d8cbd82d0d02af6335bf_Device=CPU_Config=()
+221:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5f18fb02adfd683f379dd5a15d38f01cf744e6940754f6a40e2646a1d9c97be8_Device=CPU_Config=()
+221:conformance_VariadicSplit/ReadIRTest.Inference/Op=VariadicSplit.1_Type=f32_Shape=static_IR=fb8283ecd8934dfc5340a41e9889a0a760b39869e4873efed4ef85606c162ce7_Device=CPU_Config=()
+221:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=i64_Shape=dynamic_IR=84a8c7a897894ee6bb1c03759bced74ea6d773a2cb8335efdc8d193a534f3833_Device=CPU_Config=()
+221:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=81313f6065af987d98f37a1709f149d804bc1a36bb0a5c4a11223b29c6ccc3d2_Device=CPU_Config=()
+221:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=i32_Shape=static_IR=e256f7acbc71e64cab857fb6378a035096c7ceebdd4f867b5140d35865cf6532_Device=CPU_Config=()
+221:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=f802331401875cb16be10c9f752520406437b2e63a50e022b7d95b732e5296f2_Device=CPU_Config=()
+221:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=dynamic_IR=b94b5361ee75b3684455c2b871b656a50c72e325564787c302a714f222845b26_Device=CPU_Config=()
+221:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=570d13e19f312cf288f0f5d651f051c01f0fb65999579c3b06960c2936a18181_Device=CPU_Config=()
+221:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=f097978a7f18dafc7577a9dcf2306d82d397faf1bedb106ca3de70b3d9ada557_Device=CPU_Config=()
+221:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=98274ec3fc894754adaacedf83b4b7da373e639a51cfa7dc348412898e45e8dc_Device=CPU_Config=()
+221:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=e2ab1cf295df4df47d43e632065bf8a48fa58e6f3a6d1bc971b45fe97a66652e_Device=CPU_Config=()
+221:conformance_MVN/ReadIRTest.QueryModel/Op=MVN.6_Type=f32_Shape=static_IR=2a9ba5f3e5a74f05be93e288553139a15242f1500e1eca8317dbd82ee8cf00d1_Device=CPU_Config=()
+221:conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=66bf131d73ad3116d698e15ac3c9e48bde66e096228138eb865c0807295c0d4d_Device=CPU_Config=()
+221:conformance_GroupConvolution/ReadIRTest.Inference/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c843b49e26b9be555df454d4c63f0bff72e6ce29d3ae80e9193741500b08f424_Device=CPU_Config=()
+221:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f069cbce6f4c3276869b6d9c4a6c843d7a1e1c9d299e8680218636b04339a9dc_Device=CPU_Config=()
+221:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=46282ba6f0eb5aac6acc1e114a2408cc301300a027c6d7a05691928b5e6dd9dd_Device=CPU_Config=()
+221:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=static_IR=7e1801bf4ef7ad1b27663dfb399f318ccb2526e925d48e3d30e2ab837824b217_Device=CPU_Config=()
+221:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=cee58d2e3f2d6ef0061c5b245a15c60f0a26a58474c015f71dbdbc0c171b2a8b_Device=CPU_Config=()
+221:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=f64585bfa3951a93f76c18fbc795f3ef82176e270c9f37161bdfe48e094c1d39_Device=CPU_Config=()
+221:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=3f0c39b97aeab67748bd4039950e926a9d9f33b6d3261c4d65d048500adb5b7f_Device=CPU_Config=()
+221:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonMaxSuppression_opset3_Device=CPU_Config=()
+221:conformance/OpImplCheckTest.checkPluginImplementation/Function=I420toRGB_opset8_Device=CPU_Config=()
+220:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=34784838e98e93a6b024109ef3a8a5d4e1fc7f89b98ca23c81cf085f19acc663_Device=CPU_Config=()
+220:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=356e2a728749d3970a85939d23344315d0ff533567c35a559caa3bef173b76f7_Device=CPU_Config=()
+220:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=1a9779319a9cc5f21b6005ebb9b4517e0bb1f868ef8e568453a58c44474c40bf_Device=CPU_Config=()
+220:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=7efae3e9c1a0419670b3967f8b2dda53fb0200f946a3d529b8da235ee14690ff_Device=CPU_Config=()
+220:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=d2759b52de5dc9f1fa494c243d08ac40cf4e877c51323d53dbfa02abc1564e45_Device=CPU_Config=()
+220:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=817b3db8f96297276bc70f1b4854867cb92c164925c9dce59a1d054e3c315bee_Device=CPU_Config=()
+220:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=3638f7714d7627d7536ec02891656e512fee1ec55d59bb4f68c7409ad82f3879_Device=CPU_Config=()
+220:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=1696523c5dd3a701251583b9c9f29e43f852383cec3dde5a93e6f7f7cabf3398_Device=CPU_Config=()
+220:conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=fc530f5b6bbe8f06808eeaba33889867e705fa69591d01da4dd3dee9515f323f_Device=CPU_Config=()
+220:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=c4d1a1fdd0a336620be37a8ce7578ca0dd0c74f89fdb32ee86e7004792aa8445_Device=CPU_Config=()
+220:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e044b25aa265a98dcd0a5cf5f7132fdac5f36074068dc2210e04dd4c459aad61_Device=CPU_Config=()
+220:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=f73224b14c094974e582d3d903cc332f5c1da138368692e5d0be93127f1bf753_Device=CPU_Config=()
+220:conformance_Cos/ReadIRTest.Inference/Op=Cos.1_Type=f32_Shape=static_IR=e5379d72e978c773e9be98561b316a64f76c6015608d87739211e7c0e8b7bba3_Device=CPU_Config=()
+220:conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=0662f4c4f222a79755532ac9eed43118b2ebd0faf0fbb9b400f9047ca1071b5f_Device=CPU_Config=()
+220:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=0c6a844f626f6628628034d332ccb6d520e0447e4b616048c7efb516d0fd87bb_Device=CPU_Config=()
+220:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=b7973bf8dd344289b971d9b47575d6793643f503e13bb83c4e9c2a2863570b7a_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=07849f3111a0f12a712cb0deb7ec9c4778e70120385bdff7f17c1af30e31062c_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=41d80c815a196293f7d22af59f5f602f7e4f11e06208a693b19743fb796b98a8_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=f0c4dee4dcd8f03dd599ae04d7dd6ccfafc4d900d052a62f232a5507ffc006f0_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0bc70791680aff885fa6a5903cea30fdb2386e7720403a8e6698362c5491a877_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=5bed52483d61091259db863ffcd3b09c190fedde5dac72edad6f1bf37230f344_Device=CPU_Config=()
+219:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=deec30214c79ceb43a503bf521937a2bd554588775195d0e6302c521cd2b55ab_Device=CPU_Config=()
+219:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=9c1e1b695646ea4f56a87b7e5a815c12856f718920e01e86ed78f2dcaf896a37_Device=CPU_Config=()
+219:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.11_Type=f32_Shape=static_IR=9c57b92a55a929edae54a9705d80d730f7682ef015aa6923bd4658e244e9ca89_Device=CPU_Config=()
+219:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=d6250086b712a16042ee74438bb61b89fbfaa5bae433049207402d1da4cffaef_Device=CPU_Config=()
+219:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=46b077d7466eecbadbb7ceba5ed90724db3d9e216d22171f5dee02e44b9a5377_Device=CPU_Config=()
+219:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=efeea353bf41d0aac1f5400e451346d6cb407610566018f368726328cafca221_Device=CPU_Config=()
+219:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=30dd450fadb8a1081c1315cd0e5234728862b4de39b097a5a3248d551369b60a_Device=CPU_Config=()
+219:conformance/OpImplCheckTest.checkPluginImplementation/Function=Less_opset1_Device=CPU_Config=()
+219:conformance/OpImplCheckTest.checkPluginImplementation/Function=EmbeddingBagOffsetsSum_opset3_Device=CPU_Config=()
+218:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f729a1e882f1894319a357f6c5474552e883ae9322cc3dc399b3a292b13e6de4_Device=CPU_Config=()
+218:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9991a1b4140ee8e6ed0460fb384b7729f681bc1068315a4d970eea59dcc89950_Device=CPU_Config=()
+218:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3ebf4d995c8af299693b32b6adabb6a261a3761137ec6c5e68b35bdf0942bd85_Device=CPU_Config=()
+218:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b0376bbdfc6560184c2eb15a9cff7fc6d6b39c47dd22936fb64629d345e227d0_Device=CPU_Config=()
+218:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=a30154a78e0e565a598629670b87338d03582cbe4ed5547256634ddad7bc9d5c_Device=CPU_Config=()
+218:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=8f7dc81bfce05ce39b694fe48197a4fd2aa7933c7061508be3b9dfefef518f75_Device=CPU_Config=()
+218:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=a1862e486a20c8de71dd94c12a157098ac5f222ba8ba3e1d3edaf9362331e185_Device=CPU_Config=()
+218:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=static_IR=424814fbe4a3ba7a49c506f11509c035212fbdf4ef44fb2bc708c5f201e4e1ec_Device=CPU_Config=()
+218:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=510b36fcb991c73abd98b488eff26715dde08a322b7b9429cd897dce6976dab9_Device=CPU_Config=()
+218:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=be59de0f93d8a22736d98d0aab618839905eb9a04f79c8d88d7ef08c7267f4ec_Device=CPU_Config=()
+218:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=962d8a421369e4dac96b6d89d05053f63c9e5fc8b7b82a60c922432125da80c0_Device=CPU_Config=()
+218:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=739517c4c613063fc5ef734443f0a599400dec31cd5a56686735f3165b2dc2d0_Device=CPU_Config=()
+218:conformance/OpImplCheckTest.checkPluginImplementation/Function=LogicalAnd_opset1_Device=CPU_Config=()
+217:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=cc5e06594accd8694073f3ebe702319fe0711c3b7d4db5e06072d83eeb7cb096_Device=CPU_Config=()
+217:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=88e65a668c1bbccdf69927ed3926a7c273c97f72a7059d1d748ba6b0da8492e7_Device=CPU_Config=()
+217:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f226723f90368b020cf11817ce0a39c002b9c30e07d16ac9297b7e574a010b0e_Device=CPU_Config=()
+217:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2d886a31e22f61d30c33ddd300ba7d8ba1cd9796ee1a4f688db9126b1d8d9c83_Device=CPU_Config=()
+217:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=847ce287888e882e988cdd5bf41277c32c207e38215e1e7d41439890037216db_Device=CPU_Config=()
+217:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=f32_Shape=static_IR=c14da825d470c9141af0ea87eb82edd0866a415cb5ac59f1014c2ded35340201_Device=CPU_Config=()
+217:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=0b603f9cb63e722122080ea36f76fe45b25da83b0b1e213871140e82dea5f405_Device=CPU_Config=()
+217:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=727e029d6373e823f7500e6bdfd1c07ba87fdb3ba428fd0a089885d7a1e91552_Device=CPU_Config=()
+217:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=6167830634e0b253aa78e883453d45bb737cd5df33c849e4b16b99164fd49d5e_Device=CPU_Config=()
+217:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=2463ef4b8684fd6b391fca0b123328e1d695b47017fe94ffe5a419a3c22ce93e_Device=CPU_Config=()
+217:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=96117baf3ff208c696a9796404eec467b613c37977067ff0cc62e39355856d30_Device=CPU_Config=()
+217:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=dynamic_IR=33e67497d576ce6af4a214d55862646d034effd328ef5beed8d7b0f380b6b689_Device=CPU_Config=()
+217:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=4d9f16ede014da56824607d45502439f71b57275c332fbf15c6ba2ec1496466f_Device=CPU_Config=()
+217:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=e894ea68d25e2a0af6fe2e330929c3305710bd07aca8e099b727df78fb26cdf6_Device=CPU_Config=()
+217:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=4946bdb7dec06c2bc8eae33d5903d6fa41bbf3654b13a0cb5cfa4af5a4720426_Device=CPU_Config=()
+217:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=a6b0532b200874d6d1c57719b46f2b301c368ebc35042df00796dfb87eed618b_Device=CPU_Config=()
+217:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=0b0005b038a938c698489da595fd89a45d2f685c831bc172d81b2afc09658dae_Device=CPU_Config=()
+217:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceSum_opset1_Device=CPU_Config=()
+217:conformance/OpImplCheckTest.checkPluginImplementation/Function=Interpolate_opset11_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=244310d1092f478729162ea9a4da5660b066ad7ca70a65d8a205cb03787eb73b_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=57c57d85bad2b76d3d65d88baf2b3677dca6e5d534121e87efd618efbe5b1547_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6903ceb67d029d79d90687340dee0204830d5df1f1ea6fbb09f14a6eca234739_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4d569fc3e7d2fa1724c99fec62e4f31fb000a6f5c306273c404e2b449761feba_Device=CPU_Config=()
+216:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=eca24a51b737307a94a918f4d03923c1e035a3379c73359515c63ff3ea98be85_Device=CPU_Config=()
+216:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i64_Shape=static_IR=469a63c5aee73bdefc9abdf8abd8413713c0b68cc098d16c193399a11c7093c5_Device=CPU_Config=()
+216:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=a65e17fc28c74df4f3b1bad89635ccfc376a857f2d92ba646ca830b03eafab7c_Device=CPU_Config=()
+216:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c078bcf5a6a207fd76d9cddc1a35df577529e71ba0a120b28c7ed17bd12673bb_Device=CPU_Config=()
+216:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=5038017e90f931327d5159938d422b2afc229aa4d776a4ac80a946724fee357d_Device=CPU_Config=()
+216:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=166d8442037dcf0469f0b14ab83676b30bce53edd79494c52a575e3744920c4d_Device=CPU_Config=()
+216:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=155b8d9ccf06f4d8f9ada6024fbe66f39e4e6e96917c12d7ac02eac98c5473de_Device=CPU_Config=()
+216:conformance_Einsum/ReadIRTest.Inference/Op=Einsum.7_Type=f32_Shape=static_IR=b9f9ac285915db9ef3e7437728695f2833d165757ffc81afb88242e7b471f434_Device=CPU_Config=()
+216:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=static_IR=2aa586a55098e1960c204572ca9704bb3b8b9a3baab5fcf08200594261f7bef7_Device=CPU_Config=()
+215:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ed872c2ef0d35af97e7f9be84d83eee6d42f2fb279b71f4feaa1aecefb450a28_Device=CPU_Config=()
+215:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=392b855febfc39fd1b2a9fa43270f58bae53e0d210525e8700edc15a10d28d33_Device=CPU_Config=()
+215:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=88e65a668c1bbccdf69927ed3926a7c273c97f72a7059d1d748ba6b0da8492e7_Device=CPU_Config=()
+215:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=28cabba0fd0acde452552a362925344e8cd8c5af033419d83041bf26b1d14d69_Device=CPU_Config=()
+215:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i32_Shape=static_IR=61760c9c95110bf88cbfb8aa09378cc214d4cbbbd6c39c98feec1dcfbb7d47fb_Device=CPU_Config=()
+215:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=dynamic_IR=0b30cc6cee9ce5400085a0e78b44763bc169eeea93357f22fd716564f20226db_Device=CPU_Config=()
+215:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=966eae58d5c103f24a598a5143d7b3a3c40a12fa2606a65431f0d1aef855cd32_Device=CPU_Config=()
+215:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=6b2c79edda9cc9cce61c98552d6a0d3a3555c9ccac3a56c6692f536a0abdb61e_Device=CPU_Config=()
+215:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=b11ede8f1aee40577413d8bbe89704e02252e3f02805fcc0ded624857ddb8280_Device=CPU_Config=()
+215:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=66375ff8539da6387946c19b0d20e6b4fd57da25150255e41282458e241963a0_Device=CPU_Config=()
+215:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=e22e40a4f300567612f963b17707be4de09093cb9a248aed62af594e7986f7dc_Device=CPU_Config=()
+215:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=65afcce29f554c2dfbbb4449ea6e11f1f1b9b96aa5c8bf73a55796de849b58bd_Device=CPU_Config=()
+215:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=static_IR=2001ebb8291c8bc8cd1db17c172f216cfb3994c57e344eef65565ea9f9cda1d7_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a2ca34430931dd41f08f2b3cb8163ea5c1889a23b53d0f3b7d26b7a8af1acef3_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=62409191ca760efe019eed9d1923c8df9ab545d39f90b1230a58d1747d3143b1_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3f830d5ee243ca3f56d027f95929bbadd427e4954e286e6c890ddd60f9c5c2d0_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=e7b65875a7e2d88532271dfb93a4a0fbe4c41963fee3193cb3de547c19121f78_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d9937a6c3eb62ad6328d7367f15e45758ce5f2ebc0488931855a5b1925574d36_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=d8441d8bc521ac390fb58cb882a952618ebf5892d40e8768a9051f852a9dcfc6_Device=CPU_Config=()
+214:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=72eb2887828b5b14b41d001b6c7277d395f39c8003b9461730a938833899aacc_Device=CPU_Config=()
+214:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=63de0838ea26e3575f49700f73fffb0d3415ab68b29b1a1da690b84f7a034822_Device=CPU_Config=()
+214:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=dcd71a51a6682c9bc461a6cb72d59082352ab8a020e1f79e64c3cc44a37b55ba_Device=CPU_Config=()
+214:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=c30414e8e845d75527c26f62880518cc4d24c1a528b20cefc3b2c32be7436c81_Device=CPU_Config=()
+214:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=1cb2f17dcf4f8b738a23313501e9a98101169cd9e368f3fb98c552f994232073_Device=CPU_Config=()
+214:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=e3a5a7f1a73793457fae9520ae122c6bbbfa92f1daac0ef214e47a2ec7ea18e2_Device=CPU_Config=()
+214:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=2a3d6c0476c17897fd4cc6d3623519fc033ac4022a01fbebd40b461f414f6c15_Device=CPU_Config=()
+214:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=99e405218c1a96c5f8af65aa814893d8958e8e991d1ed8dbbbd586efa589df39_Device=CPU_Config=()
+214:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=66375ff8539da6387946c19b0d20e6b4fd57da25150255e41282458e241963a0_Device=CPU_Config=()
+214:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=2737751bcc195e4aaa63ab6d86d803741817287d78fc864e18a31c328078940d_Device=CPU_Config=()
+214:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=fb5525d36d14f54eebc5670c06232ca4e32cf920d309b5777e37d3377d386433_Device=CPU_Config=()
+214:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f43df065734a36674b3fdc7a47fddd1cfa5c1b36bf73e7de86a100c645fbc7d3_Device=CPU_Config=()
+214:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=87a966d3d3b90cb32db3454c5dfb2f67af86b68a5e45fa1c5f4a75c3b5cb452b_Device=CPU_Config=()
+214:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4bb7bd2471752f1a62dc15dbcacad87dd329443459a90dc6768b1a34fd00c064_Device=CPU_Config=()
+214:conformance_Ceiling/ReadIRTest.ImportExport/Op=Ceiling.1_Type=f32_Shape=static_IR=fb5c74aa3b17b4a8d5e1603b9179b60bf3f0b8301c74a8fb632b6869896439d6_Device=CPU_Config=()
+214:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=c5f54dc9ad0b693c13c07d44fe5572bd91852b0edd57f8f06314df3e71f3659b_Device=CPU_Config=()
+213:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=aa6c3816ce7ce49f40be5edbe957468e80910a8eb5a3956f54d89fdf7c264b44_Device=CPU_Config=()
+213:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8198512c1849e0efe931509147ac4dfed4ddc7ea8d0736a7defb4fce81e2ea28_Device=CPU_Config=()
+213:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7d3a099a5040e70c73014df347c478d0976123d68b6fcab6bf767f90bbdf8e6a_Device=CPU_Config=()
+213:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3f87262a458b0dd0a330ab0cfc48c74ee687819228d3e2e1226df3b02de26afb_Device=CPU_Config=()
+213:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=8973f2f4c2be5d0ed57c94e1aed24bf809e51854c03c2abd73ea37ef7221d328_Device=CPU_Config=()
+213:conformance_Relu/ReadIRTest.Inference/Op=Relu.1_Type=f32_Shape=static_IR=377acd11b0f7dfb4f3e57baec8a6c8a84737857b7e794614542f139982feaf73_Device=CPU_Config=()
+213:conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=1a0e3f63698678d2e6bb8968fbadc98227d9ce548e77c53021412d80d7711753_Device=CPU_Config=()
+213:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=c18d3d2fd8001cb07daaa5000258b36352807e3e81999d2d80a668e4d6add085_Device=CPU_Config=()
+213:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=abd733caa05592feccf41344f138de6625efce4afe605efeea57e0748d7b2e07_Device=CPU_Config=()
+213:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e2d2eef3e776af9379eb35540d8f4c888491082d8333aeb70f58822aa5cee878_Device=CPU_Config=()
+213:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=25ae6295f4d206fa9069e20bc659dbd87c20aaa15c3f149ab25d003641c738c5_Device=CPU_Config=()
+213:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f13ce39b60cc25991465a0c02e27edcb35af0523cd28004adf6fd9acd8a5fcb8_Device=CPU_Config=()
+213:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9360fbacf32f2208bd7f241535752ccaf434551d16bd8fd46d0422cd1cafc3c6_Device=CPU_Config=()
+213:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=8fdd77d8381b78b82c04360bc3f05a358bd690bd8204e2cdaa2c0a65bff61a41_Device=CPU_Config=()
+213:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=319c7b312e9074a43819b034ce82eddf1c8f9e51d4eba3fbc7a112cb6393debf_Device=CPU_Config=()
+212:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=dd575df40c907e85f7561296f2b1b5bb9786bf44bc27f26e33f235ba57391e26_Device=CPU_Config=()
+212:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0b7d6fb137555d6fde92f0c9b3e6278715adaeb38cf760236070b17bafb5babc_Device=CPU_Config=()
+212:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=5295b6c6090a820891e5754c34d03dc3347d3436fa16fa4a701422ce8ac78b92_Device=CPU_Config=()
+212:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1be99c00176df777bd8cdbd9f74ff064237f55053dc7490050d692274182182d_Device=CPU_Config=()
+212:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=static_IR=7c1b4dfda36336bb1a943fec9786d89e220f2a811159fe9cbed7d51186f8fdfe_Device=CPU_Config=()
+212:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=7a1d8cbdb446c3330ed845d7a81e20d9b7c7662d532f4d816d4fc1c56822fa81_Device=CPU_Config=()
+212:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=static_IR=87c65c520de106b146e91222609f5b25cd79e96cdd6b942c3293cddb656617ee_Device=CPU_Config=()
+212:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=592176a8c97f4d759a0c6b3ef56c3610df4a0df4743f3be7ba3ed2ffb5dcfaed_Device=CPU_Config=()
+212:conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=()
+212:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=e1130d42d591780dd2a746ce7ff874a2bf4725ca9fd09803932ba4a7b0b389aa_Device=CPU_Config=()
+212:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i32_Shape=static_IR=4e2e2e9dd89aad4bc14634b85c94336a7250dbb8ff61cb451c9507753f54a102_Device=CPU_Config=()
+212:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=e0422b2fb57587a85d9ce1532f7fc28a6bd01e72a325d42d9045419dda4bbba5_Device=CPU_Config=()
+212:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d7e3ea8c5ea46f1b0430b6a2763c85395235c0ac58652e1d269e1257f6dbf7c8_Device=CPU_Config=()
+212:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=94ad9524c175a0e0d2fe22bceeac82b0dc66006caa0942d343d551268e03afec_Device=CPU_Config=()
+212:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=839faaa50aafa2e3ed38fc682d0759304b694043dac1a242a085e2973aac8091_Device=CPU_Config=()
+212:conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d671a241de6d46bd5562def47a92602d2c9ba076568feed303765168433ee89b_Device=CPU_Config=()
+212:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=6e6c053ee1974a5d036c6d549508f6d43586d501c72db05df9930639ad745bc4_Device=CPU_Config=()
+212:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=f7bc08f4bc2edb455c7afc9cecba3666df1150bf4e3a67a20061714f867ddb0f_Device=CPU_Config=()
+212:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=18d294f76a0d8f4562d85033a45aaa3f2d50fdfd9d9f30e295a772fd10540d25_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e6aef819fecf54f7c98b15f330074d33ea0ca6c8fc3edd895b362f04449b12a7_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=bca72a16df5bcf81d10dfbbb0e53aceb2a8a70ec94d4247d47333679de7214c5_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=42fc9a4f8909a26e98a1938054417339bbc3c21668dfa2792da78fa1ed8eb49b_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=0e58762b5cd9926391cba6f63db3c7db49285b900ad0abc93b4d05d4baec800c_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=d51bc4204bb6079e79da8d0cf95ab8a3454c90a040aee0fc6fedb00f0795c577_Device=CPU_Config=()
+211:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0d74ee98934e32799620ac90fd3ae8335bca026b9225782458949c64139d89c3_Device=CPU_Config=()
+211:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=28cabba0fd0acde452552a362925344e8cd8c5af033419d83041bf26b1d14d69_Device=CPU_Config=()
+211:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=b9581fac6848b0c6c9fc9af5fd17eca3f2f64832fb7205f97684f1cc4c1985f0_Device=CPU_Config=()
+211:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=21950c433f50ded0f662b9e0591e756a8dd685bc11a8296bcacc57ca1a4968b4_Device=CPU_Config=()
+211:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.1_Type=f32_Shape=static_IR=a4fe57973b0bba01e6038a8050f07b8ad1bf6871c1ad86270920f9084dc84905_Device=CPU_Config=()
+211:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=9ec266d6550d7e0c9f4d6114272d7afc80ad822b0bf5078654598b3d623f356b_Device=CPU_Config=()
+211:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=f32_Shape=dynamic_IR=848de524e27e13a1e5b33e5db3cdf2710ba4566c3219a018e878f998c07dd718_Device=CPU_Config=()
+211:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=0d413b2d40036984ce2b85933c4b5ffda416e8779a20b602095d2654db296d58_Device=CPU_Config=()
+211:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=18697d690af0de3ff8365a5aafa6ebc7d8e14418c3ab5dd55b3b505d2445ac86_Device=CPU_Config=()
+211:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=9f19f32ddff44c1c8f7dc3b9b244a9191a15fef9874e016666fe6a817937f699_Device=CPU_Config=()
+211:conformance_NonZero/ReadIRTest.ImportExport/Op=NonZero.3_Type=i64_Shape=dynamic_IR=31f428e60ddfdb3cb3c98c2cc858d0409fd35c5e6e97f9dcdfbb20a876c475a6_Device=CPU_Config=()
+211:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=28dbc474828462a812108c43a47aa4e70fa0d2e8e814bef5916092f3e8c7a2fd_Device=CPU_Config=()
+211:conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=3e893f54d0ed092823ca8e256e66c367f53e466f30573a7b5911a432d88299a2_Device=CPU_Config=()
+211:conformance_FakeQuantize/ReadIRTest.QueryModel/Op=FakeQuantize.1_Type=f32_Shape=static_IR=48256cdbf5a3d19f0b7bb6b0540cbd664a36885a88fa8f5f56da7057de97a608_Device=CPU_Config=()
+211:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i32_Shape=static_IR=349d64660bcbb9269f88067431a4b8fc31fcfd09ffb1afa9f3ecf4bc37e8c4ca_Device=CPU_Config=()
+211:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i32_Shape=static_IR=6ddb35aeda2a6cb63282d2fcf6503aa02135ad60e23c752280ef82aaf6a31191_Device=CPU_Config=() +211:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=dabed23c3f30d92c6fcca7a6845160022837de8cbfa1077c222e6f1224b745e1_Device=CPU_Config=() +211:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=b2dd13c363e41fef66b0dcc3e21e77b9a97e413c1c89f8c8a53179b05f01c2cd_Device=CPU_Config=() +210:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=8c43b49d99c64bec883205ca15c7b2d9dbb47b9fe5140fedaeb8eb7220a36f6c_Device=CPU_Config=() +210:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=7cfdc0a97fd79a5d272b29850c24dad4a0a8f147ea89b7683c98fa203a448c52_Device=CPU_Config=() +210:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=7c43bd989494b4ef0f2ca40c3b0c57b471d58b21491456e9588938f702721be0_Device=CPU_Config=() +210:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=dynamic_IR=7a1d8cbdb446c3330ed845d7a81e20d9b7c7662d532f4d816d4fc1c56822fa81_Device=CPU_Config=() +210:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=6fefc3626ba6ef60433d3635bd5abeb3e7025277a86e2fd9d92234ff099c303e_Device=CPU_Config=() +210:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=2ef8d38ce64fd0460d641e6f3bfcb1654bbe3d2c25f9dd244ae259eaa4b6941b_Device=CPU_Config=() +210:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=i32_Shape=static_IR=201b881bba09ed67334d9489a1a8971e483120bd1cc75a1aa1c9f015f760e002_Device=CPU_Config=() +210:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7373e7e64fbb2fabed337c09be0d6b42c5cfad39b26d92c6dd74810499863448_Device=CPU_Config=() +210:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6af32fc288bcbd414ea63525c4345aeda74ab21c44aab5910f85b8b7fb5d1179_Device=CPU_Config=() +210:conformance_Equal/ReadIRTest.QueryModel/Op=Equal.1_Type=boolean_Shape=static_IR=857447d7e14c7516667094409cf5ef351000344fe170570671be0f71834d04f9_Device=CPU_Config=() +210:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=45c9fd0289649c455939587c623f1884a4e675e2f970192d9ac2f60a65e6da9a_Device=CPU_Config=() +210:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=0bc2bfc4481de7733f5503750d21376d00de6bfa699ecff3ee0c4333d9515db8_Device=CPU_Config=() +210:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=55c7f63e25ddf106ebdab6f4eab66f1be6950cf7a68abdb5b7e9a395d2fa6add_Device=CPU_Config=() +210:conformance/OpImplCheckTest.checkPluginImplementation/Function=IDFT_opset7_Device=CPU_Config=() +210:conformance/OpImplCheckTest.checkPluginImplementation/Function=Assign_opset3_Device=CPU_Config=() +209:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9e21c0af425c90066d92577a0b8aadb6e9fdee50c197b15eea040b89eb715a6a_Device=CPU_Config=() +209:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=5d738cba54fcfd696b0cb7e808dd466b4510900ccba26c728b5eb272a55d6bab_Device=CPU_Config=() +209:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=104a69286d09ab8a5a88403ce6b421979659231fe5c5f973393216607a995dcf_Device=CPU_Config=() 
+209:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=1942042c790c3fc6053ad91fa5e45f8ebf3c11bff7e3427a71b8fdc1bc5db053_Device=CPU_Config=() +209:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=dynamic_IR=9feb072b58552898ff80a05dffe8f39c880b4f2a2382d56cb24a78e278ea1756_Device=CPU_Config=() +209:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=fe8bea06bc602fce2362c5c7671e3c8cfc63fee6bace0be9baa41e9874e86b26_Device=CPU_Config=() +209:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=7cfae687d408da17a0405d88f47e2b6623a608861114dc76018b8a2142453139_Device=CPU_Config=() +209:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=8216637727ccef527454bfdea7ab22ccd4e5e29709494bf96dde5af3b4a7eaaf_Device=CPU_Config=() +209:conformance_Clamp/ReadIRTest.QueryModel/Op=Clamp.1_Type=f32_Shape=static_IR=028177a440f430edc5dfd7a7f0f2c0dded422876a98b6da66a647ad9aca10e57_Device=CPU_Config=() +209:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i32_Shape=static_IR=1c63f30ce7cb977ac945ee25eb97f3c472a81b999eacbcdd4b3bfd253f25cb51_Device=CPU_Config=() +209:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=cc13aaec2a2bbe9b760651d358622114b4b0a20cb106472bd8519f0fade61dcd_Device=CPU_Config=() +209:conformance/OpImplCheckTest.checkPluginImplementation/Function=Sigmoid_opset1_Device=CPU_Config=() +209:conformance/OpImplCheckTest.checkPluginImplementation/Function=GatherND_opset5_Device=CPU_Config=() +208:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a48d232b00b4d4a735d6b9999c29b413a32cd7f05c104610a11cab01465a3887_Device=CPU_Config=() +208:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=822cfb44c999b67217c8fff1da18293fcbd3a8a71d901d95991ad6df22398af2_Device=CPU_Config=() +208:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4ef9d8687805658001fa7650e660620d74bab09868b356603c268bc8cdf7a5c7_Device=CPU_Config=() +208:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=461dc8aa282946831fdc86d1c024a273ac0f29f5ad615cd55b879feea6d23007_Device=CPU_Config=() +208:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2d886a31e22f61d30c33ddd300ba7d8ba1cd9796ee1a4f688db9126b1d8d9c83_Device=CPU_Config=() +208:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=f32_Shape=static_IR=fdfd59e3d316eea2f9fc3c56664cf1a07603bb6e26d1b367987d5046526ac60e_Device=CPU_Config=() +208:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i32_Shape=static_IR=fe8bea06bc602fce2362c5c7671e3c8cfc63fee6bace0be9baa41e9874e86b26_Device=CPU_Config=() +208:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=3688e2a973219245d05c5fa675cebe9036d40777809ebf583c1bae9b9f87eed6_Device=CPU_Config=() +208:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=f32_Shape=dynamic_IR=f550a37ab884668f47ed232e7119c2a2baa814c98fbbcfa3129e7a00feebde0b_Device=CPU_Config=() +208:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=9c32e47cd885805256c3e3053412f7d8c448762b4b509507f6e4dd78e2aeb56c_Device=CPU_Config=() +208:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=3c7c072c9e4ee694e049a5f256cf0e72caf85384291ee8d399ce136d22c575a3_Device=CPU_Config=() 
+208:conformance_RegionYolo/ReadIRTest.Inference/Op=RegionYolo.1_Type=f32_Shape=static_IR=1cb2f17dcf4f8b738a23313501e9a98101169cd9e368f3fb98c552f994232073_Device=CPU_Config=() +208:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=01b095b8763565527be0de9edff565070949485db907493e99e95c2cddf6abaf_Device=CPU_Config=() +208:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=27e8804992c0d74c18c958f0876c06be6c7eda2b36fe7de3ab616b577dce13c6_Device=CPU_Config=() +208:conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=6e67522f2df32ac8e237fd4de148d082f3c55e6c31ace80cffeaef784dfe75a0_Device=CPU_Config=() +208:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7cbd5676618d9b507238807c281801b8a817202b0ae648a44cfa32fc16c02547_Device=CPU_Config=() +208:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=i64_Shape=static_IR=32ab4bca2ccc66d25b8b9ac449dbc58666434d98aa5b789e1aa28726c530986e_Device=CPU_Config=() +208:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=3d20de7392465c055c84dc20d0af64ae6d14809f5a6e4bb05e315a2654066f93_Device=CPU_Config=() +208:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=362638bf065f1917d2b4dac3008a8f46f8f8d64a80d2442c1ad98f4fb943cff9_Device=CPU_Config=() +208:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=c5f54dc9ad0b693c13c07d44fe5572bd91852b0edd57f8f06314df3e71f3659b_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ea604e7edf80c14a14bf7fcb042125f4d666d0d69ce3c0209c2f9dce26d406fa_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d4b1dbc565a45f6c9f60cd4a73bb15c0f9e05baadfd3acdcd5e133d782c54cbb_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=243d5b44a22822e90c2f6b7c2817f8110bd6a060331e686c1fde1869f3392db1_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=2cc50ee05a039bf65fd7be2282284790d9d2e1fabb4cfec509f5bed121152d93_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=9aac77567d944de6632688fd3de80c0b3da1ee741da639897c2104d3121d690b_Device=CPU_Config=() +207:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=71c0c7e3176ae8b233352c89d47a61394cb46695e7879118ed02070a4a23d5e1_Device=CPU_Config=() +207:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i32_Shape=static_IR=98932a2171e1c93b2bec3991892faaac027e1c319e91b9008ef0d0f469bcb0e7_Device=CPU_Config=() +207:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=i64_Shape=static_IR=d4acbcb1930b26610eaa33c0bb8aa7fd866d8142afda9fd007226f0ee6fa5c36_Device=CPU_Config=() +207:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=static_IR=44dceb7343477ff50d3de4be1567a57a97d2e3c6f92b48fc93d20eea80487862_Device=CPU_Config=() +207:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.1_Type=f32_Shape=static_IR=fbb53c04f3cfadff9d6543e2fb4eb88d882c3189b4212e77a6ca6e50bdba6e07_Device=CPU_Config=() +207:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=b81d993247e604272e6df01b8c4ba016be7f60263c892e8469deef67a8a6afba_Device=CPU_Config=() 
+207:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.11_Type=f32_Shape=static_IR=d507892556310f7fe85cbf9245ddf040b219ec8cfe9c779809180a011caab9d6_Device=CPU_Config=() +207:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=dynamic_IR=e255ef2321233444ce6e4fdeb513a9b271987457aa9bd456948b64f589de1e2b_Device=CPU_Config=() +207:conformance_Floor/ReadIRTest.ImportExport/Op=Floor.1_Type=f32_Shape=static_IR=b064511ab38a9a70b4d203e11a12b990f388a03550ba98c65468be1b85c68fda_Device=CPU_Config=() +207:conformance_FakeQuantize/ReadIRTest.QueryModel/Op=FakeQuantize.1_Type=f32_Shape=static_IR=935369702948a57e71d169e75218162f370b48f344fe819f11112c011b6626fc_Device=CPU_Config=() +207:conformance_Exp/ReadIRTest.QueryModel/Op=Exp.1_Type=f32_Shape=static_IR=67632b67a0834136cf2f3bcd6b3fbaf0d2f2bbffc1da6c33fd5fce0d0b8a763c_Device=CPU_Config=() +207:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=5980eb1b6c7a44c7812f89f10f0741e5925abda9ad07e1a82ae2a3310abae74a_Device=CPU_Config=() +207:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=8411c0432159fb60adefa760384515552240bc6220800a736556d7461765eb60_Device=CPU_Config=() +207:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=e61665bc5590265246ab882bb55b9487e81412012ed98ac9cb16154bc8eddd17_Device=CPU_Config=() +207:conformance_Clamp/ReadIRTest.QueryModel/Op=Clamp.1_Type=f32_Shape=static_IR=785551399ba4bb8eb76271bf698b3ca795b8388338f110843d5c78c03009625d_Device=CPU_Config=() +207:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=07b257862a62290d7e8ae939147bb7422992528bf54209b8d1bff500b99b6f4b_Device=CPU_Config=() +207:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=550d5d475e0f53be8506153a78626cd5a5c0a949b9bbd9e2fea96a4ba2f7b908_Device=CPU_Config=() +207:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=13e9472dcdeb5e6ce2928191ed13dde08b6cdd62c82c94e77469d8a3ed94e39b_Device=CPU_Config=() +207:conformance/OpImplCheckTest.checkPluginImplementation/Function=IsNaN_opset10_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d8fc538fc2b9ca150eb22763e4c7416c002b5c7fa6481314201540178e940a78_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a4b3740eda9e6bbd3968dd39e6abb33b22a90a811298df6a761958216acb389f_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2f96ff03126561aa475067ad88e454b2da78fc8f0b816dc6c01ec5c81568288d_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=588ef4d887ae9d8ad432525108c81a9762dc27490a3e01d3e86795c73275148b_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=90deb33b54746ec16cf8594f8aa0792c6aab2e27ff12ed97523da583402aad95_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=2ebbd25d315f10aa32cd8251ced4e269c1688202ee64b4fb5245e4ab53cba16b_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=2606bb36cbc87d845c9f678ac84e47c0893f0b86a3b675e70018d1e535234875_Device=CPU_Config=() 
+206:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=fc8b85b03281a7e8532a130a70fcfce5b6c40b1c8863eaea3910013a0bc4e769_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c08b3d30c1b4f1b5456e4791d4d7fab1d21f743dff0dac1ae5d09abc6764fca8_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=2435ff5e2ac06afcf99563821fa2a2a5e4a9456cb3f74154b3eb364a6f0e450a_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=0e5b8f44656b680d14f7b7aa3293d8933ebfa82524d6acc09e41d38e8efda726_Device=CPU_Config=() +206:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=42fc9a4f8909a26e98a1938054417339bbc3c21668dfa2792da78fa1ed8eb49b_Device=CPU_Config=() +206:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=a449aa561efb222cad1a414ee87443f9fec0e5c2f6220f6a57b6705c9ef26cd6_Device=CPU_Config=() +206:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=f32_Shape=static_IR=dbc3b2f724614a68d750ae4adfd7d8239c77ced05d30f89deabe272f104a5e75_Device=CPU_Config=() +206:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=u8_Shape=dynamic_IR=81bbb9658ad214babb825fa4b576aa83a9ceaae7dc0b878a84e42ea194f3ec13_Device=CPU_Config=() +206:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=e77dc4aecdbd4ab3d67fc3c1d9e350a9d259af1d4c0188d680121a31c6ed8ccf_Device=CPU_Config=() +206:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=491b849a7ce8fdb2190df5415fe037ff02fc23814efc520c343e872f539d6e55_Device=CPU_Config=() +206:conformance_Greater/ReadIRTest.QueryModel/Op=Greater.1_Type=boolean_Shape=static_IR=aed960e9b7608b89973346cc2ab23c7ff65e72275fa55daa8b13f925a3779701_Device=CPU_Config=() +206:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_Shape=static_IR=4e2e2e9dd89aad4bc14634b85c94336a7250dbb8ff61cb451c9507753f54a102_Device=CPU_Config=() +206:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4a3c7edd1efc847f3d1255738c19cdaa682c9348c0b0bfc466ea9d5749d5eca4_Device=CPU_Config=() +206:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=b7973bf8dd344289b971d9b47575d6793643f503e13bb83c4e9c2a2863570b7a_Device=CPU_Config=() +206:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=i64_Shape=dynamic_IR=c3d754fe46cacaaf519f39fdc6feb9df6b23d92f6271f6e731c2a8ddc24a948e_Device=CPU_Config=() +206:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=b92112b2ea2f233a6fb6ee512363082a49db0f85ab23f89dc29ad907e6ab408f_Device=CPU_Config=() +206:conformance/OpImplCheckTest.checkPluginImplementation/Function=Negative_opset1_Device=CPU_Config=() +206:conformance/OpImplCheckTest.checkPluginImplementation/Function=MVN_opset6_Device=CPU_Config=() +206:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExperimentalDetectronROIFeatureExtractor_opset6_Device=CPU_Config=() +205:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=5a82d5761e70d13623af2cc6a6eab20a7a0657ac28f38223e34b63d6cbc1224b_Device=CPU_Config=() +205:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=9a26ec9d1e0c4963016ff36986c79f5faed763ca5189215923d375e43c70a17c_Device=CPU_Config=() 
+205:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c599f8f5de2a73e08727a5e27e2f77989b4c5ce9a5e70e6b98ce4c87e8aa26f5_Device=CPU_Config=() +205:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=952a43f4c368721e0c69418b71fe89982ef1eb2be0671653cb1200e34cb4bda3_Device=CPU_Config=() +205:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=d8441d8bc521ac390fb58cb882a952618ebf5892d40e8768a9051f852a9dcfc6_Device=CPU_Config=() +205:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=c662eb0004f431152ddc69e12826a6c0e7aa66b24be0169acf10ca95f2a63f52_Device=CPU_Config=() +205:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=f7e1aae2dbc817ca8f64a6bb0742e476055c239cc6e31a4233b7580205feeb41_Device=CPU_Config=() +205:conformance_Relu/ReadIRTest.QueryModel/Op=Relu.1_Type=f32_Shape=static_IR=99a80c495a8fb4626995167a3ad2efa0efed7696459f6219125414a2bd20dfc5_Device=CPU_Config=() +205:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f71810b04667907bc88c4a1ecc28b9325fde04026b5e56b5eb0e2d6608f3742_Device=CPU_Config=() +205:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a78437a93ab424a706d064188d1bc0971b2e1afc98a74fea979a6f8b99036597_Device=CPU_Config=() +205:conformance_Erf/ReadIRTest.QueryModel/Op=Erf.1_Type=f32_Shape=static_IR=906676850a62f56935dbd13792be1013db602488f29eb757a546b411699ccdd5_Device=CPU_Config=() +205:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=98c0b6c23e4cf51a6069f306109ea2b4e181cfb8e552482cc0d0e63c61406933_Device=CPU_Config=() +205:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=717ea579a24839ee9c5ba7c59a07af667fea4fd44ee18bf60e8970264852bde7_Device=CPU_Config=() +205:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=fb6a053d244fc1bdea6fd5e69e0c05025272ac0da2f676e077c598239b6493c2_Device=CPU_Config=() +205:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=fdb744ee1deeced50395d992d949989a5e8bac5d4f73a6d4b51a56f22359f4f1_Device=CPU_Config=() +205:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6587874c50811a2ca7e27f84cb4381e9a06eb4465e940ea877c76dfaeba02753_Device=CPU_Config=() +205:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=622add2dcd72d2e1560e983ef4aad56fd35b48b71964ea8204137026f445d37d_Device=CPU_Config=() +205:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=6daca83f4b162285c00c695825e255cbafce9cf9c9cea68b969a301105475303_Device=CPU_Config=() +205:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=static_IR=5be0b1c69be525cbddd7996b695c1a4a9f380173d03f291e8570df76c050678b_Device=CPU_Config=() +205:conformance/OpImplCheckTest.checkPluginImplementation/Function=GroupConvolution_opset1_Device=CPU_Config=() +204:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c80a104f5912c91154ff9731be5aaf1ce189988eb9689ebc32cf4bb8f1307615_Device=CPU_Config=() +204:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=089d73af2221696ce3755a9f33124c9af87fd3e860a1d4f229995eb01ff46703_Device=CPU_Config=() 
+204:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ae9604aa3fcfc361f87562022cf6996fb2cdd9c356eed6a6eaddb14e103b6b73_Device=CPU_Config=() +204:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=4e6262ae12e4f9470a87cc4f1cc1ef2a817a8080e25a79ca4ef67cb60a558b41_Device=CPU_Config=() +204:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=6095afd484c177267854bcab902c3057a2a1bbf37b2188d3a31fd2cec48de2fe_Device=CPU_Config=() +204:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=134ff6b704123c583b694d7023c99cbcfd10a1afc48819ef35b46dc4d0bca500_Device=CPU_Config=() +204:conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=static_IR=2b1509d227d4c32fee4bb0b7ac59d4ecf5018afce9fd19714067a20d01933455_Device=CPU_Config=() +204:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=f32_Shape=static_IR=bcb10a9124f9b0471012f9e22d4aed5c2a47a55e652312e8a8382dc0d809a23e_Device=CPU_Config=() +204:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=939b665ae35f9a384e3119dc3bdc1904b105de495d262648282c859b0cb4c9e3_Device=CPU_Config=() +204:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=a895a5053f72560fa5e36ce8b68a8de0cde25ddc1152cb1f647211f1b570d172_Device=CPU_Config=() +204:conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=f32_Shape=static_IR=99b432aa5821136994e06b4e3c690a4e298bc5a496740ea2c5fe6aa300edacf8_Device=CPU_Config=() +204:conformance/OpImplCheckTest.checkPluginImplementation/Function=Atanh_opset4_Device=CPU_Config=() +203:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=afd856f31f3a815b84c34b66e1ba0a70a313301ce82fdccc2f1b779ad3157d4f_Device=CPU_Config=() +203:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=244310d1092f478729162ea9a4da5660b066ad7ca70a65d8a205cb03787eb73b_Device=CPU_Config=() +203:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c990afda81456723598f8f4085cb476376b1789d7f755e340e1d5498bcf02080_Device=CPU_Config=() +203:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=85d1eaa250a32acf89b675cc50f513ef3c7df50ed9d68f2cff2fc89db41b63f2_Device=CPU_Config=() +203:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i64_Shape=static_IR=7b9883414482f3b1108e549a9c47bb8a8aa162d962813c7e99411d000e02690e_Device=CPU_Config=() +203:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=299e5f0fc159bf294093a5e1f258f7083fc54a08cbaa3a55b2a2197d29ae780c_Device=CPU_Config=() +203:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=2b02493e0e09536d01441e885df61f27f2202a3e16742695bcc4d1d0d892c56d_Device=CPU_Config=() +203:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=cbd1111f323b8e6d78b59b531708defef64b90463f973f64f52251795ac5a7dc_Device=CPU_Config=() +203:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eba756a8d0ce89c9a8df50baeaeb82d5b719461bbaa06386db7e1be10ec535f3_Device=CPU_Config=() +203:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=CPU_Config=() 
+203:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b0dea4cb6a0cd2380e8657b0b64caab43819c0f8182ed73b2cb12eec608bfa7d_Device=CPU_Config=() +203:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a52a8e6ef7bbeacbc1435cde72a1a70bdb8a3abf78b5b971c2ecb1135cb4c136_Device=CPU_Config=() +203:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=93788242c06d787e33afa50ecbef5372898e50024d0c88624056a752535572bf_Device=CPU_Config=() +203:conformance/OpImplCheckTest.checkPluginImplementation/Function=Sinh_opset1_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b01e9e819c2e610a4fdedcb693f536f99b9dbdeccfcf9b0e70dc37c19c365421_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=125ec4e4ba4158d3a6d1a7725cda9a18a220926d5ad6ed623a1433688c79b579_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=5bed52483d61091259db863ffcd3b09c190fedde5dac72edad6f1bf37230f344_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=a45c7a05cac7f403aae101f22fac53b2697d90dcade1bb550200ce439cda7914_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=1b46ce72aadab0dcf92991f242e971bbb36689e1bcafecc68d646aace43291ed_Device=CPU_Config=() +202:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=e7b65875a7e2d88532271dfb93a4a0fbe4c41963fee3193cb3de547c19121f78_Device=CPU_Config=() +202:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=9fa81cf001e6c48dfcf4e75aa77f95b3dce4e8d48b6ec3cfc896dcc08006c62e_Device=CPU_Config=() +202:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=2ad5b63ed56c3966570062970125d1cac16629595e9ac34c6613cf00d6dec0aa_Device=CPU_Config=() +202:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=574e53e574b1a6e0bc16a7296aadd78785cac535293e956b008b0a2274b7cb36_Device=CPU_Config=() +202:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=d9eeac72636735d7541c2d0ef14ebfc7d4a1b3598c08c136a9123b2ed89e13ef_Device=CPU_Config=() +202:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=ddacee38f2bf3dd45ddd36ba236440ae28b9737487e0fb186c2b9777c0b557e9_Device=CPU_Config=() +202:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=7988ae4f263061e530c61f5987afd5e7f1945ecef9fcded2bc9799afdcec0df6_Device=CPU_Config=() +202:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=93f586b65926f2fb89cf5cc3379013f6df6964cb757fb3396060277dd393bb12_Device=CPU_Config=() +202:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=7ab51e173b82572bfb29cac5dfdc326e3689e466c68cf91590dcbdddf1f530de_Device=CPU_Config=() +202:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6017d3f7ee3d7e667e8e7e4881f9aae335d47c8617c92b18ec370aa0770314d9_Device=CPU_Config=() +202:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0182ad6b02d77803accd2ebe55d87b679950570d1dcfef2940adcbb5fb9f1a24_Device=CPU_Config=() 
+202:conformance_Convert/ReadIRTest.QueryModel/Op=Convert.1_Type=f32_Shape=static_IR=19d36388bdf9535fef89243d6dfce670fc91377062ed4b3095ea55b88e4f296a_Device=CPU_Config=() +202:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=674e2446a2f5929d12d36f14451d68e7b55ad61d2d8df755e85c27c4a52943e3_Device=CPU_Config=() +202:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExperimentalDetectronGenerateProposalsSingleImage_opset6_Device=CPU_Config=() +202:conformance/OpImplCheckTest.checkPluginImplementation/Function=BatchNormInference_opset1_Device=CPU_Config=() +201:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b2fc03d707298e863f83bd3912617e76e63d0fd922c87edf912c17bf51cc1fcb_Device=CPU_Config=() +201:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2758266e894d04cd7283921f678a468cc1fced81d1a09a3c95add3ed9e5d6719_Device=CPU_Config=() +201:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=15995a372d69998eb6a001f53486201fa9bbc89fb608c7d2a447203a404713ea_Device=CPU_Config=() +201:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=d6be1de020bbe6277d8cacd77eece21f766e5e39badb520ef29e880d52e3604b_Device=CPU_Config=() +201:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=19f9f95d2205816fc002d8eaea7cfb19f19218fbc3528e4932b99f1486b62827_Device=CPU_Config=() +201:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=6ab37e1d52328b5ce1204cfe13977b06dcfabeb4acff9821d65ffc91bd3cf09d_Device=CPU_Config=() +201:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=f1e43476084575ad240db6631f433a61ba2076d1ca95e44a0e4471ea9d6f66df_Device=CPU_Config=() +201:conformance_SoftPlus/ReadIRTest.ImportExport/Op=SoftPlus.4_Type=f32_Shape=static_IR=443141d6914003828f76ac1de39cff68ee8ae96b2524fc41e9f5f95707b834b0_Device=CPU_Config=() +201:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=3326cf79d426d1a669158c4db8256fdd956fa4725b0d6fb9e8ab5e5206612eef_Device=CPU_Config=() +201:conformance_ReduceMax/ReadIRTest.Inference/Op=ReduceMax.1_Type=f32_Shape=static_IR=590a910a27283b92d7a4650bba546a3bec08a6ded604bbe8523ab3c6d734c70b_Device=CPU_Config=() +201:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i64_Shape=static_IR=f9377788ac0fd1ad0a7f51d16543722cb5acb69640745df17d9f41f5d1d0b544_Device=CPU_Config=() +201:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=e14dc7fb2cf0b63fd08f616d407511ff2036109a3e105dcb87398053d1c334d0_Device=CPU_Config=() +201:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=2c5e9a1cd59ec2d5786132697bfcb1519a7857cdfe06038bb39abed39c09e9a2_Device=CPU_Config=() +201:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=7d3d30fa9e2a8a839cf42249de3eb8228681229e8b302ff7f290cc0d00c10a1a_Device=CPU_Config=() +201:conformance_Elu/ReadIRTest.QueryModel/Op=Elu.1_Type=f32_Shape=static_IR=1cb500b61fe11278cc50fca509be3e7b654190294dd581c7862ea3f108e0c192_Device=CPU_Config=() +201:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=68f6c55980c58f4d6de9e948d1c034b712cf74de509d8fd825fe7f7dfb11550f_Device=CPU_Config=() +201:conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=static_IR=c0c33bc628fffda062b4f013c7d41d0f9080f14f41e084ac547099384a9b3d20_Device=CPU_Config=() 
+201:conformance/OpImplCheckTest.checkPluginImplementation/Function=StridedSlice_opset1_Device=CPU_Config=() +201:conformance/OpImplCheckTest.checkPluginImplementation/Function=RNNSequence_opset5_Device=CPU_Config=() +201:conformance/OpImplCheckTest.checkPluginImplementation/Function=PRelu_opset1_Device=CPU_Config=() +200:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e4523b73661dc593224b91713f8f20f1c87513a62e3b8ee8265e1136eb74f9ed_Device=CPU_Config=() +200:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a2e1d1400763fcb89889255855a5c99dbbb17ee5e390e891c94211308fa2d725_Device=CPU_Config=() +200:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=90cf12798b53937dd1a31daebe5444e1c10c27c5a67fcde6dc61b5feb1df89ec_Device=CPU_Config=() +200:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7895fea00309326a052d47dbd2f9e562b86bb9d0501f2a2fd8843a0340359b67_Device=CPU_Config=() +200:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=18763287c1afb7684d3f74e91fbb8a8c17a13aa52908a5d97b6ad220c5c4f633_Device=CPU_Config=() +200:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i32_Shape=static_IR=c78feba7097eb1c59197840a7e5510c26faeaa51ff724d643dc1f1ec077a6344_Device=CPU_Config=() +200:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=8d472bf25f969c5ab5eb85fb198c2176766a2de7cd444819e8b60d416969e3c4_Device=CPU_Config=() +200:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0534fdfa97228a6aacf4ed196a9ace8e09d8e4decdcce058176b0312500b6c07_Device=CPU_Config=() +200:conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=848caca8b0b971d54e9c9b715b8bf35e0a33f1274d50a946384e64e5c0843a96_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8c773c776396a2ff506691f857349efa9a4a580f1e046d1f17ff2ab49c73553d_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f9e738e5e947a25c9a0d18fe47597f10526e8a74e9d72b35fd848b73f4c80b0f_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6a05cd292e71af9d96e456cbc515097d5224a9e41cd9c3d48cc73f1a4e6e2164_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c14aca93b401d9d2325a5396c1489e1fa29aaa57f592cd2b4e6792ba5af90a90_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8eef79ab2081a12ed39f5c6f8f2e917d14685f54ccd0fcb0e19865740ca7d608_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=f89eecd15ff45d6929f82696f96a68adfd694043ec3f859952d80080bd140627_Device=CPU_Config=() +199:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=e2da6d928938b6445170cd69fd4a7aab40130a560cef3ffa2d268a428f56fcec_Device=CPU_Config=() +199:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=static_IR=fe80951a0a44625457a6106d8613c9813c9c0b8fe3606fa5ac1c064217c8a0e6_Device=CPU_Config=() +199:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=d25e26d9a54a5dc9799e9881e3035bfd5f125d12ea6cb69fb1eb0166e29ec88d_Device=CPU_Config=() 
+199:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=c377dc784ecf97aef916740686298f47bc82c7c007326042ffe748e91ccfde1a_Device=CPU_Config=() +199:conformance_Split/ReadIRTest.QueryModel/Op=Split.1_Type=f32_Shape=static_IR=f5807b455d9031305e8225f2b65fd5cc289f61785d762f19a275280085a2e5e8_Device=CPU_Config=() +199:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=1c6447222d58a16595cfdd8b216fac2cb3f005d2b236a6526ef5de8e272e4847_Device=CPU_Config=() +199:conformance_ScatterElementsUpdate/ReadIRTest.QueryModel/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=() +199:conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=ae7b6a45a538bb7f65d5895f2f7941fd9048645482faa40adb1f773e282a946c_Device=CPU_Config=() +199:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=af272d91ad67b0c830585f82cd83729fd832744707be8a2be800f76f3faadf6f_Device=CPU_Config=() +199:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=21f786ad25c17eff66f16501d160439b96636a7d5d8512c1bd3db5fb5d5e6987_Device=CPU_Config=() +199:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=fc530f5b6bbe8f06808eeaba33889867e705fa69591d01da4dd3dee9515f323f_Device=CPU_Config=() +199:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=b31dbb99720fd5083e5a7e5b1b626bda91455999e2918eb8e658992cfa6588dc_Device=CPU_Config=() +199:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=dynamic_IR=b7b0a0b3921a1e1434a3fef630e32b124c810e8bd15a3e861fe7da79158947b2_Device=CPU_Config=() +199:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=9d4ebc5a7d195ea0e566695253c38ac5d02fea1f4fbe97396828ef9f7754808a_Device=CPU_Config=() +198:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b706dc1dbca4cc6c308f2cadf799fec41a8b3f08251de3a58444f0d760994cbb_Device=CPU_Config=() +198:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=3b4dbc6facc24173723b52757e4ee60953d7a639e1fcb6e70236918d6a40b3a5_Device=CPU_Config=() +198:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=945bd465761a4d9b013b0a5e88a3a9e041d8bd8bfa8df8044f28d71ba26f224b_Device=CPU_Config=() +198:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=a45c7a05cac7f403aae101f22fac53b2697d90dcade1bb550200ce439cda7914_Device=CPU_Config=() +198:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=396388d4dce8240937c39dcd24e583e775f7b4e84d6c85fa9b5930588dfb9b56_Device=CPU_Config=() +198:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=7b702f91c21af6c336654c924011d0f4d149111c503c697fcb85a83cd60b7ab7_Device=CPU_Config=() +198:conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=3df69301c7a4d857a546a30a0d76674c52e3abd819d644ec036636eb7cb92fc1_Device=CPU_Config=() +198:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=b6984001a616b3dd3ef4b835b2dc6a48bcaf8882bfde7761b4e141733364f66a_Device=CPU_Config=() 
+198:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=3326cf79d426d1a669158c4db8256fdd956fa4725b0d6fb9e8ab5e5206612eef_Device=CPU_Config=() +198:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=30897cde05f349bface3d90a8d730da4c4c3e5133c59495d59258224dcc29ae6_Device=CPU_Config=() +198:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=i64_Shape=static_IR=8834a8881c2da907f6ae38d4c45100dde754e653f3e4994cf9add141c217c781_Device=CPU_Config=() +198:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=7625f5af6c70a9d4bccb783dc369a11b53ef1f6492df030ae5404452ea0cdc79_Device=CPU_Config=() +198:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=6119edc6e1f969159ce54e6ff4451d96db51485b54fae625a972035414c704ef_Device=CPU_Config=() +198:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3b3a5cbc6a255792eeeec698aa5a90947164eab96ec744ada9d02b6c7f453f8f_Device=CPU_Config=() +198:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=static_IR=cbb80f496fd705f24fdb25f6de3734bb2a2b7f49c984bdb32c4f62ec4640797a_Device=CPU_Config=() +198:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=86fb2ad636e51f682c83919d64217835cd9ab458695e3bdab295c4107516e733_Device=CPU_Config=() +198:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=b61800abac107b248c29df7ba04a73c91d490782b1da46164c1b7d2f8cec3cdf_Device=CPU_Config=() +198:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f55c473236715e5c4e6ec21a9e07d1c73b14d529b57fae0cb38ef9d6cd383b53_Device=CPU_Config=() +198:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i32_Shape=static_IR=0add7fb1bf1500ea125aa6d245bad577d6dea18d038c020d18c2dcd56704c671_Device=CPU_Config=() +198:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=f86f86769ec214942eaf1fdcd312a29e26308676419d8fbd98fdc485c2de0815_Device=CPU_Config=() +198:conformance/OpImplCheckTest.checkPluginImplementation/Function=HSigmoid_opset5_Device=CPU_Config=() +198:conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseAnd_opset13_Device=CPU_Config=() +197:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b66a71c512cd06f5dc1d1a254ba0128b606c1c41b860f272dc1d2514502c2350_Device=CPU_Config=() +197:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f5d63cfc40e19fff35078633a3354fe5e3a8b6dbadbc89e20747398d87e02176_Device=CPU_Config=() +197:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=() +197:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=37b1b14a23dbc309d75fbd98158648e1a7fd246684b96e1ebb10a75c3f5b03b6_Device=CPU_Config=() +197:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=51bb427ac8abf618a72159cde1ee840e08518016a09e995f503cd888941f5039_Device=CPU_Config=() +197:conformance_Select/ReadIRTest.Inference/Op=Select.1_Type=f32_Shape=static_IR=da15c9ddbf446de00565c83e95b8a554d400b8b925481e56eb3df41f7efe26d9_Device=CPU_Config=() +197:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=f802331401875cb16be10c9f752520406437b2e63a50e022b7d95b732e5296f2_Device=CPU_Config=() 
+197:conformance_Negative/ReadIRTest.ImportExport/Op=Negative.1_Type=f32_Shape=static_IR=c29451ffff103b5e965a1bbea7994ef6da6394060855ee071b9e7a3a4702141f_Device=CPU_Config=() +197:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=c721fdd5d79e702e4ac48a31d0ebacc4977f050c67d1c415b085773042c8e93b_Device=CPU_Config=() +197:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=dynamic_IR=214b1d4be2a141409b6b54847c952a282d9b2d7236d3d8ada3463f7dc8554097_Device=CPU_Config=() +197:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=cd389fc4a9417c7136f75474e42dfb43d1f9cb35fa0e104632ffa69fce2b7e57_Device=CPU_Config=() +197:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=9ca1360242688f494c59b8eb1073a4bf7291ee7b2ff460380bd47248fc591dc1_Device=CPU_Config=() +197:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=14108fac0139d5bb39f6b2106857e1ac91c8d44ef9156e4e0873facf9d932316_Device=CPU_Config=() +197:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=5498e895212b57b42748644679c1dd67936f230d2c61998ca6bee31d527035cc_Device=CPU_Config=() +197:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=4fe95284f224758c29c5198a8b2e6f97e8e737435d36cb94b9cdf0bca3c89dc1_Device=CPU_Config=() +197:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=80e0900e8270dfbd0fc879ad4d065847f767cff9399688bb9e5e03b8531c554e_Device=CPU_Config=() +197:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c0413244803edff103b95dbbcab27b2c714740372ba215264371a9474355a8c4_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=df19449b7a95887e834ba16ebf2e1f08416d6293686a6cb6b6cf39fc82559595_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=776ce5493890837f137a7abc7851ff04164468d7c13ef1022f73f1f68e058c1c_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3c0b9fab07568e0eebb5e5d068cfccdd617ee6e98e4253a0461ea8d3f0f582e8_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=178677f6c6e3857b2c3aa8765c8e3186bd25b73154ba6463ff33a9e1c911e6bf_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d1b4dff28b71e41d8106d3730f2705e537487aafe0dd53ae7dfba9ec21724287_Device=CPU_Config=() +196:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=afc2cb913bcb4e4badd203c9cdf491ea1e6ed4f1cd835e7507889a9bba25b958_Device=CPU_Config=() +196:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=i32_Shape=static_IR=f777fb31e1669cd58cc77e2a04c3f9a804b654b6d710432641a3dc34504460b4_Device=CPU_Config=() +196:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=b574ee57274a9f27f6d0908cef2645c458983225e3cb82c455148e83337ee3ef_Device=CPU_Config=() +196:conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=4420cfb7f4a734731dacfe5b0c27db41ccaac2ab8bbff56cac0f99ed96e976f2_Device=CPU_Config=() +196:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=670a0d513277b4508e8edcddae6361e98fd03c2fff31293637c36f97e59a6b9c_Device=CPU_Config=() 
+196:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=1c91956bf0130fd338f8f11cf76a08dcf5fe3c6c42239fa6a6aeb517eeabba36_Device=CPU_Config=()
+196:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6c1aeced5aaaecd99f3917a0f38e01902dbe81614ae4dc9a99fc09a379990abc_Device=CPU_Config=()
+196:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=84f6f3544adcc7c68df5ca411844cf36c2232c1b6c820094e5693a444faa143d_Device=CPU_Config=()
+196:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b12c40f6d576482396a94e28e0814488b87eb6844583bc87384ed385d45bd6e0_Device=CPU_Config=()
+195:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d27e8ca8280dc9219f4b76a2c8f47cf526b32a58710126c7549e2c04026944de_Device=CPU_Config=()
+195:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a0b3d7813e380f287a758c35e56e8e8edbb72b8c64fab6194a8890dacd5e2f16_Device=CPU_Config=()
+195:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9465b2ea76ea3be1365dfe1255524d4ecce0dff6123e929a2157bfc767396b0c_Device=CPU_Config=()
+195:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4df4ab698c70278594efe8b4349a4c99c8b2ab7c4ee0182c5a4b7673da922ad6_Device=CPU_Config=()
+195:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=51390fa8c8a5680ae4a8de4f655083caefbb8445dac8814d2b1827e2bd43f470_Device=CPU_Config=()
+195:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=0fcad2ddd1c7b81bf5e88ef4d4abb26a33326a37fb0cceb1205c1efd2a2d3615_Device=CPU_Config=()
+195:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=dynamic_IR=9337e101d74f6d35bf81e9be895ffba9e972cdab9d79b2802f1c1ec0f4d34a83_Device=CPU_Config=()
+195:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=dcd71a51a6682c9bc461a6cb72d59082352ab8a020e1f79e64c3cc44a37b55ba_Device=CPU_Config=()
+195:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=7fb9c2cdb4c82a4b65d110fc84c03948917cc1921c372cc645cab00a3377fad8_Device=CPU_Config=()
+195:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=ba1a7c4cca6d39b8bc7be7d52a0680d055e33a776f4048ecf38335a2ccdd8d51_Device=CPU_Config=()
+195:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=2b927ee73187f1c1cbdb071ad3c0a72c9eb8a8631f2e7c6c3a8f8482c301fcf3_Device=CPU_Config=()
+195:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=d93633fac99f9472435ede6fcdb9c72475b68bf1352d58b33e8cbdf9ca74ac50_Device=CPU_Config=()
+195:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=d36c5ab59d2ab873aa35b35a952e061568edd4ee8e64c1ab200bea63472a97b3_Device=CPU_Config=()
+195:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=2620e86e1e6ce8f0ecb3eebce969f3e7df11f7f86c6f97309aa24993f9036033_Device=CPU_Config=()
+195:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=0a7b1efc8d314c5e37062e482a9398f718082ba0528c6ca2d2f6c88e7a4a2bb0_Device=CPU_Config=()
+195:conformance_FakeQuantize/ReadIRTest.QueryModel/Op=FakeQuantize.1_Type=f32_Shape=static_IR=66f4344fac8e5e5484f5762b1bfea68ed08bcbc378a8b10f53d0a8e053524749_Device=CPU_Config=()
+195:conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=66f4344fac8e5e5484f5762b1bfea68ed08bcbc378a8b10f53d0a8e053524749_Device=CPU_Config=()
+195:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=b1477d38842775944964d18c13278454256d9610e0ef880fbce0cc87e5977556_Device=CPU_Config=()
+195:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1e56a3e2379d29d81af93174e56ef91408af41dfc085d4851ff58dbec781b8fa_Device=CPU_Config=()
+194:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f99a212a117855e6e2dc4a338444a8ecee441f989638f7a0700ce24e037d29e3_Device=CPU_Config=()
+194:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e4b374c3afdeb45605c3ac745c03fc9eb938cf3f3828c119917ca92a6e9135f0_Device=CPU_Config=()
+194:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a4ab938f33d0b58425ed98a56789d0ee94beeca13ec7fe3358c9d3751ef136a5_Device=CPU_Config=()
+194:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9e0cfe97e08c7b2974ef224799ccaa3fa777802a5fd320a089e527f00a594dbc_Device=CPU_Config=()
+194:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=004b6fd9b060324a42aad296dcb21f5b7eb7586c082f98d23f25a6d882f70c14_Device=CPU_Config=()
+194:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b91ccf96246dcf055dd9122c823ccc54ea572f1ad8fcbad3a98c88edb7e454c4_Device=CPU_Config=()
+194:conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=591cc5abb16f22cfa720e53be695097b83c42a971536fb5b79d0b02cc4ad328b_Device=CPU_Config=()
+194:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=dynamic_IR=c117722add2db4a6eee4dc2fbfb99174911d54eb3896c65097d31d656fdee639_Device=CPU_Config=()
+194:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=8b759b2f1999be207aeb39763bde3eba4aee028e9369a86a87493ff86f3fa014_Device=CPU_Config=()
+194:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=033c6bc337d14053ae097dcbee99ef5de7cb7728b589cc8d64783467505a8ba7_Device=CPU_Config=()
+194:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=67a5010efb429e6dedf35481443b40a77cb01c1b4fb51ec5890fcfcb010fd6f7_Device=CPU_Config=()
+194:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=6e67522f2df32ac8e237fd4de148d082f3c55e6c31ace80cffeaef784dfe75a0_Device=CPU_Config=()
+194:conformance_Minimum/ReadIRTest.QueryModel/Op=Minimum.1_Type=f32_Shape=static_IR=5150e1785d97b052a42873f9e9d23a511027248ff4b13ba7c269c8c3d4639e45_Device=CPU_Config=()
+194:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=c4d8543f8e0b375407e428ef119ba4049d44cc273a10661b57645bcd1d36f5cf_Device=CPU_Config=()
+194:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9c651eeba5c3e7b07a8cd0d4ba479fe8c5aaa2c4df9b18ab022e775ea01dd867_Device=CPU_Config=()
+193:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ad640e033c11cf7d81ab237630f0ba656968f620eb4ed77f38cd79c6cbac42f6_Device=CPU_Config=()
+193:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=95d9789ef78c733e0c7972738bafd4da289a90f0d9ea00bc9452192173390b6f_Device=CPU_Config=()
+193:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=7caba2dff8ab10660f66796a39d8d2a78f3e282f0629c2ecbee9b90c34e62aa0_Device=CPU_Config=()
+193:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=7caba2dff8ab10660f66796a39d8d2a78f3e282f0629c2ecbee9b90c34e62aa0_Device=CPU_Config=()
+193:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=f32_Shape=static_IR=9f4d316675c933ea5d6511324e3d664440a8ba287cb2ffe768517f9cbfb613e7_Device=CPU_Config=()
+193:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=0138363d3baa37869a3e55e1b059a42a87612507ba318e753361a58549ed5ec1_Device=CPU_Config=()
+193:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=1c91956bf0130fd338f8f11cf76a08dcf5fe3c6c42239fa6a6aeb517eeabba36_Device=CPU_Config=()
+193:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=dynamic_IR=166d8442037dcf0469f0b14ab83676b30bce53edd79494c52a575e3744920c4d_Device=CPU_Config=()
+193:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=c4d8543f8e0b375407e428ef119ba4049d44cc273a10661b57645bcd1d36f5cf_Device=CPU_Config=()
+193:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=eace26dff7f6f0403126e78a4c93920ee5e54a721cd580b4b18c2c9989baef86_Device=CPU_Config=()
+193:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=965ded994c427ec62353194906203c202a52dfc0467196d5f1143759fed94b07_Device=CPU_Config=()
+193:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=5176d95c14be776a4247f25a469708ba7976378b7aa8860a115a28a8bf2c2902_Device=CPU_Config=()
+193:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1b59316585dcbdfdbef9fd71e2681207498cc867a2285eff20d125c4fca0502c_Device=CPU_Config=()
+193:conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=dd366f3f5b63fbfce3d9378cf0d8bfa4a909a973bc3e5e97eaa9d346c5cbf1d4_Device=CPU_Config=()
+192:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a4772901ff77541ae624f89db89901c7d5a502a0dc5d1e0dc21eb8e08c599525_Device=CPU_Config=()
+192:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9257d329b4cc9eff8545270d1693734adac9ac4ee44dcbaa21c774287e84aadd_Device=CPU_Config=()
+192:conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=33aa0d800115f94b07bce9c6ca8b4447f2c4f442bff77cb9b02b23d2ddabcc01_Device=CPU_Config=()
+192:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=d683b96d525bc074d4f8c15934a5082a3fba1068b591f67e4b05d605fe5e6aa7_Device=CPU_Config=()
+192:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=static_IR=ca5d2626f2066e0c806addc4b6ffb4b3a71f1183b93783b92f44de62d82faaf8_Device=CPU_Config=()
+192:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=ae817dcac1ed2395cc4098f67bf6d2bcbecd8b7e91ef7592622d1ee75ed4a3cc_Device=CPU_Config=()
+192:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=c721fdd5d79e702e4ac48a31d0ebacc4977f050c67d1c415b085773042c8e93b_Device=CPU_Config=()
+192:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=0db5765bcfeb7716699abd0cee850918cf5ef18e2cfdf1614b463734ca35a20f_Device=CPU_Config=()
+192:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=2a9ba5f3e5a74f05be93e288553139a15242f1500e1eca8317dbd82ee8cf00d1_Device=CPU_Config=()
+192:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cf02be750ce25545f7bfd694603192667eb3fdb07a186eaa7f3ecf5767547651_Device=CPU_Config=()
+192:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=90f981f067c23b4fd3d2df838af8e6d11ae1c5e9465b566501628c7f3d63674d_Device=CPU_Config=()
+192:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=static_IR=3e4364d93433ea741efe178b0c83cfb13c46259888aec468f59f77cd3f1bb39f_Device=CPU_Config=()
+192:conformance/OpImplCheckTest.checkPluginImplementation/Function=GRUSequence_opset5_Device=CPU_Config=()
+192:conformance/OpImplCheckTest.checkPluginImplementation/Function=BitwiseNot_opset13_Device=CPU_Config=()
+191:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=caf20ebc8d39cb23a107a03e819e8ee5b2807fbd311fe65453446251e4b6a611_Device=CPU_Config=()
+191:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=7b1df1422bfecf1fdf9c25f72d938950cb1492ee1c7223d9c0d771f93b1fbdb8_Device=CPU_Config=()
+191:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i64_Shape=static_IR=4341385bd87226eb41493c667323e8c1c39817983a48025192209270750eed06_Device=CPU_Config=()
+191:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=055b7eb16539ce5cee62e165db9a6d51a11e0bdf90bc9f82eeca1f2faac2bf89_Device=CPU_Config=()
+191:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=69f4c39c3fb2dfc55714893e1e45761238e74bf28ecfadbee3f4965b5a379888_Device=CPU_Config=()
+191:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=944072d96554abf3ceac6b928cc00ea1705d5e0dfae8e9a0662de4e56fb3e62f_Device=CPU_Config=()
+191:conformance_Pad/ReadIRTest.Inference/Op=Pad.1_Type=i64_Shape=static_IR=1c06ff77487507dddcddf290d75d4812bfc8a7b2c9bc78176da5212eab029966_Device=CPU_Config=()
+191:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=5ae2e8ce34957ac812bd04943714d0b0ca6e2098c46caccfd775620d7f373cbf_Device=CPU_Config=()
+191:conformance_Interpolate/ReadIRTest.QueryModel/Op=Interpolate.4_Type=f32_Shape=static_IR=d05c1b7fcf976117a23e0284998d9ce21689411ff24530175787f1512ca25879_Device=CPU_Config=()
+191:conformance_FloorMod/ReadIRTest.QueryModel/Op=FloorMod.1_Type=i32_Shape=static_IR=2d09fd84ef3e176a2eae04f1066929ceb3973045b87989e5f0f11b97cab6cc7c_Device=CPU_Config=()
+191:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2e586703f4f8e9285249881d509a2a0b96d4758be5f97d75e7ee4f78951c58e9_Device=CPU_Config=()
+191:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=922e194a5ae53e76be5ae624754d3c1fe5ea0d8c564410062bd9c30afc48ffe0_Device=CPU_Config=()
+191:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=fced0ff647e4ea9a4b1673016b017f68ed75cdc778cad156dbd6cc379bb815f9_Device=CPU_Config=()
+190:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b42c98b6313e56a7a012553eeabae92f0672c0bde6f9895d10fb459796448b75_Device=CPU_Config=()
+190:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0d40552a1b6c1945765ada16284a0c03f5c1454fb12f226a34dee8a07b14f17f_Device=CPU_Config=()
+190:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=47423c3e9443249e3dbbf58ee0f5b69b15e677f84de44ddb9d2851d1341dae96_Device=CPU_Config=()
+190:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=21534d0488c3f7c8bd40bc81476832e866000c97ee6892359826c7877905d733_Device=CPU_Config=()
+190:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=fb8283ecd8934dfc5340a41e9889a0a760b39869e4873efed4ef85606c162ce7_Device=CPU_Config=()
+190:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=474c6273d1e48e8e5885966dc93629ad413683ad942e3101452c1a58fb5b5af9_Device=CPU_Config=()
+190:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=f32_Shape=static_IR=7b702f91c21af6c336654c924011d0f4d149111c503c697fcb85a83cd60b7ab7_Device=CPU_Config=()
+190:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=83b83dd13b1733a50ec728ca6e7f09eb75641a573178816d1d33f30390464d87_Device=CPU_Config=()
+190:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=7f806d6c4a0ff3515dd9a092fee2ab14a5f363fd5fbc7503d64a8cec4bb1cca3_Device=CPU_Config=()
+190:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=dynamic_IR=debf36fea706c02dc67354edf761f0dc931ebcccbed285f186164fc4b9532766_Device=CPU_Config=()
+190:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f0472c0e5ff8fb82651424269bd9f77e73eff6c43c70b6192f07303c0d35db8e_Device=CPU_Config=()
+190:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=cdd7ce044f231ae39fc0f7460a55473c0de6934124cd263444a5912b8cbbc0ce_Device=CPU_Config=()
+190:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a7b2c196b6ae12252522b2571af40b540eae94513bfbd88e15708fee816869f8_Device=CPU_Config=()
+190:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=b0a418fb8ec50f25147079b3aef1b13095ea626a9e52a643600c39972982ff9c_Device=CPU_Config=()
+190:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceMax_opset1_Device=CPU_Config=()
+189:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=fe5cbe273830f6a09e3f18eaf8e9410f9f7f1083af508a9dcaf5f0f22aa3ac1f_Device=CPU_Config=()
+189:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=64d7467cf7785e52814a8c25f96c1a5d82c071ced27dea8302b5cd69b464ac65_Device=CPU_Config=()
+189:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c6e38c3297ab303b166e2a613203a1f09f4ba5a15659c8d2b233febd8fd09d9d_Device=CPU_Config=()
+189:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=93a9a06d14c3b4d51891ff0e704c74dae5905db9b5de06d31379f33fa685c80c_Device=CPU_Config=()
+189:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=3500be960a489d618c1ff6345c1d6788d17c43786c10a7e7b630586920bce356_Device=CPU_Config=()
+189:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=dynamic_IR=bc8918b82285bb58c2cf1b4b60b023262426de4044e0c2d50ae07f4b22ae0eb0_Device=CPU_Config=()
+189:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=static_IR=a6722b8718b7c028e1bbde4462945c096dfc551775af27bcc7d00967d7d73919_Device=CPU_Config=()
+189:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=91b6cdd8a7664759217ce0b84a8baed2105bca0ae9876e9efd01c074aa27039c_Device=CPU_Config=()
+189:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=0a5f9fad12bf5e2592c6f720232bb38d94a5fb9ac1fdc5a8f7d474ed9e9d2504_Device=CPU_Config=()
+189:conformance_NonMaxSuppression/ReadIRTest.QueryModel/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=802164adc9e651b0a3ec0b5f96341fc3cbd098042412236b65e0c8f77b5153f2_Device=CPU_Config=()
+189:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=bf7feb979b2eab03afc780965804a3f6b8471b574c36125654fcaf3ebc2c30f5_Device=CPU_Config=()
+189:conformance_Erf/ReadIRTest.QueryModel/Op=Erf.1_Type=f32_Shape=dynamic_IR=e6f95710a782b6c7df8397480e5cffbfa773fdf4ef11c93b2b1ac4694313b080_Device=CPU_Config=()
+189:conformance_Einsum/ReadIRTest.QueryModel/Op=Einsum.7_Type=f32_Shape=static_IR=b9f9ac285915db9ef3e7437728695f2833d165757ffc81afb88242e7b471f434_Device=CPU_Config=()
+189:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=f32_Shape=dynamic_IR=7562536120d473cca837bb2ad1e3969484868111954ac0b168a5c2805264a689_Device=CPU_Config=()
+189:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=256f748d7b98b0eb70cc659403910bac929d62a2b153e63438f8746f602a83fa_Device=CPU_Config=()
+189:conformance/OpImplCheckTest.checkPluginImplementation/Function=Slice_opset8_Device=CPU_Config=()
+189:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceMean_opset1_Device=CPU_Config=()
+188:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f01152d615a3092ffd4ad1059779ea183d7a62c1ab5b970d940f3f537e6f12db_Device=CPU_Config=()
+188:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e23a8faab46e1096894a906794325ff1a8c6001d3b980aa809088385675c77ed_Device=CPU_Config=()
+188:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=cf334277b64fe023019fb7f007aae9ebf7432b733a1876d6cd61bce6a204e0dd_Device=CPU_Config=()
+188:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=780fe1f9a82f728f88511b2d8194c4f425144ffb5ae4aaeb1ce90c6fdea3362a_Device=CPU_Config=()
+188:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f5a74749f6c90dccecbb5e4a7d0fee72cca6247f0084487b5ca7d94d098c9b9b_Device=CPU_Config=()
+188:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=c377dc784ecf97aef916740686298f47bc82c7c007326042ffe748e91ccfde1a_Device=CPU_Config=()
+188:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=31ce051edcf02344a693eb2d200fa02b53412a5707faaffc2907cadcf81192f4_Device=CPU_Config=()
+188:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2ef3273b8c144dedd6cc2d2b8c2d2921d999fa286b10d90aa796fa188dc52cef_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ec19939673cc58f2511ffd6695a3652f1d724872b0db958a6d667e1e87002b21_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=97f6fd9998be395222e6878ccaab47f5d50561d1ab8f988987f7f292e784fe2d_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6453d2955ad3344d5e021f97d71691ddd7c27ffc0d9044b724c9a6b5c20cb427_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=eca24a51b737307a94a918f4d03923c1e035a3379c73359515c63ff3ea98be85_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=42afa027ada245d36900a89c54a870ba5fc7fe3cc3bc0fc7dbda23af3e5111d8_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8b55c14423b60f30029c68c603417fb98119c5922e2827c60c99edc05ea813e1_Device=CPU_Config=()
+187:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=deaa5ef98e478a5850df528107031c9c7bfa6305bc7507325c91b98f9337b0b8_Device=CPU_Config=()
+187:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i32_Shape=static_IR=5224ffd078708e8917b14b4118bc4a42317c123dc0a5dca8234ad73d44daf845_Device=CPU_Config=()
+187:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=8f7dc81bfce05ce39b694fe48197a4fd2aa7933c7061508be3b9dfefef518f75_Device=CPU_Config=()
+187:conformance_Sqrt/ReadIRTest.Inference/Op=Sqrt.1_Type=f32_Shape=static_IR=ace54c326bc8255cd741eec12762e4d8f645fe93d50c037effce893745f8fdb5_Device=CPU_Config=()
+187:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.1_Type=f32_Shape=static_IR=a4fe57973b0bba01e6038a8050f07b8ad1bf6871c1ad86270920f9084dc84905_Device=CPU_Config=()
+187:conformance_Sin/ReadIRTest.QueryModel/Op=Sin.1_Type=f32_Shape=static_IR=54a909996c38d86ec830295e37f0fc0070260101390dbaae2cc6eaabea82a7b5_Device=CPU_Config=()
+187:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=445aa399303e82b524cce3e0b3522cfdb57200720b3b72584c785fad157117b1_Device=CPU_Config=()
+187:conformance_Maximum/ReadIRTest.QueryModel/Op=Maximum.1_Type=f32_Shape=static_IR=62b8aaf25e8c93387362b0c657886c31c39a7330cf3455486b8943a1e375ef5c_Device=CPU_Config=()
+187:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=d575b00d2b6e155319fe7120133d8e0c3dcb5c79bda710b0650fa48543dc5c84_Device=CPU_Config=()
+187:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=10cf1b7a4de1231ad721c9660697d6ee17bcaa2151f08eef596b41e6e3aa1b2f_Device=CPU_Config=()
+187:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=boolean_Shape=static_IR=4da22853b6e4b853fa57b9dce8f5a26920d079a74055831d651c10f48ee96e8f_Device=CPU_Config=()
+187:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=i64_Shape=static_IR=e8fc48c9bceee1462572c6aa8c5afbda94a9d6e8760deea1c9a3d04d1d797fb5_Device=CPU_Config=()
+187:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=dda009a1f3191e35286b7515f5741905e303f27287041248e2ce15f6954af810_Device=CPU_Config=()
+187:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=95884fb8d74cae609a67146ef94a84eadda8f3bd6369a9cb465bc413264a1d0a_Device=CPU_Config=()
+187:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=3c1c8bc7ce009c03509ca9d6a86f3d5cff89be49439e7513edcde4e62fbfb8ce_Device=CPU_Config=()
+186:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d16722dfa770998d9923d09fa1e2a973bac5ae7afc6452a0b5ac21d839720bb4_Device=CPU_Config=()
+186:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7c43bd989494b4ef0f2ca40c3b0c57b471d58b21491456e9588938f702721be0_Device=CPU_Config=()
+186:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=392b855febfc39fd1b2a9fa43270f58bae53e0d210525e8700edc15a10d28d33_Device=CPU_Config=()
+186:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=71010d034cbc059af32ae6066fff1f27834db480e76042d1ef7bd1e7bc426a08_Device=CPU_Config=()
+186:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=474c6273d1e48e8e5885966dc93629ad413683ad942e3101452c1a58fb5b5af9_Device=CPU_Config=()
+186:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=db5c391cca1031cb4ec32def18ce3a4776c53f71e861c39b350fe5856da4fa43_Device=CPU_Config=()
+186:conformance_Sin/ReadIRTest.Inference/Op=Sin.1_Type=f32_Shape=static_IR=54a909996c38d86ec830295e37f0fc0070260101390dbaae2cc6eaabea82a7b5_Device=CPU_Config=()
+186:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=05e9fdd5183bd179e5ef996ebcdc53f239900ca46a8122ee8bb1e885c2c091ce_Device=CPU_Config=()
+186:conformance_Pad/ReadIRTest.QueryModel/Op=Pad.12_Type=f32_Shape=static_IR=05e89f7690a9c7d235c753aa4af28229a44fab527f44ff4832ebcebf0c9debfe_Device=CPU_Config=()
+186:conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=i32_Shape=static_IR=6650e462a4f0086329d8576eb6352979e89825517f48e264fe719c7c5ca276fc_Device=CPU_Config=()
+186:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0c5ac67592b69e8c2b7acbae7a0f877cfed184c572d2fae09eb8fa629e86eeb1_Device=CPU_Config=()
+186:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9b64733aa0a8994cb3695a7c26f905f4d2b86c2e157edbd8a9970d33970a4015_Device=CPU_Config=()
+186:conformance_CumSum/ReadIRTest.ImportExport/Op=CumSum.3_Type=f32_Shape=static_IR=d517f63a168293380a1f066313e6a2bacef9eddf961ce164f11ce2609a632b3a_Device=CPU_Config=()
+185:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f8408a1d4e8c11ebbda01e0431217a5ff4ac6a869cc4cd3208cc9adc59d227fa_Device=CPU_Config=()
+185:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a29be1e2e5f78c12657221f33e5309470a7a4dbb9061a8100d7c454215198f7c_Device=CPU_Config=()
+185:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9ce6a2f4787ef120c486a68cc02bacb95d6cb1c4cdb5e2054275cde409a39803_Device=CPU_Config=()
+185:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8e80bbd29743e87a0a6d4158a06249766b6a9cf424cc1c0ed3c6f60e30e6db58_Device=CPU_Config=()
+185:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a2e1d1400763fcb89889255855a5c99dbbb17ee5e390e891c94211308fa2d725_Device=CPU_Config=()
+185:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=adee3d4d6728b17fb5ab17a9915c5b7c8808f949ad358e8a16a0bb12dad7c958_Device=CPU_Config=()
+185:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=9f19f32ddff44c1c8f7dc3b9b244a9191a15fef9874e016666fe6a817937f699_Device=CPU_Config=()
+185:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=ea63b1a277de19e725624c4d57d7decf2a01f9764510b0849e0b9dc49ad24fbe_Device=CPU_Config=()
+185:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=i64_Shape=dynamic_IR=08776190d0fddfcb15ad75cdbf6892de03f79e89d57e02b7c3e80b4a7a125d35_Device=CPU_Config=()
+185:conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=6ac457e9181610da9eb4bf0bec6cd53bf3078e0b84df1211f49921207d81c6e9_Device=CPU_Config=()
+185:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=i64_Shape=static_IR=056c07f9ad8e27e01b269b5136ee29b4cb4d1229a009cda07e4fd32c45d4e97f_Device=CPU_Config=()
+185:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=508a961c358d40ddb6906694a24f87dc24f74cb4643aab58ee1d6fa28f099e6b_Device=CPU_Config=()
+185:conformance_HSwish/ReadIRTest.QueryModel/Op=HSwish.4_Type=f32_Shape=static_IR=98546b7eda390c30f82053a093b5e3855c6dc8c631451b3637eadf95858af2bb_Device=CPU_Config=()
+185:conformance_Floor/ReadIRTest.Inference/Op=Floor.1_Type=f32_Shape=static_IR=b064511ab38a9a70b4d203e11a12b990f388a03550ba98c65468be1b85c68fda_Device=CPU_Config=()
+185:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=80e0900e8270dfbd0fc879ad4d065847f767cff9399688bb9e5e03b8531c554e_Device=CPU_Config=()
+185:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=851aa3cf931a01e0188758055b866fd14280bc344f548da6166e4a57ca7c9254_Device=CPU_Config=()
+185:conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=c985b086d155654f9db8470da3af5245c4fbb0139015d049b8b3b20f393c2545_Device=CPU_Config=()
+184:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e58cf21c9c62dd427747021dcf9544157638e0773329eecfb8755a71b24f65a8_Device=CPU_Config=()
+184:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d9b3427efacda497c4fb86cebe89023b322722167d0c32de8a2602a80b23580b_Device=CPU_Config=()
+184:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=949611ba6617b054b828175c04452b8fcbd109c99cb25d5d8827a872b4044fd3_Device=CPU_Config=()
+184:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8dfd99ad2ffed2573598829ff34a62deccbd70f5337c1fec4c2962cef1992595_Device=CPU_Config=()
+184:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=005e1b57ad768f4c8efb3116fe51bc85661c377e6632518b9172e8862d1c3edc_Device=CPU_Config=()
+184:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=8fc5ce53f1f6b924371ab2cf156ddbf7aea234b17befdcb6206ba51a7ad988c9_Device=CPU_Config=()
+184:conformance_Swish/ReadIRTest.QueryModel/Op=Swish.4_Type=f32_Shape=static_IR=d79b47022a50437c9df095b34e515c53eb042c9813fcf6dc7bcdb96962818ddf_Device=CPU_Config=()
+184:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=dynamic_IR=e255ef2321233444ce6e4fdeb513a9b271987457aa9bd456948b64f589de1e2b_Device=CPU_Config=()
+184:conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=282e24ea7ef9130becb8db8f0251c907b02a534119d08162e07091212d67f290_Device=CPU_Config=()
+184:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b38f11a07d752c83a5e4fc709d5b78fe9a40ef3394f4b617a30df29c21640338_Device=CPU_Config=()
+184:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a3d6337c1ea3e8b67256696ea4231da4fc0e9d9f8bea169607a1287233086b3f_Device=CPU_Config=()
+184:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=234277ecce31161bea52cf4aa2a37aa8cd43f1bbeed281a79a6aa1d07368872c_Device=CPU_Config=()
+184:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=i64_Shape=static_IR=93ce70e605eb712479090e3a266e86eb7422bf0fdd3acb1c38a0b92a9c381e2c_Device=CPU_Config=()
+184:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i32_Shape=static_IR=d5cd3fb647dd4a57feb28366d922a151a3ffb1707864f2ac85595fcc30f222be_Device=CPU_Config=()
+184:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExperimentalDetectronPriorGridGenerator_opset6_Device=CPU_Config=()
+183:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f0145ffb8d2846d866b1a89c8217d54209830e6d3d0d10913e75af42f2510c74_Device=CPU_Config=()
+183:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ad4c3d2d3f258a4be14846d9d26203008e01b2832ff004bb8a23ff05c72747b5_Device=CPU_Config=()
+183:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=86d8d42c30e423e801b5d4d832f87cd6837bf9feb3c546f5bf87e04f842a04f1_Device=CPU_Config=()
+183:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d69304b651805edf18138147ec5a4c16e883ad5e5d9828db849a35249c28b263_Device=CPU_Config=()
+183:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=3d37166487c5c52af657343f8fa10903efc7d580d5b370a519a0ccfbf6fc56bf_Device=CPU_Config=()
+183:conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=static_IR=9c57b92a55a929edae54a9705d80d730f7682ef015aa6923bd4658e244e9ca89_Device=CPU_Config=()
+183:conformance_Tanh/ReadIRTest.QueryModel/Op=Tanh.1_Type=f32_Shape=static_IR=7065a836f4fd77a07431ecff6bcc591ef9b0160cb5366a8f3c8b8fe5f83f7be1_Device=CPU_Config=()
+183:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=db87efd37ce8dcbe14286197df3b7a345fdc46ccc03d7d8bda17e3791df332aa_Device=CPU_Config=()
+183:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=bbf75e5f7aa9f20f890a8eb204ddb5f159ca5eae0616fb99ee0b5169b165d595_Device=CPU_Config=()
+183:conformance_Sqrt/ReadIRTest.QueryModel/Op=Sqrt.1_Type=f32_Shape=static_IR=33aa0d800115f94b07bce9c6ca8b4447f2c4f442bff77cb9b02b23d2ddabcc01_Device=CPU_Config=()
+183:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=05c2f891e743416ad510bf0ebf713738bd41258123cc4bbdc5cf067f251e35d8_Device=CPU_Config=()
+183:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d56533ce961113b2ca0baf02f3ff9f8ff210264343f6bebf26418a35ecf36b02_Device=CPU_Config=()
+183:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=ba1b92833f2c8734c5178762b6cd8c847c23027ecf79ebeba295c39b667162a1_Device=CPU_Config=()
+183:conformance_GatherND/ReadIRTest.QueryModel/Op=GatherND.8_Type=f32_Shape=static_IR=58581d0746e5bf56df7df18df87d35371d41ff69ba09c7850c8e96354c7910b4_Device=CPU_Config=()
+183:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=dynamic_IR=8029d5dae7f4721807eb717310512bad44630efdd0a64962496a0fd802a12325_Device=CPU_Config=()
+183:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=5498e895212b57b42748644679c1dd67936f230d2c61998ca6bee31d527035cc_Device=CPU_Config=()
+183:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=76ef553ce6e6b782a200e030fcb744ed737623fc3a8c9c8faeb0e05691c5a55c_Device=CPU_Config=()
+183:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=030fa97d19aab57ae9eb898fe101353fdc76bbc034d4574971c68ef254006c85_Device=CPU_Config=()
+183:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=b077af9b63e937fc64589d3007372d5fb2e4accc392ea09889a2519e3885413d_Device=CPU_Config=()
+183:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=e2d1f4fde3dc1889d4f86004173ea34a9d9836f645730727f5cdf90bc0738361_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6eeea9355df867c7fc97af81dae6d02799239ec1e480dc2c975a60761fc5f7be_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=b7aebb27d8d2b43e770ade887778c291072210b947b77b1b92e05d3327843977_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=992d8967c619d96c75985952485fcd79b943ac5e71c40457eafad4b71bf56a4a_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=952a43f4c368721e0c69418b71fe89982ef1eb2be0671653cb1200e34cb4bda3_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6e73ec183893b70ec42a4393f3b1b7c55767a14f630eaab0c3e3b6d22c6b8e26_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=a16b5a0ea2fc8d89980db21cab743fbf776918ed2ed1f91f2e4d3ad3c304d4a4_Device=CPU_Config=()
+182:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=63ba45deb14e56e09574bd3694e3d94caf6ab09f67f5278e6c299c6c924a3cf2_Device=CPU_Config=()
+182:conformance_Transpose/ReadIRTest.Inference/Op=Transpose.1_Type=i64_Shape=static_IR=d4acbcb1930b26610eaa33c0bb8aa7fd866d8142afda9fd007226f0ee6fa5c36_Device=CPU_Config=()
+182:conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i64_Shape=static_IR=68115f3a18f8ea201078166547e9c2a8587a5bb37646adf6f90da976f7298386_Device=CPU_Config=()
+182:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=dynamic_IR=45a9a897d75b175e3d805e74ec09322789564e0c0e8d9535724f262a9f534572_Device=CPU_Config=()
+182:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=efbe921ab3c27a093f20ff704fd02e5c610e7507d94a2d2092379c5a99743380_Device=CPU_Config=()
+182:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=86cd07b4db06e4210732553cace1797b55c19f590e2d9b7814eb30485d8599ef_Device=CPU_Config=()
+182:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_Shape=static_IR=e8fc48c9bceee1462572c6aa8c5afbda94a9d6e8760deea1c9a3d04d1d797fb5_Device=CPU_Config=()
+182:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=4555fb7029260c7e46403e1fbc99a3815a94373b7b08d2408277976173facc37_Device=CPU_Config=()
+182:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=23654f4a28ae697d81f49d72568e7f0657d5c15b82e173fd7381760ebcb61cda_Device=CPU_Config=()
+182:conformance/OpImplCheckTest.checkPluginImplementation/Function=TopK_opset1_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e4be028a5a300682b79da2f015dd1c1b13381b38b19bb76951e1f26439173212_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c599f8f5de2a73e08727a5e27e2f77989b4c5ce9a5e70e6b98ce4c87e8aa26f5_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a9c40d7a1ada834400ffbdff779b9970c83bd576891dfa7f637182cadf9e9681_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=879bb4767167c3e9c45eacd08a14fb7e01b072864013784f924d62aad7b37c56_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f2685b40efb789012e69252fa0fe30803c68be724a52dbcda9b2cb796138ea57_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f2403b7d119fabadb1609250bbd0959aeef2cd68c62a4036657518ebfbcedf71_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=125693eeea442dd24dd812dd2eaf8d2154274f5975d68b0132d2bf9bedfe0ee8_Device=CPU_Config=()
+181:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=c3e5791580edfc2b522c8a3aecd33445b3fa8d771e2b5a8387ef0f303773c848_Device=CPU_Config=()
+181:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=2e38326f5f0527299a0385fc3bb6c85c80e12e5bce07fe530624aba7113e82a6_Device=CPU_Config=()
+181:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c359e1ea71a80fc519e8a2dacfc7f52f5a94a1142058641b0434f40866875c12_Device=CPU_Config=()
+181:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=75aed7fbb8f7d7e8a1281d4a16c4fe2e55160dfb9e6a1bc446913a223c5aa0de_Device=CPU_Config=()
+181:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=fc530f5b6bbe8f06808eeaba33889867e705fa69591d01da4dd3dee9515f323f_Device=CPU_Config=()
+181:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=dynamic_IR=66df22ce11e7009aea35ba6a11b4294eda44815bf041eed0721499a3d2c484b1_Device=CPU_Config=()
+181:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=acaf36c12445c608b306074ac4e2be9cfde2f5550905993d4b5bd1714dc96aaa_Device=CPU_Config=()
+181:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=32537f045cce3d13cb28dd292a0ebe06e13002877d9ed2e5b25d3ebdf5afcb58_Device=CPU_Config=()
+181:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=i64_Shape=static_IR=e8fc48c9bceee1462572c6aa8c5afbda94a9d6e8760deea1c9a3d04d1d797fb5_Device=CPU_Config=()
+181:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=1e95665a92aa6efcc7e06d24fbe4cb2afa07d75374cea3ea928658a270ef489b_Device=CPU_Config=()
+181:conformance/OpImplCheckTest.checkPluginImplementation/Function=Minimum_opset1_Device=CPU_Config=()
+181:conformance/OpImplCheckTest.checkPluginImplementation/Function=GroupNormalization_opset12_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=cdc57df56ccf890a00f886c3b83f504d24ea9d4ed5f0ef05f1189879172777f8_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9beef927f57c512d381a87a35982fe4ca7a00b9a9d50ede54f7baecc5ec7fa0c_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7340b50308272b86e1b98e6962ee280e9575fc0d7042b9cc076c530268e2ca74_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=be4634976e408d799217398ce693fe430fd46cdba6c78e01e9b824c208856128_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=78db1c0e2c0fd4f0d351e66ce9cd31f7a6ee804cd23bc686b8c9081125b7142e_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=506b15e531d5a643d3276fd84af8e10eb2a62ce20fe3aeda90c50cd7442e0a88_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=deaa5ef98e478a5850df528107031c9c7bfa6305bc7507325c91b98f9337b0b8_Device=CPU_Config=()
+180:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=86decc829c047a5febe7e5d047c689075810441a2f4725088317ef68d6c31239_Device=CPU_Config=()
+180:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=0832e6deae4ceb25b92cdfa532fb5d5fadfe7fd7a00b79f630ddb5bc011986ab_Device=CPU_Config=()
+180:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i64_Shape=static_IR=7e88dcf638caa6058b01dd6c31ba40efb0fca8077cc295ca63c2ebe4c7298926_Device=CPU_Config=()
+180:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=b06553539d6e27195623fcbce51610b5671dd70700bcf61703a1f7a8bbc7c5d8_Device=CPU_Config=()
+180:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=e1130d42d591780dd2a746ce7ff874a2bf4725ca9fd09803932ba4a7b0b389aa_Device=CPU_Config=()
+180:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=d3155499ccf835bc57e4ca19c25ca32fc63ecede0a2c43ab2a3e43ba4a6a4dcc_Device=CPU_Config=()
+180:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=7b8d235013affb9589d57a8f99b36858d739258b787cffc7cec85d1dca567261_Device=CPU_Config=()
+180:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=593116ea16692c8f5a8994c0562c47e1c627f9088c519b752a635a7d91973085_Device=CPU_Config=()
+180:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=059046ce67f6b09ef45aaad5724e28fdaaf40afb92613740fd058c974a120d3e_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d9771ac46751569172412bbd4495eccdbac435f78a97f8fdfffa9215faa74544_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4c794e0e6b27bbef5d21922537d8b23d0d2b5955622c1f5ee724a4d8faf2c86b_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=20450a7796284bbdcb011ce027d5c7260ed7dcdf07e4d39e48d99a2162eaae51_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=847ce287888e882e988cdd5bf41277c32c207e38215e1e7d41439890037216db_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=dcfe0aa2fab0afc3b370be59184a5e59c7bc0e8b2930bb671d1d6b38f55234ea_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=71c0c7e3176ae8b233352c89d47a61394cb46695e7879118ed02070a4a23d5e1_Device=CPU_Config=()
+179:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=deada5d69a05cf27af659254f89b4e53e6685c517fdc2bb8a250cb5d4ba0a3dc_Device=CPU_Config=()
+179:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=static_IR=5f8b64ad8dd9ccd202ae8d5080ce166fe9f47b909e803da49546dbffdfb4ab3d_Device=CPU_Config=()
+179:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=static_IR=6a8fb5f2948de2436a33999ee2a01e239193c268f61634f1e80692b0c45aa3da_Device=CPU_Config=()
+179:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=85d1eaa250a32acf89b675cc50f513ef3c7df50ed9d68f2cff2fc89db41b63f2_Device=CPU_Config=()
+179:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=41a35ec8a58f581cb2558464a66077408e961b57821db604fe525d492d4f4fbb_Device=CPU_Config=()
+179:conformance_Sigmoid/ReadIRTest.QueryModel/Op=Sigmoid.1_Type=f32_Shape=static_IR=936ac30f388261cb12776b5e94062a9b5f7b81aa16c9aa5d8f994b8d69231c40_Device=CPU_Config=()
+179:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=static_IR=44dceb7343477ff50d3de4be1567a57a97d2e3c6f92b48fc93d20eea80487862_Device=CPU_Config=()
+179:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=12965dd8a422975f08bb0fc707c666ad7ae2671d09c68757d534e3a1d67efd41_Device=CPU_Config=()
+179:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=0b603f9cb63e722122080ea36f76fe45b25da83b0b1e213871140e82dea5f405_Device=CPU_Config=()
+179:conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i32_Shape=static_IR=e34207bf06e51dbf322bc0db76f3a9828ae018b02dba2b1826ed97004bee8125_Device=CPU_Config=()
+179:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=ae7b6a45a538bb7f65d5895f2f7941fd9048645482faa40adb1f773e282a946c_Device=CPU_Config=()
+179:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=c0eaf7f2465de396f92db5829a30b7d887dc26bc8d49b86f0fd0d688c7129e18_Device=CPU_Config=()
+179:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a08cb2499595ed0de5c51e3b0feae24d9d5462d227572e771862564e1875b6ef_Device=CPU_Config=()
+179:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=510b36fcb991c73abd98b488eff26715dde08a322b7b9429cd897dce6976dab9_Device=CPU_Config=()
+179:conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=f3d704d4f0da6c58c39e279d727dd82fe0e59a41dbaf09a3cbaa8f591daf95f7_Device=CPU_Config=()
+179:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=176c218db11ea18f367fdf98a3de14e9a9c65152bbcc39783c38772b37f6e9c2_Device=CPU_Config=()
+179:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=489201dc4d1a937b4387f1b7d01f75fa42ff02d7035d39ac6a7f56536b0d3a20_Device=CPU_Config=()
+179:conformance/OpImplCheckTest.checkPluginImplementation/Function=ScatterUpdate_opset3_Device=CPU_Config=()
+179:conformance/OpImplCheckTest.checkPluginImplementation/Function=AdaptiveMaxPool_opset8_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e0f4f91a6470af49c5e2497ae8fa917051879c18dd1e39cae18d159b697e8fec_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8eef79ab2081a12ed39f5c6f8f2e917d14685f54ccd0fcb0e19865740ca7d608_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=22dc864b06ef0c7deb8aecd74a26c7bcf75eee316288284413fb61381d79425f_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=1b46ce72aadab0dcf92991f242e971bbb36689e1bcafecc68d646aace43291ed_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ca72f78cc4db6d46ce969f61c5bf707507ed8204785159e1ac5130e7aa251858_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=59bac5d30704b81def0385b29fb8d79e459a71b9251b4f6e94116524bd9aa7be_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=35d15ad61ee34c17abe50c4a67e568c2e253712c2d63cb828b0bccdb2175a6bf_Device=CPU_Config=()
+178:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=b6e76f65817017d4cbe3504634568430a419a30e418a5febf75b89b566ca3631_Device=CPU_Config=()
+178:conformance_Slice/ReadIRTest.Inference/Op=Slice.8_Type=i32_Shape=static_IR=e256f7acbc71e64cab857fb6378a035096c7ceebdd4f867b5140d35865cf6532_Device=CPU_Config=()
+178:conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=static_IR=38f6cef69f6a7d9886b5d38902fb76e4ae41385fb3c95e229be4b44456ab2e87_Device=CPU_Config=()
+178:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=56fb4fb30ec6fd9ddd0ff2e394434eb87546ac7de273f47b663252efa2a380be_Device=CPU_Config=()
+178:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=427900d25144ee6b8cd4b35cd53c6e9335375018f6328dd01ae4db304846d991_Device=CPU_Config=()
+178:conformance_Range/ReadIRTest.QueryModel/Op=Range.4_Type=i64_Shape=static_IR=9402d607ff481567bf322dcea9aa597387a195b9d3756ff46de81c3ac2737a49_Device=CPU_Config=()
+178:conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=ea8cc682a9a36cc61498573e967ec64d289af84a9e3da1911085b1de4fea4c82_Device=CPU_Config=()
+178:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=065b3de2617f318d1376e9610f9fa1a2f2fc04292f9a7cc949780ae41d3539b4_Device=CPU_Config=()
+178:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d12581f68d14d140f4b982b47b97000f6b666cd115483247d369fed87267556e_Device=CPU_Config=()
+178:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=static_IR=0d6cc305ea05df2178e3b4ea61ba2f296655e77af08556491e0dc8dfd46bdc6f_Device=CPU_Config=()
+178:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=510b36fcb991c73abd98b488eff26715dde08a322b7b9429cd897dce6976dab9_Device=CPU_Config=()
+178:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=a35667a1c5401fb3102a59ce0fa67d0ea4829f8ce282c43767517ce025469bac_Device=CPU_Config=()
+178:conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=2001ebb8291c8bc8cd1db17c172f216cfb3994c57e344eef65565ea9f9cda1d7_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2ce1f8773e871f8aed0d3541cfafba0bb079e1765f04c1336af8a47f354cd766_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0e5b8f44656b680d14f7b7aa3293d8933ebfa82524d6acc09e41d38e8efda726_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=93a9a06d14c3b4d51891ff0e704c74dae5905db9b5de06d31379f33fa685c80c_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=319f74dd5b7a959d0e5443c76051fa5958463cd18ec11c275ef92b77321bb93c_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=2d153f841ff4b6825fe5b8399105916112addb79300aa00df85409c88fdd70ec_Device=CPU_Config=()
+177:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=005e1b57ad768f4c8efb3116fe51bc85661c377e6632518b9172e8862d1c3edc_Device=CPU_Config=()
+177:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=e1ea320702cf8065ce85c325507a199b95dc9ffce3fa715b4d8195ca67a5a374_Device=CPU_Config=()
+177:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=c3ef1d1e09e7c0917298070d6909b455d5962c4bf3adf8d2d4c04f0741141f1f_Device=CPU_Config=()
+177:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=e0641a7f9e64123d0d51a75e576fbd0e405105b8ead44a618068e77d2b4bf933_Device=CPU_Config=()
+177:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b78ffc69401084763d529e2aee12f9b9793bc92be3eca3df2a97730b9a252ce3_Device=CPU_Config=()
+177:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b15fd62115a849e0b5226ebe9162cda9371ad2783637a518f2a8724d24710253_Device=CPU_Config=()
+177:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6624c22e3b5d72c4e8d21df59af6f3759fa4d8fa68f2b5f3f92a98d6a943d0b4_Device=CPU_Config=()
+177:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=static_IR=489201dc4d1a937b4387f1b7d01f75fa42ff02d7035d39ac6a7f56536b0d3a20_Device=CPU_Config=()
+177:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=55c7f63e25ddf106ebdab6f4eab66f1be6950cf7a68abdb5b7e9a395d2fa6add_Device=CPU_Config=()
+176:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b2931a4972ae4f946778af45cd5824e6958dcc1fc79cea4da1032590b2663d16_Device=CPU_Config=()
+176:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=23f7f775455e615175f3122ce422ee96de019ca40fe603b5a4605d51f28210b1_Device=CPU_Config=()
+176:conformance_SpaceToBatch/ReadIRTest.ImportExport/Op=SpaceToBatch.2_Type=f32_Shape=static_IR=8acd95619121cb22760fd92815b1ba85f541f282d3860e910f73036ed335a9ee_Device=CPU_Config=()
+176:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=cb67c5d0b8712ebac00fe4169f0cad2e0a8c71d7f9603d5d2ce6ff6dd6bc055e_Device=CPU_Config=()
+176:conformance_Einsum/ReadIRTest.Inference/Op=Einsum.7_Type=f32_Shape=static_IR=1c6cbe8477d09b0b193ddf9a453c1b6a8a79e3d1adcdf1c096709cee7a4866db_Device=CPU_Config=()
+176:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=23ad83652d315aa08ee781b0fc81c0eb737265280c85a86a4f08cad71b33e74a_Device=CPU_Config=()
+176:conformance/OpImplCheckTest.checkPluginImplementation/Function=EmbeddingSegmentsSum_opset3_Device=CPU_Config=()
+175:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=49245e23b8c1c485428d0e490a687e48c541bfb833eb7838efd8c112736a076d_Device=CPU_Config=()
+175:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=1f429cd9b96a0ae8b336e874e911d2cdb79820b76030c61de8a1c057a0c33168_Device=CPU_Config=()
+175:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=cf334277b64fe023019fb7f007aae9ebf7432b733a1876d6cd61bce6a204e0dd_Device=CPU_Config=()
+175:conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=7065a836f4fd77a07431ecff6bcc591ef9b0160cb5366a8f3c8b8fe5f83f7be1_Device=CPU_Config=()
+175:conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=2b026a0d21a35251b07099e31ec58c459b848602575d2afa67e55830e8f3f411_Device=CPU_Config=()
+175:conformance_SpaceToDepth/ReadIRTest.ImportExport/Op=SpaceToDepth.1_Type=f32_Shape=static_IR=9296c80cc93d8ab7448140ad2f31b3b47a0759c383d1bc045704985503732195_Device=CPU_Config=()
+175:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=978c328422e3866322f3bdd52955690a47a1fdd47ddb9db66a4707b36a535dbf_Device=CPU_Config=()
+175:conformance_Einsum/ReadIRTest.QueryModel/Op=Einsum.7_Type=f32_Shape=static_IR=810f13adb3f7342c7d514bec2aa3f20d7a59527b54c7f6954b038efb194c5ceb_Device=CPU_Config=()
+175:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=68f6c55980c58f4d6de9e948d1c034b712cf74de509d8fd825fe7f7dfb11550f_Device=CPU_Config=()
+175:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6f2159bf315f062962fe87512c15ed5cacf09f898397a92b690c32caf147e50e_Device=CPU_Config=()
+175:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=152333527a542f3e2228bac5d0fd4ed288dde9205632a318b9b22b64e43be329_Device=CPU_Config=()
+174:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=eb33c5485ec10ae4f1268ab19db6a4ef86812d4c92680b43791274bb055e2220_Device=CPU_Config=()
+174:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=94693638ec4742dea16dc168eb9323995f1b2a35a53f577cf58ac3a08096892d_Device=CPU_Config=()
+174:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=f01fb29e8f5ddc7562e954e46b1d2bdbe6144d6bbe2ed2a0f16610f2812ac721_Device=CPU_Config=()
+174:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=dbee34cd3b708559af1ceb5fcf89aac35add00fc1b9e3eda2beebb2d5b629fc1_Device=CPU_Config=()
+174:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=dynamic_IR=b94b5361ee75b3684455c2b871b656a50c72e325564787c302a714f222845b26_Device=CPU_Config=()
+174:conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i32_Shape=static_IR=8d3863956a8a6a5067c45d40ae0207b14b9f1736bdf2a5b8c01979fbc012a5e9_Device=CPU_Config=()
+174:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=d36c5ab59d2ab873aa35b35a952e061568edd4ee8e64c1ab200bea63472a97b3_Device=CPU_Config=()
+174:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=7201a55d869ac6072af38ff89dfac3cfd2e6720d25f7607c6cc5f80040a8e82a_Device=CPU_Config=()
+174:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2bb16e2257294e3f7d905f66a483a8210f392ea822836e4edcf8910a7fbb4277_Device=CPU_Config=()
+174:conformance_Exp/ReadIRTest.QueryModel/Op=Exp.1_Type=f32_Shape=static_IR=9416264710da7447d7e3bced32d5275e81b03a897ad99eed5291cc94ad77449b_Device=CPU_Config=()
+174:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=c0c1a43608279d8870258be63005b38e23fe5501876c87840cc16a0bb2cf8dfe_Device=CPU_Config=()
+174:conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=beb6b43d96ce20db13ecf6abc53742fdc20d2221ea66af01e3c945348acf9bd4_Device=CPU_Config=()
+173:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c9fa96695ebc82ee5e83b4cde8910e54ce09611f304f24fb6b3faa692a21c60f_Device=CPU_Config=()
+173:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=1f6be1a43c786bfbf35baad6ff643b762e9d63c069c884a69b4ec6e89062ad7e_Device=CPU_Config=()
+173:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=183e5203c7008618a9cfb2680265bb3f588f80c2493bf7fac92eb258e66da2cf_Device=CPU_Config=()
+173:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=168e02701204a8f0e325fa1a2a4407612df10c3218c9431981fa6f1f8300eec2_Device=CPU_Config=()
+173:conformance_Power/ReadIRTest.Inference/Op=Power.1_Type=f32_Shape=static_IR=6837cea94eff6256c3c29807532662e123ccbffde1fcb6f75875d65aa7124a4b_Device=CPU_Config=()
+173:conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=b434cd386e4c5e688aac8da3425d2ed0d72961223eaaa1cf2ff951a88a5fa001_Device=CPU_Config=()
+173:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=d8432e7d31bcf4d95ff7ab845a6858ea67cf751c7ef0fca60a9bab1d187fe3cf_Device=CPU_Config=()
+173:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=6745937b3d592b8cc1729ab2af1888ce58502379a33f0ae5d5a3eb0e70c0bc87_Device=CPU_Config=()
+173:conformance_Exp/ReadIRTest.Inference/Op=Exp.1_Type=f32_Shape=static_IR=9416264710da7447d7e3bced32d5275e81b03a897ad99eed5291cc94ad77449b_Device=CPU_Config=()
+172:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f83f2d7d9c08aaf30635b39b51c0d7f1f622b4624da59c6cbcdf28d42470f11d_Device=CPU_Config=()
+172:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=86decc829c047a5febe7e5d047c689075810441a2f4725088317ef68d6c31239_Device=CPU_Config=()
+172:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=07b4c5d05754987a0524385690d79f74988302f437597b7477770e8d062d72a0_Device=CPU_Config=()
+172:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=8fc5ce53f1f6b924371ab2cf156ddbf7aea234b17befdcb6206ba51a7ad988c9_Device=CPU_Config=()
+172:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=f32_Shape=static_IR=fdfd59e3d316eea2f9fc3c56664cf1a07603bb6e26d1b367987d5046526ac60e_Device=CPU_Config=()
+172:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i64_Shape=static_IR=c52cc9f84ee56b9ced415f830d9f251e52d1dc56a3cace6548b3d345d2b1e812_Device=CPU_Config=()
+172:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=41a35ec8a58f581cb2558464a66077408e961b57821db604fe525d492d4f4fbb_Device=CPU_Config=()
+172:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=1d7cabddc96cb4ca2ed111c9f7a9c31b76ed9a052fd0b79db6bdc8fc55f24a4b_Device=CPU_Config=()
+172:conformance_Sigmoid/ReadIRTest.QueryModel/Op=Sigmoid.1_Type=f32_Shape=static_IR=b6a75c5d2a686eae53cc25c6b107630b31a8a4d8c6514980ed1a97754f33bdcd_Device=CPU_Config=()
+172:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=7744b2351d112ed761ebe0f43945f7dfd58fd2bfbd94bc5a4737549923caf4ed_Device=CPU_Config=()
+172:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=e48a363cfdabe0b62509e21641bb1cc88edaaa7d2eb82bf3ce747cab8355ff3b_Device=CPU_Config=()
+172:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=8de81b7de05bdd151427e1b5b03a8b4222284dafd31f9d4b1c3d0917995e9310_Device=CPU_Config=()
+172:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=a58fb7847e59bb119656b143af0c6f65e29f8211034fe7aab03666cdb95d7fe1_Device=CPU_Config=()
+172:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=66a4d902b67742a95e2d41d79b9d2434e57a55c168a88049624a0ccb62df9ca2_Device=CPU_Config=()
+172:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=de3245d77d2e004bea85af29c91e1668ae1b6905fe2cdabb92711adbde6406a9_Device=CPU_Config=()
+172:conformance/OpImplCheckTest.checkPluginImplementation/Function=NV12toBGR_opset8_Device=CPU_Config=()
+172:conformance/OpImplCheckTest.checkPluginImplementation/Function=Floor_opset1_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c6b8f476c9b5cf1a102cb33d5e68033bb074a520d01e360ff46b3e479addf407_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8f622d39d560109549e99d37f3c9cb476f4d69e8525e7a0ad8fce6fe79a6f982_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=b08690e29e0249d5a6a30f2ad886ec714067df994bc4d8cbd82d0d02af6335bf_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=cb7ad9dd22a7bccd73ade4d4aa78f9a25cc2bb7f0c08a01064491200089b3718_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c990afda81456723598f8f4085cb476376b1789d7f755e340e1d5498bcf02080_Device=CPU_Config=()
+171:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=125693eeea442dd24dd812dd2eaf8d2154274f5975d68b0132d2bf9bedfe0ee8_Device=CPU_Config=()
+171:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i64_Shape=static_IR=45bae87afb2c7e7f0b7315334e33b8a9baf42d81b95b844cb4987dd3540f1dff_Device=CPU_Config=()
+171:conformance_Split/ReadIRTest.Inference/Op=Split.1_Type=f32_Shape=static_IR=7f806d6c4a0ff3515dd9a092fee2ab14a5f363fd5fbc7503d64a8cec4bb1cca3_Device=CPU_Config=()
+171:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=6aff57130da7904e5d2300c4962f104d31c704872d5c33bbda4bb38efc34d563_Device=CPU_Config=()
+171:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=a29bdaa31edbcf7b3dc392625c0aa0a27e827e1363d52519858c93defbf9ebac_Device=CPU_Config=()
+171:conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=b9f9ac285915db9ef3e7437728695f2833d165757ffc81afb88242e7b471f434_Device=CPU_Config=()
+171:conformance/OpImplCheckTest.checkPluginImplementation/Function=Asin_opset1_Device=CPU_Config=()
+170:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9730f247ba4a13fb03274850f295de500156107d33db957188846fe49c2f4566_Device=CPU_Config=()
+170:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4e6262ae12e4f9470a87cc4f1cc1ef2a817a8080e25a79ca4ef67cb60a558b41_Device=CPU_Config=()
+170:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=18763287c1afb7684d3f74e91fbb8a8c17a13aa52908a5d97b6ad220c5c4f633_Device=CPU_Config=()
+170:conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=i64_Shape=static_IR=7adee81cf21b942334c25378325f61e13e9ee3ac95ae004d4d9efceaab6c0949_Device=CPU_Config=()
+170:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=51390fa8c8a5680ae4a8de4f655083caefbb8445dac8814d2b1827e2bd43f470_Device=CPU_Config=()
+170:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=4541365c567e68739f0733edba54e889f231026025e6866f805446392c575960_Device=CPU_Config=()
+170:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=21950c433f50ded0f662b9e0591e756a8dd685bc11a8296bcacc57ca1a4968b4_Device=CPU_Config=()
+170:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=b574ee57274a9f27f6d0908cef2645c458983225e3cb82c455148e83337ee3ef_Device=CPU_Config=()
+170:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=1c91956bf0130fd338f8f11cf76a08dcf5fe3c6c42239fa6a6aeb517eeabba36_Device=CPU_Config=()
+170:conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=41ea59b807081adea7869609c65776a42f88079ec22180807905d5c2e8ca0777_Device=CPU_Config=()
+170:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=87f3815fd73265960ef5910a3b03580b13e96d02784e159a0bf0ebc30bc911d5_Device=CPU_Config=()
+170:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=static_IR=5b466c4e4b53a5ea739df517da47f0764f9e31197b7d30fd9dabf17d1b33a489_Device=CPU_Config=()
+170:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=2058e018d32d8a73b2bf6471186e555c47e2c1a15ceb4131bacc43110bc17d30_Device=CPU_Config=()
+170:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=beb6b43d96ce20db13ecf6abc53742fdc20d2221ea66af01e3c945348acf9bd4_Device=CPU_Config=()
+170:conformance/OpImplCheckTest.checkPluginImplementation/Function=DepthToSpace_opset1_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a5e5b588f6223da1508413c42c21c3945994f492b039511b7ba2e576a052a52a_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=43d871d4b2b3346c08f8582b892ba0c0017d77688e16fd6d69f83f8101e12a69_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c219261f655fdb1bcfbcc367ca8f6c4bdf0dc1fbeb7413343a3f0bdd74a70857_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=6adce7c66c1630295ec8938bcb429f20b628b0ceed938bf81ac0fca8580f8d34_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=43d0f2c259414c3e23105e2f5a13e8faaf322904d9b70ceb8a056bdb51677ef6_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=be4634976e408d799217398ce693fe430fd46cdba6c78e01e9b824c208856128_Device=CPU_Config=()
+169:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=9a26ec9d1e0c4963016ff36986c79f5faed763ca5189215923d375e43c70a17c_Device=CPU_Config=()
+169:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=4341385bd87226eb41493c667323e8c1c39817983a48025192209270750eed06_Device=CPU_Config=()
+169:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=f32_Shape=static_IR=1e5127a9c21ad1ccabe67dd1f1e28a3730c09ba294ef1f9fc001c6dcd723ec62_Device=CPU_Config=()
+169:conformance_Sqrt/ReadIRTest.Inference/Op=Sqrt.1_Type=f32_Shape=static_IR=33aa0d800115f94b07bce9c6ca8b4447f2c4f442bff77cb9b02b23d2ddabcc01_Device=CPU_Config=()
+169:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=e2ab1cf295df4df47d43e632065bf8a48fa58e6f3a6d1bc971b45fe97a66652e_Device=CPU_Config=()
+169:conformance_Maximum/ReadIRTest.Inference/Op=Maximum.1_Type=f32_Shape=static_IR=78239cbf0f8d473af2209ad3d9297e02208c110efa7af981f8c09ea7d7290032_Device=CPU_Config=()
+169:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=6289232b1cbbafc963ac3cd787330c81a9cd02def9fefb83d6f6cced249de92f_Device=CPU_Config=()
+169:conformance_GatherND/ReadIRTest.QueryModel/Op=GatherND.8_Type=i64_Shape=dynamic_IR=c1cd785825e1b2794d4bc74f6dc257e92a382e95a868a864125da70acc5cdbf4_Device=CPU_Config=()
+169:conformance_FakeQuantize/ReadIRTest.QueryModel/Op=FakeQuantize.1_Type=f32_Shape=static_IR=848caca8b0b971d54e9c9b715b8bf35e0a33f1274d50a946384e64e5c0843a96_Device=CPU_Config=()
+169:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=a6ce8e7d0cf79a4e800c911d6aec8f178a39642718eae3f8e9a89f7adc05dc64_Device=CPU_Config=()
+168:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=89ed1d3c7fa6e15c01df3b792a183ade5b90edbb87886e1d58db075566b60a92_Device=CPU_Config=()
+168:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=588ef4d887ae9d8ad432525108c81a9762dc27490a3e01d3e86795c73275148b_Device=CPU_Config=()
+168:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=7e386c35d9d397e043876a23a2b9e5885964cee59bf46f1ae0660e6a84641ea4_Device=CPU_Config=()
+168:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=817b3db8f96297276bc70f1b4854867cb92c164925c9dce59a1d054e3c315bee_Device=CPU_Config=()
+168:conformance_ReduceMean/ReadIRTest.Inference/Op=ReduceMean.1_Type=f32_Shape=static_IR=33d84638f606d759354e190991899e47d2f4c63b0e378aac985e5fb9132dcd01_Device=CPU_Config=()
+168:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=a8ca0b23e0a0f66247fc693c6a8982e4f7daa11e14da296db0dbc9277fcad4df_Device=CPU_Config=()
+168:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=1409169e395a3eb90f9235b74f2f8c94e0e27a63fae33cda153d991ae1cbb68d_Device=CPU_Config=()
+168:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=154d7358887845b8f2a661e79ef57318fa9499ee5c19b7cae461b6f798c57b36_Device=CPU_Config=()
+168:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b53fa2c9b93d3750c17dfb8ef75e51c43881ee79fddc863d6c1c2adfeaeaba2e_Device=CPU_Config=()
+168:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4694d5512c7f6b97213ae6c93eb6f547e57922279edf34b94a8e45b7f6a9a980_Device=CPU_Config=()
+168:conformance/OpImplCheckTest.checkPluginImplementation/Function=DetectionOutput_opset1_Device=CPU_Config=() +167:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=4a9237e5cd29f0d2d5e738891752c6f6b29c9dc4c29d130b9c9921ad5787f819_Device=CPU_Config=() +167:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1bae1a851b5bf95921ad7666e48803dae416315a20a3ddbcc1c81243cb5bdede_Device=CPU_Config=() +167:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=69c87ccfa0080f65ed28b9a088343db5ceef524ae917b8e259b1865a017df22f_Device=CPU_Config=() +167:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=451a3b635d3882a87cc0d7b3f6f74197c08b708669751bb11fef93da9604e276_Device=CPU_Config=() +167:conformance_Equal/ReadIRTest.Inference/Op=Equal.1_Type=boolean_Shape=static_IR=9e166ed18be64949ce2451a1dc981381040fb109ee60e13a7f47308caac73e24_Device=CPU_Config=() +167:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=4fe6c9c924477957512c3d32086ca167fe5a4ddd5cd1b90d5d32452f6de8317e_Device=CPU_Config=() +167:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9cb8bb36dacdb562fddf77e93890fba560c6cdf038921e057e21f3e5e458c88e_Device=CPU_Config=() +167:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=83cdc5670c74aa0db5a1c14e70c45552cdba1c9e1f4d55c83398ce51abf80393_Device=CPU_Config=() +167:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=58cd9ea3d8db317b6ff7fca55bebcbc6846aebdbe309b1b621f5535b18a70320_Device=CPU_Config=() +167:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=117fd22d36b97216edb2112c043ba97872b9b7915d7909dfc395406e8ad91e4d_Device=CPU_Config=() +167:conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=550d5d475e0f53be8506153a78626cd5a5c0a949b9bbd9e2fea96a4ba2f7b908_Device=CPU_Config=() +166:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=2e06088cb191d8d26309843b1285b9ae4a1eb0722e1370875edde7fd2783851b_Device=CPU_Config=() +166:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=i32_Shape=static_IR=b4f42a7d1252f2dd02b31ac7b0cf4ffcbd452dbf0e508833e7dc709ee04889c3_Device=CPU_Config=() +166:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=d683b96d525bc074d4f8c15934a5082a3fba1068b591f67e4b05d605fe5e6aa7_Device=CPU_Config=() +166:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.1_Type=i64_Shape=static_IR=36b9b7be1407243aad0792e7a49ef25f7c3e3791dc1ff93cad40480837ba87cf_Device=CPU_Config=() +166:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=9c32e47cd885805256c3e3053412f7d8c448762b4b509507f6e4dd78e2aeb56c_Device=CPU_Config=() +166:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=97181a6433949eaef7277fdfec4f8f94b27463ee3ed4a6aefc678fdaf7eab4db_Device=CPU_Config=() +166:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=dynamic_IR=a3add607f5e37633f3298794f8e32e409e3403666af3c0fc57c7d4427b714eca_Device=CPU_Config=() +166:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f4b78bee713f23abfda124ca92d58828eeab6118710d93572a491cfd85cd05b4_Device=CPU_Config=() 
+166:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c3f8bb35f2f4473c563c3e5171a8fdc6f7a0ae20e4acde31a578bd20630952fa_Device=CPU_Config=() +166:conformance/OpImplCheckTest.checkPluginImplementation/Function=Sqrt_opset1_Device=CPU_Config=() +166:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExtractImagePatches_opset3_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f977fc239a0230860702f8c1971bd424f10b978bb03937668c37edee6777f12b_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d962e7157ea216206d6c5b11fe5ef6ee162a1f7dc20f84a3b058e405c324a592_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c39e4c1d9cbf5b8730644e1686cc09f36f7e4a4b89cadaf8d8902fdb27993a7a_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=183e5203c7008618a9cfb2680265bb3f588f80c2493bf7fac92eb258e66da2cf_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=15197edec77da431c491f42f64e86a811d89a337bf44615824226425b1c64d28_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=a9d3d025df92369ee1f1a81fe676bb00d7d6cc488868e04d0e713fb9e42451a9_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=a6b95dd49e84f2860b57f1f1ab6fe2baa265bb757112e53def3004a360053aa8_Device=CPU_Config=() +165:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=77dbcc61a98e0bf3c1bdcbec543818a8a959751f10b8ec1489b66570ff4e634e_Device=CPU_Config=() +165:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=38d935b0aa7266960b3d349b60c97bb15f535faed953fbe3ff24ae2560828f04_Device=CPU_Config=() +165:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=0fcad2ddd1c7b81bf5e88ef4d4abb26a33326a37fb0cceb1205c1efd2a2d3615_Device=CPU_Config=() +165:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=a30154a78e0e565a598629670b87338d03582cbe4ed5547256634ddad7bc9d5c_Device=CPU_Config=() +165:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=dynamic_IR=c838ac42d5464130a9049a63f7020166b34e2ef974c257a4060fa02c3b70ff76_Device=CPU_Config=() +165:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=5aaa81d6f07ed880b1e93a0fce7b6aab4c3c88bfb1b4b6cda4ead15eb145af63_Device=CPU_Config=() +165:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c359e1ea71a80fc519e8a2dacfc7f52f5a94a1142058641b0434f40866875c12_Device=CPU_Config=() +165:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=36f17a498b10c140f8a319d82e5c8f2cc3cdb7eb3be9f82f7ef35d9c9470231d_Device=CPU_Config=() +165:conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=f5d3b4ec51e032e4df5dae00ecba1a3198c29cba96c72b8c89126c4638b715d3_Device=CPU_Config=() +165:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e7e10785757d3131ebc375ebfd83c556e2c34a72be20965d9dd3e4f24a5ee2f9_Device=CPU_Config=() +164:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c2539b8a06e5dd0e01933c6861e366f8ed565e5956b8b2546647b55e966e7755_Device=CPU_Config=() 
+164:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c6abba035865ece7c6c44b0284ab7c6b8f735bc1ad1f75a9ee3bae6ce26c58fa_Device=CPU_Config=() +164:conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=c52cc9f84ee56b9ced415f830d9f251e52d1dc56a3cace6548b3d345d2b1e812_Device=CPU_Config=() +164:conformance_Sqrt/ReadIRTest.QueryModel/Op=Sqrt.1_Type=f32_Shape=static_IR=ace54c326bc8255cd741eec12762e4d8f645fe93d50c037effce893745f8fdb5_Device=CPU_Config=() +164:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=e08e84b17997c1b1279429161d287720e4c7deb0e6d055539149bc577ed3b104_Device=CPU_Config=() +164:conformance_PriorBox/ReadIRTest.QueryModel/Op=PriorBox.1_Type=f32_Shape=static_IR=cedd3bc0f0a8e20fe947135bd6ab9515283275867e1b837d36f2fac72363f449_Device=CPU_Config=() +164:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0534fdfa97228a6aacf4ed196a9ace8e09d8e4decdcce058176b0312500b6c07_Device=CPU_Config=() +164:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=5980eb1b6c7a44c7812f89f10f0741e5925abda9ad07e1a82ae2a3310abae74a_Device=CPU_Config=() +164:conformance_BatchNormInference/ReadIRTest.QueryModel/Op=BatchNormInference.5_Type=f32_Shape=static_IR=c602b01c85ee95a1d7deb1498c5f0494a5ee727ce8874d5beded8bf33631d0b4_Device=CPU_Config=() +164:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=i64_Shape=dynamic_IR=edf223c654667e60869d97d2fb6a2bdf356db8d7e997b4b9a66e56445bc24f30_Device=CPU_Config=() +163:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=fd97b6aab7b86b0dd2f8c0ce622601e80f3b864d23d7d4f61d2dfa42195936b1_Device=CPU_Config=() +163:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6b86bf4f834b297dcb461acb5854aeb9783a381521ea1a8e1cf4fbeb60d6d09b_Device=CPU_Config=() +163:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=610a8f8c44b0e133d4b5684c37017859d06bb2251482eca0cdece0a1c216b936_Device=CPU_Config=() +163:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=47423c3e9443249e3dbbf58ee0f5b69b15e677f84de44ddb9d2851d1341dae96_Device=CPU_Config=() +163:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=2ad5b63ed56c3966570062970125d1cac16629595e9ac34c6613cf00d6dec0aa_Device=CPU_Config=() +163:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=69f4c39c3fb2dfc55714893e1e45761238e74bf28ecfadbee3f4965b5a379888_Device=CPU_Config=() +163:conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=dynamic_IR=848de524e27e13a1e5b33e5db3cdf2710ba4566c3219a018e878f998c07dd718_Device=CPU_Config=() +163:conformance/OpImplCheckTest.checkPluginImplementation/Function=FakeQuantize_opset1_Device=CPU_Config=() +162:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9ea20be5797b5ab937555c69751a5be584c73a191b3fe3d6fb96a5665e26fcbb_Device=CPU_Config=() +162:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=77dbcc61a98e0bf3c1bdcbec543818a8a959751f10b8ec1489b66570ff4e634e_Device=CPU_Config=() +162:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=1f429cd9b96a0ae8b336e874e911d2cdb79820b76030c61de8a1c057a0c33168_Device=CPU_Config=() 
+162:conformance_Tanh/ReadIRTest.QueryModel/Op=Tanh.1_Type=f32_Shape=static_IR=591cc5abb16f22cfa720e53be695097b83c42a971536fb5b79d0b02cc4ad328b_Device=CPU_Config=() +162:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=1bde2f2a7294810531e23de80f25a451b3033487b5919c949b708b273dc3973c_Device=CPU_Config=() +162:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=508a961c358d40ddb6906694a24f87dc24f74cb4643aab58ee1d6fa28f099e6b_Device=CPU_Config=() +162:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=0d6cc305ea05df2178e3b4ea61ba2f296655e77af08556491e0dc8dfd46bdc6f_Device=CPU_Config=() +162:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=98c0b6c23e4cf51a6069f306109ea2b4e181cfb8e552482cc0d0e63c61406933_Device=CPU_Config=() +162:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f49b212b59261888a5ea4652f9a4cdfe25657c7a0e4d3b6ecc16255e8d2e8cd5_Device=CPU_Config=() +162:conformance/OpImplCheckTest.checkPluginImplementation/Function=Cosh_opset1_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f99caac2fbfafe61a686cc29c0df0779eae1a0a1826f5bcb820048ec3c148207_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2b59c9f67435c46699dc1c66ee7ddbdd333bfa544d0aef7bd1389db2635868c7_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f66bbeb796e4da5d462ef573e38fe52db5bdaf2367b2a07aeedae6ce33c6704f_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=445a2c47e85b116d03e5f6fe43863a39778b78ca5175fba1bb0eec669f7610cf_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=0f3e035b6571da159267ff1f89b5f2b2d3bbd599760dc5d5721a1fb2ab2ea75d_Device=CPU_Config=() +161:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=c0c3d43548fe20fc4e63bcfc8ee6d0a70a6076dfc0ee79e31fdcecf6cf35921c_Device=CPU_Config=() +161:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=0832e6deae4ceb25b92cdfa532fb5d5fadfe7fd7a00b79f630ddb5bc011986ab_Device=CPU_Config=() +161:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=4541365c567e68739f0733edba54e889f231026025e6866f805446392c575960_Device=CPU_Config=() +161:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.1_Type=i64_Shape=static_IR=26d97c755f660ed8ee08a0de8d6ab88598391cc79b239bfaf0a102722ffc4bf7_Device=CPU_Config=() +161:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=6e8dbb054c99609e5aedd642130e867c22091118e0bb7ddd870a66dcfd11452f_Device=CPU_Config=() +161:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=8457db0b4ea6829aad99afe4c31b7004b57daef4cd0ae02ca00090cbe5feb72d_Device=CPU_Config=() +161:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d8574c324ded923f1ea3ab0d8e09c626f3e8a04efe08258b665539c639b7958b_Device=CPU_Config=() +161:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=453c1f5bb6c2e9c81a04475c49696c6e9e94f77853ef961e1839b541de7c7e21_Device=CPU_Config=() 
+161:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f6f3c6d199a224ee983f6905aa4f72ea4138e6076d7307c72588dda0cc9c6ed1_Device=CPU_Config=() +161:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=439308ddb64edf02f96ade09e7888cf89f422fbdb8c8242521ecc3f93e61bdd7_Device=CPU_Config=() +161:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=0256d48640841a9233553afa85e34dca797e6b5eedbd772f606c1a0e6f8e91a1_Device=CPU_Config=() +161:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceL2_opset4_Device=CPU_Config=() +161:conformance/OpImplCheckTest.checkPluginImplementation/Function=LogicalOr_opset1_Device=CPU_Config=() +161:conformance/OpImplCheckTest.checkPluginImplementation/Function=Bucketize_opset3_Device=CPU_Config=() +160:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e86061c75b7e9a65644e82de6b8fb2a532ebdfb302f46f378b6ff20af8d1d14b_Device=CPU_Config=() +160:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a956d2fb1fd17e2d864b3eaa8915cc0c4f9a768e35fdf5bf20cf6bc7f41aa130_Device=CPU_Config=() +160:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=526afcc4dff58aaa019466b0440b94dbd2d5f14c060d47b8ec40183deafecd83_Device=CPU_Config=() +160:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=8bc8753f4d26c5d1f2ea481937dcce0f5b78971f18f5ebb258f49d4a0d86a333_Device=CPU_Config=() +160:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=8832b317ba58dd0efd1e8fa5238d35644d8468a03c9b35809a20ae64098dc986_Device=CPU_Config=() +160:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=51390fa8c8a5680ae4a8de4f655083caefbb8445dac8814d2b1827e2bd43f470_Device=CPU_Config=() +160:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i32_Shape=static_IR=1942042c790c3fc6053ad91fa5e45f8ebf3c11bff7e3427a71b8fdc1bc5db053_Device=CPU_Config=() +160:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i64_Shape=static_IR=c52cc9f84ee56b9ced415f830d9f251e52d1dc56a3cace6548b3d345d2b1e812_Device=CPU_Config=() +160:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=f32_Shape=static_IR=2f23f1158754aa494abbf61ab15118173a7ccfe90523b2b9ab7cc3a6fdaa0e37_Device=CPU_Config=() +160:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=b94d2ed6a2b113922805a69578ec5ba2ba3d8f0ea46ca37f095b4ccc94d76b77_Device=CPU_Config=() +160:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i64_Shape=static_IR=5d791fd5b82a74a42073567349728035c4ac52ea64c1a154a73bd4e61d1b42dd_Device=CPU_Config=() +160:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=0bbbd97c4428b9565666e9a1e56acc70035b378e16abafc54559a155583d9e6b_Device=CPU_Config=() +160:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=d9231cf5e3e491e318f16514e771cfdee4b781b42fc9d45088da850ab48079cc_Device=CPU_Config=() +160:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=a9b0552d84d057a656080c8e302afa30962dc02105abe7136cfd77f0433eec18_Device=CPU_Config=() +160:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=b61800abac107b248c29df7ba04a73c91d490782b1da46164c1b7d2f8cec3cdf_Device=CPU_Config=() 
+160:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=f42d85c8e1388cf2cb69f9efb2970255c6535f1c3f904a9b08cc18cbea6aa6c3_Device=CPU_Config=() +160:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0ac57f7cc81a683585f810885288fdaa174de2497d00156b85e067653aad3a56_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f89a1dfd0ef8b50a998962d5a4f4b54451ea4c533476a2e3d42c04e9e645afaa_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=eb98c3593d72ffaa01de42caf4832854d9486b4148c57742c6dd72a251f8cb45_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c14aca93b401d9d2325a5396c1489e1fa29aaa57f592cd2b4e6792ba5af90a90_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=348254d0e2b145f9e5443b4d4470b2ab29487acbb34a71285a5c0e1bd29cb942_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=5f43b4d027388fff204c9c64df9f62bd2a72034143bd655e45121ca886c5d15a_Device=CPU_Config=() +159:conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f99a212a117855e6e2dc4a338444a8ecee441f989638f7a0700ce24e037d29e3_Device=CPU_Config=() +159:conformance_ScatterElementsUpdate/ReadIRTest.QueryModel/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=3df69301c7a4d857a546a30a0d76674c52e3abd819d644ec036636eb7cb92fc1_Device=CPU_Config=() +159:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=3c7c072c9e4ee694e049a5f256cf0e72caf85384291ee8d399ce136d22c575a3_Device=CPU_Config=() +159:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=99183013393075553f5cd30818ccd603ff5d3e9e71dd8f42ced0df2377280729_Device=CPU_Config=() +159:conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=10cf1b7a4de1231ad721c9660697d6ee17bcaa2151f08eef596b41e6e3aa1b2f_Device=CPU_Config=() +159:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=dynamic_IR=e255ef2321233444ce6e4fdeb513a9b271987457aa9bd456948b64f589de1e2b_Device=CPU_Config=() +159:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=453c1f5bb6c2e9c81a04475c49696c6e9e94f77853ef961e1839b541de7c7e21_Device=CPU_Config=() +159:conformance_Abs/ReadIRTest.QueryModel/Op=Abs.1_Type=f32_Shape=static_IR=5713be8dd761def00c701c74d0aa913d259206eff1103b9fa6de0f6f1a25e566_Device=CPU_Config=() +159:conformance/OpImplCheckTest.checkPluginImplementation/Function=RandomUniform_opset8_Device=CPU_Config=() +158:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f7cf7cbc88dec99af8d35e65e926745ad318706c454b90740a19589285733fe9_Device=CPU_Config=() +158:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f0c4dee4dcd8f03dd599ae04d7dd6ccfafc4d900d052a62f232a5507ffc006f0_Device=CPU_Config=() +158:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=eaac9340f5625cd59856645684fd84a5f1f0703dd3748eb85fdff2eedd8ee64a_Device=CPU_Config=() +158:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=6e1207753b08d53b18c551ad07a245243197370051be78218db028f3d3b835a5_Device=CPU_Config=() 
+158:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=static_IR=8b759b2f1999be207aeb39763bde3eba4aee028e9369a86a87493ff86f3fa014_Device=CPU_Config=() +158:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dd6dc9060efbe22735c22c69f0323c7e6a77a30cfbaae7b79670b9b26fb2be70_Device=CPU_Config=() +158:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=978c328422e3866322f3bdd52955690a47a1fdd47ddb9db66a4707b36a535dbf_Device=CPU_Config=() +158:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=98c0b6c23e4cf51a6069f306109ea2b4e181cfb8e552482cc0d0e63c61406933_Device=CPU_Config=() +158:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=0b0005b038a938c698489da595fd89a45d2f685c831bc172d81b2afc09658dae_Device=CPU_Config=() +158:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c1852c534b8b95bf1a9aa2771decf2368fa095c5f5688d38ab9ce0bd86152a19_Device=CPU_Config=() +157:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f1f52703006b7d81ccadfa1c54db42d8b19ac7b8beb3ee88f2d7252170358d90_Device=CPU_Config=() +157:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9aac77567d944de6632688fd3de80c0b3da1ee741da639897c2104d3121d690b_Device=CPU_Config=() +157:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=57b104f3a1796c31d59d676d9f6d65789ed72fb21beb382bf418c452b8452d27_Device=CPU_Config=() +157:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=9bae5a53011ecba6327961e6496f3312134c81e148523434968c3c56b5e0c491_Device=CPU_Config=() +157:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=static_IR=725aaeceedd7eba9be6ba4203e31cead733ed80dbafc33e902465d4338dc8f4c_Device=CPU_Config=() +157:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=1b13b40884ddc8a2afdfc9bf351627746534303122dd4e0c2c5fdeace9e89e7c_Device=CPU_Config=() +157:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=f73224b14c094974e582d3d903cc332f5c1da138368692e5d0be93127f1bf753_Device=CPU_Config=() +157:conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_Shape=static_IR=9416264710da7447d7e3bced32d5275e81b03a897ad99eed5291cc94ad77449b_Device=CPU_Config=() +157:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=e0641a7f9e64123d0d51a75e576fbd0e405105b8ead44a618068e77d2b4bf933_Device=CPU_Config=() +157:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=a0cee5b220a433f1d76460a1f452bfc26aae12f7b84983a063605b4a8cd0a5d4_Device=CPU_Config=() +157:conformance_CumSum/ReadIRTest.Inference/Op=CumSum.3_Type=f32_Shape=static_IR=d517f63a168293380a1f066313e6a2bacef9eddf961ce164f11ce2609a632b3a_Device=CPU_Config=() +157:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=d34bccebe88a4093c9810d56088e4bf07b55bdab1801d7d830360aea1be22499_Device=CPU_Config=() +157:conformance/OpImplCheckTest.checkPluginImplementation/Function=Squeeze_opset1_Device=CPU_Config=() +157:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReverseSequence_opset1_Device=CPU_Config=() 
+156:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c08b3d30c1b4f1b5456e4791d4d7fab1d21f743dff0dac1ae5d09abc6764fca8_Device=CPU_Config=() +156:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=582f7347a93cb2c9e51ade6c405ff25b23d009bdcd3d7a3c49902e627a041252_Device=CPU_Config=() +156:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=09d4b4ea324f91ba6006bad4c82ca08e723c83c1b862d8075475e986696220da_Device=CPU_Config=() +156:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=dynamic_IR=24920893b72e3bdf88b7e4142d1dd9ae0a679f686a3b187bf740f014d04b9ade_Device=CPU_Config=() +156:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=d5f5f2d39bfe4ccc6f12f76e5eca8e2e40ac7ac6c5f38a7cac21970df213d4cc_Device=CPU_Config=() +156:conformance_Squeeze/ReadIRTest.Inference/Op=Squeeze.1_Type=i32_Shape=static_IR=5224ffd078708e8917b14b4118bc4a42317c123dc0a5dca8234ad73d44daf845_Device=CPU_Config=() +156:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=d9eeac72636735d7541c2d0ef14ebfc7d4a1b3598c08c136a9123b2ed89e13ef_Device=CPU_Config=() +156:conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=ea71ab322d6f3d74b0a7bdc3ff5dfd322f2d8c518a1fb5bc9960c5e04808f28e_Device=CPU_Config=() +156:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=a35667a1c5401fb3102a59ce0fa67d0ea4829f8ce282c43767517ce025469bac_Device=CPU_Config=() +156:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=bb5cb4e2a8cb9be32332ed3255c99de478d8d2e31cfb1747aa322df438ebaa49_Device=CPU_Config=() +156:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f0853773e26eae3d051504ed8db7f182c0e90ef7b45625a1a72ac51a73e2208a_Device=CPU_Config=() +156:conformance/OpImplCheckTest.checkPluginImplementation/Function=TopK_opset3_Device=CPU_Config=() +156:conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMCell_opset4_Device=CPU_Config=() +156:conformance/OpImplCheckTest.checkPluginImplementation/Function=CTCLoss_opset4_Device=CPU_Config=() +155:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f9f701a7d26d77a2b1eb3cc822efb5da95f1edbe614469f725a381ce892d8d91_Device=CPU_Config=() +155:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e5249d5630503351688090f1a9d0143b02e750045924aee8f9003072446583f4_Device=CPU_Config=() +155:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=429b91023f3ae9a323e40ed372fc29926fcd6aa7a8e77e4ddaaf68fa648c43b7_Device=CPU_Config=() +155:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=8de274a65748ff76511a53b614cfb33651d2b51720714851a16976fc1ee2b6ea_Device=CPU_Config=() +155:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=134ff6b704123c583b694d7023c99cbcfd10a1afc48819ef35b46dc4d0bca500_Device=CPU_Config=() +155:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=c30414e8e845d75527c26f62880518cc4d24c1a528b20cefc3b2c32be7436c81_Device=CPU_Config=() +155:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i64_Shape=dynamic_IR=c117722add2db4a6eee4dc2fbfb99174911d54eb3896c65097d31d656fdee639_Device=CPU_Config=() 
+155:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=a94e0bbcae35d7cb33efba2c6df3275f7bca8520ddb37eeeab81829906fc8964_Device=CPU_Config=() +155:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f0472c0e5ff8fb82651424269bd9f77e73eff6c43c70b6192f07303c0d35db8e_Device=CPU_Config=() +155:conformance_Greater/ReadIRTest.ImportExport/Op=Greater.1_Type=boolean_Shape=static_IR=dce38966c13ac9886c7480261e3483d822355a9bf3835d00795e7627744a60d7_Device=CPU_Config=() +155:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=5498e895212b57b42748644679c1dd67936f230d2c61998ca6bee31d527035cc_Device=CPU_Config=() +155:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=99e405218c1a96c5f8af65aa814893d8958e8e991d1ed8dbbbd586efa589df39_Device=CPU_Config=() +155:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=09683cb2a0a44acb804a2791ca93bf004bfc3882c11af94ea67a9fc1eb1e5052_Device=CPU_Config=() +155:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=2c114b0035075d866c028f9a1168725375feac9a666a881ae6b7db6e9066bb3f_Device=CPU_Config=() +154:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=e8df0b3ab9e127c1d37881f4c250ca0fd0dd2ec822cd24bf95e7860484fe9b8a_Device=CPU_Config=() +154:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=874c0fa19029457645c4cff20769f66ba7aaa1a35ade84c948f83aaa9c1ead19_Device=CPU_Config=() +154:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=f2df871f255156043f03f34333d59d9213fd52ea24f69dda1b04888ed269acad_Device=CPU_Config=() +154:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=823c1bd1ce8ee0ae28410bcea9f3c33ef9f9271e8f41f0871a7d6eb6b2850757_Device=CPU_Config=() +154:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=6745937b3d592b8cc1729ab2af1888ce58502379a33f0ae5d5a3eb0e70c0bc87_Device=CPU_Config=() +154:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=a6ce8e7d0cf79a4e800c911d6aec8f178a39642718eae3f8e9a89f7adc05dc64_Device=CPU_Config=() +154:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=474e4bfe52232239280bbe4e2d2aed15cf69c7ec8db86b010084c6e68a8d0e1d_Device=CPU_Config=() +154:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=14f4dcbc8e714fdb791d15b62646db0da2cf647d431dd6ea044ca6976ef51753_Device=CPU_Config=() +153:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=37f1a0a9bb9b948ed78217a65a5a2de7f0234b1e000fe5ee11ede68767240f1b_Device=CPU_Config=() +153:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=static_IR=2e38326f5f0527299a0385fc3bb6c85c80e12e5bce07fe530624aba7113e82a6_Device=CPU_Config=() +153:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i64_Shape=static_IR=c7a696f3217515ef4ff5eb46fbd15af6533f0fcd268398fbd434f105c0a11328_Device=CPU_Config=() +153:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=e1ea320702cf8065ce85c325507a199b95dc9ffce3fa715b4d8195ca67a5a374_Device=CPU_Config=() +153:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=b459cd78b41e36a6c3823301811fd3322a77f802ffc3399eefdfd8ffa4ce6e6c_Device=CPU_Config=() 
+153:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e0293184207036f6016f557f8df813c6536b18332f589245c5c606a3b36df1e4_Device=CPU_Config=() +153:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=053d601716750db93af5ae01d67213086ed987370f9ff59723824dcd0a6c2462_Device=CPU_Config=() +153:conformance/OpImplCheckTest.checkPluginImplementation/Function=Range_opset4_Device=CPU_Config=() +153:conformance/OpImplCheckTest.checkPluginImplementation/Function=Multiply_opset1_Device=CPU_Config=() +153:conformance/OpImplCheckTest.checkPluginImplementation/Function=MatMul_opset1_Device=CPU_Config=() +153:conformance/OpImplCheckTest.checkPluginImplementation/Function=I420toBGR_opset8_Device=CPU_Config=() +152:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=cbd851b8c4e89bce3a20b8795b3bc5a0105d26e252a4674541ff630496144aaa_Device=CPU_Config=() +152:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a9311932565e68fff052e15c1a0522e1c09270d06521541ca28b67c34184b1c5_Device=CPU_Config=() +152:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ba15b8b85609531d91c7809eb90c3a0079d19d36b83c8767306cb276c9d67ace_Device=CPU_Config=() +152:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=9c1e1b695646ea4f56a87b7e5a815c12856f718920e01e86ed78f2dcaf896a37_Device=CPU_Config=() +152:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=i32_Shape=static_IR=75f0349e33d0151c276e3f5ce34f7c1a71f5572331157b2e34f889773d7d8754_Device=CPU_Config=() +152:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=static_IR=aa757ffed4165beb3074da6ad09422d7823a1d0d6c8a654adc56343d0e43dc66_Device=CPU_Config=() +152:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=33d8f6d258ae8dfd09b8e6fd39f0e74384eabfb685e0e72a3c798101ea56a1d2_Device=CPU_Config=() +152:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=c5ce2b16d47cf93b073c2ba13556fa9fdd1b6f1dbe6387a50b507a40ab1d1c1e_Device=CPU_Config=() +152:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=d8546655166c322e3049ed3a71725c8e89901212007c44c8029ef8379de96db6_Device=CPU_Config=() +152:conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=80cdfe1797800671134d77fa9c7032cdc1b19b4905fcefb11399610216f6e623_Device=CPU_Config=() +152:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=19a94fc5cfe3ab1b4e169b342ec8d9f0fdc4ef19484c8c34d6ab938c6e7bf5fd_Device=CPU_Config=() +152:conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=static_IR=9e166ed18be64949ce2451a1dc981381040fb109ee60e13a7f47308caac73e24_Device=CPU_Config=() +152:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=08ba7fbf736896f373ea81dd727940aefae22a39e217e84dfc5617ed62133d10_Device=CPU_Config=() +152:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c3c821b46d994404c55856237eb70534cff33687df2bde0a86d0bcc9f20878eb_Device=CPU_Config=() +152:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=7f30f8f46d999a18110b8f8f9235b3534249be45e55f1aacb419126ed1eb5851_Device=CPU_Config=() 
+152:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4fb0809c5cf2945a097d18f445de6f4f5cd2c124cdb495e6f0a12e9d937e2b80_Device=CPU_Config=() +151:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=3888863c4f725445490846561b2aef4a5498ef1583903b365fb864e387eb9641_Device=CPU_Config=() +151:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0aa7024ee856fc832b1e639fbed60e1382c8e1b84f7cf2d33447f4bbd9ce75ec_Device=CPU_Config=() +151:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=f2eb693da69b0ad1af3bcef6c4af46ba2b92897f76989c310a65aac5c2027725_Device=CPU_Config=() +151:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=474c6273d1e48e8e5885966dc93629ad413683ad942e3101452c1a58fb5b5af9_Device=CPU_Config=() +151:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=139cc84777f1e0d489245d058877303e72a93eba3cffbf5f919de21b4514bb0d_Device=CPU_Config=() +151:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=6745937b3d592b8cc1729ab2af1888ce58502379a33f0ae5d5a3eb0e70c0bc87_Device=CPU_Config=() +151:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=25ae6295f4d206fa9069e20bc659dbd87c20aaa15c3f149ab25d003641c738c5_Device=CPU_Config=() +151:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=80e0900e8270dfbd0fc879ad4d065847f767cff9399688bb9e5e03b8531c554e_Device=CPU_Config=() +151:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9b64733aa0a8994cb3695a7c26f905f4d2b86c2e157edbd8a9970d33970a4015_Device=CPU_Config=() +151:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=008176749f0b2cb46830abe910865d8cf1974cd62902ce3e157a03df2b1cf9c3_Device=CPU_Config=() +150:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9f8fca1ab855d5a71d7acabdefda202e270bf16b559fd581f9e663caa301ffd7_Device=CPU_Config=() +150:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8ad9f9e5cb26eb922d7d7d80f93be2e9d3a5ef344a013c9dd546df2ef195ec24_Device=CPU_Config=() +150:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=21534d0488c3f7c8bd40bc81476832e866000c97ee6892359826c7877905d733_Device=CPU_Config=() +150:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=bea169459345470ab5d89e5ae9a8b67d6e9401caf7dc35f5060805152e20d6cf_Device=CPU_Config=() +150:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=1fe78f5a88510b70fb39ed088e1418ae09155d179afc3a614a641b4e8f86574f_Device=CPU_Config=() +150:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=e6ee69f681f9388da19dc9c17781710c5622ecda436aa2d4b018578548acebc7_Device=CPU_Config=() +150:conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=d9eeac72636735d7541c2d0ef14ebfc7d4a1b3598c08c136a9123b2ed89e13ef_Device=CPU_Config=() +150:conformance_Relu/ReadIRTest.QueryModel/Op=Relu.1_Type=f32_Shape=dynamic_IR=43ceadf05184954dd8697d4f737de323ec2ee75f93e0d33d60dab2acc995f3b6_Device=CPU_Config=() +150:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f18fa21106120cecd81f50d635b1c42cbd641877ffbf78e746ef7375ff546d7d_Device=CPU_Config=() 
+150:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i64_Shape=static_IR=a2450d07c12669e586815e60d9a2b568f88a49c9b63730c898b9eae907b5ec4a_Device=CPU_Config=() +150:conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=static_IR=906676850a62f56935dbd13792be1013db602488f29eb757a546b411699ccdd5_Device=CPU_Config=() +150:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=a6f5b58b1d85e5a99389748ae14e507302127e583c436dd9e6015d3c33ab0536_Device=CPU_Config=() +150:conformance/OpImplCheckTest.checkPluginImplementation/Function=NMSRotated_opset13_Device=CPU_Config=() +149:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=fc8b85b03281a7e8532a130a70fcfce5b6c40b1c8863eaea3910013a0bc4e769_Device=CPU_Config=() +149:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=883597c2c4e004b0ec1e1ca8d1b75395c714fc6a99cd31e35ca0597d0ccd8f8f_Device=CPU_Config=() +149:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i64_Shape=static_IR=7e88dcf638caa6058b01dd6c31ba40efb0fca8077cc295ca63c2ebe4c7298926_Device=CPU_Config=() +149:conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=9f19f32ddff44c1c8f7dc3b9b244a9191a15fef9874e016666fe6a817937f699_Device=CPU_Config=() +149:conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=aed658319c31cdb1d3a47a2a93c7a4f524d9af8540e2019af10e8e1cebc3c2bc_Device=CPU_Config=() +149:conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=static_IR=81eb5381e1d4d3dc7cf0d83a9cd787813d3267c99b31cc9a3cb0cf9b01727c0e_Device=CPU_Config=() +149:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d7ce9fd9d99a7ce9ebb5fdadb4db39f4ea66f74788704b2b9f96660c7403c031_Device=CPU_Config=() +149:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=5adf6fcb72c0d6086a95fbbc5744e7d02dfb32490e0f42c62b57bc98489b801c_Device=CPU_Config=() +149:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=static_IR=586094b4ff6617c08c87a53c7be1ca26aae40657c8d964d81eda731dbb27e848_Device=CPU_Config=() +148:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=6a9a72aca963de945d97658e484453191cf6af26cd6838c1603299aff3a49a8c_Device=CPU_Config=() +148:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=16b3235d5271e534a1bc725f80e2bfcb837a1c6f144bcfe8211a3e5359644441_Device=CPU_Config=() +148:conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=41d80c815a196293f7d22af59f5f602f7e4f11e06208a693b19743fb796b98a8_Device=CPU_Config=() +148:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=8f7dc81bfce05ce39b694fe48197a4fd2aa7933c7061508be3b9dfefef518f75_Device=CPU_Config=() +148:conformance_SoftPlus/ReadIRTest.QueryModel/Op=SoftPlus.4_Type=f32_Shape=static_IR=443141d6914003828f76ac1de39cff68ee8ae96b2524fc41e9f5f95707b834b0_Device=CPU_Config=() +148:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=7685da6dcf91a208b72d5961c2c248d816de501366163d61b1ee3c148787fe77_Device=CPU_Config=() +148:conformance_Range/ReadIRTest.QueryModel/Op=Range.4_Type=i32_Shape=static_IR=8d3863956a8a6a5067c45d40ae0207b14b9f1736bdf2a5b8c01979fbc012a5e9_Device=CPU_Config=() 
+148:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=3effc90c24c0eb76bbc89809d34c6541654366a02e21378a668dd932a6cc7756_Device=CPU_Config=() +148:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=52ee24baa14f302174ce3b13a119ccb6a54994413daa1f052a75464528b07088_Device=CPU_Config=() +148:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f69e74dc680137ec5ef0b63e38d451da7bf1b61d2acabab77df46b76c9777402_Device=CPU_Config=() +148:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=577ff3f9c8d226d1899056073c0223ae2d81dcc940c5fef8b9ce9cf63931e9e2_Device=CPU_Config=() +148:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=43aed1509066aa7c839a82c9865228ce3ebdfbe519061649807875ec6e86d715_Device=CPU_Config=() +148:conformance/OpImplCheckTest.checkPluginImplementation/Function=HSwish_opset4_Device=CPU_Config=() +147:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=cc3619fbe03f9b98ff07babc5c11f9bd9f26927c8d793abc7188595145bd1371_Device=CPU_Config=() +147:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c1923c409aa2da9da8daf339b8b26be9ec6a106e65098182015c21881b0b5379_Device=CPU_Config=() +147:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=8b55c14423b60f30029c68c603417fb98119c5922e2827c60c99edc05ea813e1_Device=CPU_Config=() +147:conformance_Tanh/ReadIRTest.Inference/Op=Tanh.1_Type=f32_Shape=static_IR=591cc5abb16f22cfa720e53be695097b83c42a971536fb5b79d0b02cc4ad328b_Device=CPU_Config=() +147:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=i64_Shape=static_IR=38f6cef69f6a7d9886b5d38902fb76e4ae41385fb3c95e229be4b44456ab2e87_Device=CPU_Config=() +147:conformance_ReverseSequence/ReadIRTest.ImportExport/Op=ReverseSequence.1_Type=f32_Shape=static_IR=a5cc0793d73f7f76fc02b5ae04ef2a29bf212ce5c59f9bbef91e0aa5ee17785c_Device=CPU_Config=() +147:conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=97a94ab826d2992013df32a4f93f6adbc38ad17a26503005046f68904adf53d1_Device=CPU_Config=() +147:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a08cb2499595ed0de5c51e3b0feae24d9d5462d227572e771862564e1875b6ef_Device=CPU_Config=() +147:conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=dynamic_IR=e6f95710a782b6c7df8397480e5cffbfa773fdf4ef11c93b2b1ac4694313b080_Device=CPU_Config=() +147:conformance_DetectionOutput/ReadIRTest.Inference/Op=DetectionOutput.8_Type=f32_Shape=static_IR=d3155499ccf835bc57e4ca19c25ca32fc63ecede0a2c43ab2a3e43ba4a6a4dcc_Device=CPU_Config=() +147:conformance_CumSum/ReadIRTest.QueryModel/Op=CumSum.3_Type=f32_Shape=static_IR=d517f63a168293380a1f066313e6a2bacef9eddf961ce164f11ce2609a632b3a_Device=CPU_Config=() +147:conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=static_IR=488c8e933df63c1368e021869a92fd48929ac252863ed4c2acfab7174b449581_Device=CPU_Config=() +147:conformance/OpImplCheckTest.checkPluginImplementation/Function=CumSum_opset3_Device=CPU_Config=() +146:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4746fb4d92aab20d21eeb0885d35c88abd50aa250298473f5bd143658eef2316_Device=CPU_Config=() 
+146:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=24920893b72e3bdf88b7e4142d1dd9ae0a679f686a3b187bf740f014d04b9ade_Device=CPU_Config=()
+146:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=8e80bbd29743e87a0a6d4158a06249766b6a9cf424cc1c0ed3c6f60e30e6db58_Device=CPU_Config=()
+146:conformance_StridedSlice/ReadIRTest.Inference/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=4d2c49ebbc46b60233510b63e280442319496782da33185f7c2d6003611f937e_Device=CPU_Config=()
+146:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=dynamic_IR=a99a5ab2de2d408c2e40ad5734c9bd5ab4d1d221f4dd24572e05538b134ef88c_Device=CPU_Config=()
+146:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=3d24c272ca88d4ee24f437a310abc05340e110f8596beb6a1ef96dd18818ebbe_Device=CPU_Config=()
+146:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2bdfd42ec67d330dec8ea2817499b4c2d32a3d91deccede902acba057b050c49_Device=CPU_Config=()
+146:conformance/OpImplCheckTest.checkPluginImplementation/Function=Tile_opset1_Device=CPU_Config=()
+146:conformance/OpImplCheckTest.checkPluginImplementation/Function=Power_opset1_Device=CPU_Config=()
+146:conformance/OpImplCheckTest.checkPluginImplementation/Function=Log_opset1_Device=CPU_Config=()
+145:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f8795aaaf3fb96028b8cdcc963cbdff4c3362d78c4801af4172a73a3cd843edc_Device=CPU_Config=()
+145:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=c4ae9be783990e398b3e8f0af76cab50d72c40c705677a3fe1c5dea592952d1e_Device=CPU_Config=()
+145:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3500be960a489d618c1ff6345c1d6788d17c43786c10a7e7b630586920bce356_Device=CPU_Config=()
+145:conformance_Power/ReadIRTest.QueryModel/Op=Power.1_Type=f32_Shape=static_IR=3ca9994321c7492af9bff158852a484636638e711ae39a6acb66d273f696906e_Device=CPU_Config=()
+145:conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=static_IR=857447d7e14c7516667094409cf5ef351000344fe170570671be0f71834d04f9_Device=CPU_Config=()
+145:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=ee1f9348ff09a058dc09cd63581663590521d463d14b785a23ccd3cd28110b5b_Device=CPU_Config=()
+145:conformance/OpImplCheckTest.checkPluginImplementation/Function=IsInf_opset10_Device=CPU_Config=()
+145:conformance/OpImplCheckTest.checkPluginImplementation/Function=ExperimentalDetectronTopKROIs_opset6_Device=CPU_Config=()
+145:conformance/OpImplCheckTest.checkPluginImplementation/Function=Constant_opset1_Device=CPU_Config=()
+144:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f166c58732107cb0c82859af62b8fc0d3d144468ab66ff4615a1eb4bd325d3c4_Device=CPU_Config=()
+144:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=eb966d8fd7e1301280e6ef709dd785d210a35a1346eb88c3f38379bd96036ce4_Device=CPU_Config=()
+144:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c6abba035865ece7c6c44b0284ab7c6b8f735bc1ad1f75a9ee3bae6ce26c58fa_Device=CPU_Config=()
+144:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=13dad9a80315de728323f8d84534389c4840a92e74073be42c312c46107fd964_Device=CPU_Config=()
+144:conformance_Sqrt/ReadIRTest.QueryModel/Op=Sqrt.1_Type=f32_Shape=dynamic_IR=8b79cf070ed44bdefd5afbe86a81199e189fa486c42190795419dbfc7cc26d6b_Device=CPU_Config=()
+144:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=static_IR=5bfbbb826bcb2c9e7b5364fcc5da23e737953150029c2ea7455ad4b09caaf01d_Device=CPU_Config=()
+144:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=3ade42cfc9d970963d8f162b001075864e6967034198986f408ec09ce4093d18_Device=CPU_Config=()
+144:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=95afe52c888afd5c641ad2d6d0c3f8491f039af2c6938b91fe6fca613ec0b6ab_Device=CPU_Config=()
+144:conformance/OpImplCheckTest.checkPluginImplementation/Function=TopK_opset11_Device=CPU_Config=()
+144:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceLogicalAnd_opset1_Device=CPU_Config=()
+143:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=b339277c7465442a5163600e784319030de12cab4005f43c0b903bcd0c46e87f_Device=CPU_Config=()
+143:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ac40c4284a523b39af21eda7394a11b9ca2f2deb5263c03c92c0e217d34bedad_Device=CPU_Config=()
+143:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=14f15558b2c7699f7877a9e04e1e0e7d2a2d7e1307aaca519a98ea5f39afc415_Device=CPU_Config=()
+143:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=28e31d83986a1435f11ba6355b98472025fcf2c3c6e090103283d9486356b5de_Device=CPU_Config=()
+143:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=i32_Shape=static_IR=38d935b0aa7266960b3d349b60c97bb15f535faed953fbe3ff24ae2560828f04_Device=CPU_Config=()
+143:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=1366ff72dd5b68a3faf25de8f98e4ac5500663b1aac4941af11532ea2ee769d3_Device=CPU_Config=()
+143:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.1_Type=f32_Shape=static_IR=2b1509d227d4c32fee4bb0b7ac59d4ecf5018afce9fd19714067a20d01933455_Device=CPU_Config=()
+143:conformance_NotEqual/ReadIRTest.QueryModel/Op=NotEqual.1_Type=boolean_Shape=static_IR=8fe4bce2e674753d81a1516280769a06cdde538e658ae548087e4888ffa2905f_Device=CPU_Config=()
+143:conformance_GatherND/ReadIRTest.Inference/Op=GatherND.8_Type=f32_Shape=static_IR=58581d0746e5bf56df7df18df87d35371d41ff69ba09c7850c8e96354c7910b4_Device=CPU_Config=()
+143:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=a35667a1c5401fb3102a59ce0fa67d0ea4829f8ce282c43767517ce025469bac_Device=CPU_Config=()
+143:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=0b0005b038a938c698489da595fd89a45d2f685c831bc172d81b2afc09658dae_Device=CPU_Config=()
+143:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=66375ff8539da6387946c19b0d20e6b4fd57da25150255e41282458e241963a0_Device=CPU_Config=()
+143:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d0aad85620a1b97486758b17c69043a6a9cf75a459bf6e283b28ca132e917dcb_Device=CPU_Config=()
+143:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0bb9a29f02d37ba32dc29b4284f58e10ce59571799f58381d449c77655c795d6_Device=CPU_Config=()
+142:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=fffd24bb56af50d2e56fb2abdc6c0c96fceb21f00a9a1556b3890bdc50840352_Device=CPU_Config=()
+142:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4104a7840dc96c214be896cac75911b70baebb902a42a26f12b281bc2cd87318_Device=CPU_Config=()
+142:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=e5092af5c0f683044b1df5a45f211f4a692436d1112181a5d613bbf335941684_Device=CPU_Config=()
+142:conformance_FloorMod/ReadIRTest.Inference/Op=FloorMod.1_Type=i32_Shape=static_IR=2d09fd84ef3e176a2eae04f1066929ceb3973045b87989e5f0f11b97cab6cc7c_Device=CPU_Config=()
+142:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=i64_Shape=static_IR=32ab4bca2ccc66d25b8b9ac449dbc58666434d98aa5b789e1aa28726c530986e_Device=CPU_Config=()
+142:conformance_Cos/ReadIRTest.ImportExport/Op=Cos.1_Type=f32_Shape=static_IR=e5379d72e978c773e9be98561b316a64f76c6015608d87739211e7c0e8b7bba3_Device=CPU_Config=()
+142:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=5d5dd8756ccd01ee77e0c17d26f248c9e35d07aa812dc64bc39ac1ffe17ae585_Device=CPU_Config=()
+141:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=e7895756d4bbd8fc1d5f9794410daea2a42c1df95f57627cbad46e6787e6aa5b_Device=CPU_Config=()
+141:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=b6e76f65817017d4cbe3504634568430a419a30e418a5febf75b89b566ca3631_Device=CPU_Config=()
+141:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=e7895756d4bbd8fc1d5f9794410daea2a42c1df95f57627cbad46e6787e6aa5b_Device=CPU_Config=()
+141:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=bb6a76dcb7d086a6f8dc96d3e0b17573b6dc2775ff9d0f19060947deda586bde_Device=CPU_Config=()
+141:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=9c1e1b695646ea4f56a87b7e5a815c12856f718920e01e86ed78f2dcaf896a37_Device=CPU_Config=()
+141:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=f32_Shape=static_IR=d2759b52de5dc9f1fa494c243d08ac40cf4e877c51323d53dbfa02abc1564e45_Device=CPU_Config=()
+141:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=dynamic_IR=c5ff38504273a230addadadf4fef517ef73154c5f9f10ef2ace961b1dc3cb794_Device=CPU_Config=()
+141:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_Shape=static_IR=d246ad7201844e04821cf31a7d0650c362d6684da5e02f625d28b1afc3789127_Device=CPU_Config=()
+140:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9575e384c23faea27b9011de8c0093099fbe0ee6462baaebaceb075529664665_Device=CPU_Config=()
+140:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=797bfeedb05fe1883757101c44e78eb807ff9c3570aa58b0891172e729d4b384_Device=CPU_Config=()
+140:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=bbf75e5f7aa9f20f890a8eb204ddb5f159ca5eae0616fb99ee0b5169b165d595_Device=CPU_Config=()
+140:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=2f23f1158754aa494abbf61ab15118173a7ccfe90523b2b9ab7cc3a6fdaa0e37_Device=CPU_Config=()
+140:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=97a94ab826d2992013df32a4f93f6adbc38ad17a26503005046f68904adf53d1_Device=CPU_Config=()
+140:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=a2450d07c12669e586815e60d9a2b568f88a49c9b63730c898b9eae907b5ec4a_Device=CPU_Config=()
+140:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=a6ce8e7d0cf79a4e800c911d6aec8f178a39642718eae3f8e9a89f7adc05dc64_Device=CPU_Config=()
+139:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=7b904365e0652437dcb59aef3b84da17f4205a821586224e41db1409d96e910b_Device=CPU_Config=()
+139:conformance_Unsqueeze/ReadIRTest.QueryModel/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=()
+139:conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=2e38326f5f0527299a0385fc3bb6c85c80e12e5bce07fe530624aba7113e82a6_Device=CPU_Config=()
+139:conformance_ScatterUpdate/ReadIRTest.QueryModel/Op=ScatterUpdate.3_Type=f32_Shape=static_IR=537f04d52049add01923acd0c57cee03462926f9ce213a4fc9774496f5f66398_Device=CPU_Config=()
+139:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=static_IR=e7ab5b597681da2db03c13a2424b4e0a62135eecfb2f97f4c59b53331afb7f85_Device=CPU_Config=()
+139:conformance_Range/ReadIRTest.QueryModel/Op=Range.4_Type=i64_Shape=dynamic_IR=0d660483dfd9c9975f102d300ec98da49785fcb6484b379c45df8a61e1292797_Device=CPU_Config=()
+139:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=5b1fc9693e4e947bc88a88bf1ad22ee2f59c13bf291626eec3e8ed49b0cef7ed_Device=CPU_Config=()
+139:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2f8ee6adb1573c51bcffdd8c24455ecd6b6fbf04f171e9aa5de36c5d6f18babe_Device=CPU_Config=()
+139:conformance_Greater/ReadIRTest.ImportExport/Op=Greater.1_Type=boolean_Shape=static_IR=aed960e9b7608b89973346cc2ab23c7ff65e72275fa55daa8b13f925a3779701_Device=CPU_Config=()
+139:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=dynamic_IR=8029d5dae7f4721807eb717310512bad44630efdd0a64962496a0fd802a12325_Device=CPU_Config=()
+139:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=static_IR=effa926dbd9beaa9b2b7b660288ceab99da8cfb440c4b01b7779d1bc25be336f_Device=CPU_Config=()
+139:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=355bfa53a1f9e712db4df6642a51970e96e3612583b2ec90e7a8170e45b1625c_Device=CPU_Config=()
+138:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=aecc8a062c16343ac138f351d774858b523e42d5a09ab67b1b61e64fe62e73ff_Device=CPU_Config=()
+138:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=72eb2887828b5b14b41d001b6c7277d395f39c8003b9461730a938833899aacc_Device=CPU_Config=()
+138:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0bc70791680aff885fa6a5903cea30fdb2386e7720403a8e6698362c5491a877_Device=CPU_Config=()
+138:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=89ed1d3c7fa6e15c01df3b792a183ade5b90edbb87886e1d58db075566b60a92_Device=CPU_Config=()
+138:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c9fa96695ebc82ee5e83b4cde8910e54ce09611f304f24fb6b3faa692a21c60f_Device=CPU_Config=()
+138:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=f32_Shape=static_IR=4d10da0860e049587221c12f55c3bca9fc587b74dd3fec194c8ba5854a736d93_Device=CPU_Config=()
+138:conformance_Tanh/ReadIRTest.QueryModel/Op=Tanh.1_Type=f32_Shape=dynamic_IR=8c78da5f8bf9c1a4cd7f89cde9d61eb6500fa10ea0454e36a585466ed97fb12d_Device=CPU_Config=()
+138:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=9efb5290056ad2f5ee663d4f67a89edbcc4936e512748bcbc0e9f3935b690b1a_Device=CPU_Config=()
+138:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=68f6c55980c58f4d6de9e948d1c034b712cf74de509d8fd825fe7f7dfb11550f_Device=CPU_Config=()
+138:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c1ffd0690c9370725a30028d2915ec798aff173f86a1864f3dc92a4defefef85_Device=CPU_Config=()
+138:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a848753a720bf9791ee4c239cf08712d714b877bfb6df23805590ad690ceaff7_Device=CPU_Config=()
+138:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=21a343909133e844b3d88a967b2f6c948e4c9c9eb96030b936f9517dd9bec865_Device=CPU_Config=()
+137:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=1fe78f5a88510b70fb39ed088e1418ae09155d179afc3a614a641b4e8f86574f_Device=CPU_Config=()
+137:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=f32_Shape=static_IR=299e5f0fc159bf294093a5e1f258f7083fc54a08cbaa3a55b2a2197d29ae780c_Device=CPU_Config=()
+137:conformance_ReduceProd/ReadIRTest.QueryModel/Op=ReduceProd.1_Type=i64_Shape=static_IR=7dba7222be56b8fcef943fc63ab00cfb3c7e0fb4467aeac96fd43aa4421cba86_Device=CPU_Config=()
+137:conformance_ReduceMean/ReadIRTest.QueryModel/Op=ReduceMean.1_Type=f32_Shape=static_IR=2a8fce2d85c65eb0e8b40c2923338675276902296daf8744322876552dcd68f7_Device=CPU_Config=()
+137:conformance_Multiply/ReadIRTest.QueryModel/Op=Multiply.1_Type=f32_Shape=dynamic_IR=48a273073ced3efa39d01e5ce40c30b2901e8a3dff0b414911282b8fdfc0b09f_Device=CPU_Config=()
+137:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=c4e2668f98d5c21fc085695c9b6037f08a1e6710e1854fa73b7465a618e99b95_Device=CPU_Config=()
+137:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=30b790c0018dfbf2d699b7518dc62d7b1d8944cfe0375174e03f00dbf33f1c19_Device=CPU_Config=()
+137:conformance_FakeQuantize/ReadIRTest.Inference/Op=FakeQuantize.1_Type=f32_Shape=static_IR=848caca8b0b971d54e9c9b715b8bf35e0a33f1274d50a946384e64e5c0843a96_Device=CPU_Config=()
+137:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=cda3b9bda63d065b5c27e6bce5ffe20968024d77efe5e174a9f4395db56a30c0_Device=CPU_Config=()
+136:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=c0cbd07b1517991754ef075284aedef586dd4b250e2b867379dacebdf99ce1e1_Device=CPU_Config=()
+136:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i32_Shape=static_IR=9fc3d18a9496df4681f38d330d3d1ff7b83b29b8f4e08e19c26a0107c4b69157_Device=CPU_Config=()
+136:conformance_Subtract/ReadIRTest.Inference/Op=Subtract.1_Type=i64_Shape=static_IR=469a63c5aee73bdefc9abdf8abd8413713c0b68cc098d16c193399a11c7093c5_Device=CPU_Config=()
+136:conformance_RegionYolo/ReadIRTest.QueryModel/Op=RegionYolo.1_Type=f32_Shape=static_IR=d4e6cfc9844e29087dc5bb222a1822c26ec71f2e751575790add7c9b98a5a23f_Device=CPU_Config=()
+136:conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=dynamic_IR=214b1d4be2a141409b6b54847c952a282d9b2d7236d3d8ada3463f7dc8554097_Device=CPU_Config=()
+136:conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=810f13adb3f7342c7d514bec2aa3f20d7a59527b54c7f6954b038efb194c5ceb_Device=CPU_Config=()
+136:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=a6f5b58b1d85e5a99389748ae14e507302127e583c436dd9e6015d3c33ab0536_Device=CPU_Config=()
+136:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_Shape=static_IR=32ab4bca2ccc66d25b8b9ac449dbc58666434d98aa5b789e1aa28726c530986e_Device=CPU_Config=()
+136:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=99866ef63c9a2e7e2d9b7f00d11a4c177775bef9cfdf074e83f56318c143e6a3_Device=CPU_Config=()
+136:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2cc5f8b67a407507c1d59a08981887766d377c7368b53cb0a18ec71df291b1f2_Device=CPU_Config=()
+136:conformance/OpImplCheckTest.checkPluginImplementation/Function=Unique_opset10_Device=CPU_Config=()
+136:conformance/OpImplCheckTest.checkPluginImplementation/Function=NonZero_opset3_Device=CPU_Config=()
+135:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=12c56cc6ebb22e8e31d97e0ef640fecab5f93e5c5b2810c4dde56b09a7ac7f48_Device=CPU_Config=()
+135:conformance_Tile/ReadIRTest.Inference/Op=Tile.1_Type=f32_Shape=static_IR=9f4964a8b6440cdec94781121b408df16f0ef2283b0355583eb934b3cd2bcb66_Device=CPU_Config=()
+135:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_Shape=static_IR=c78feba7097eb1c59197840a7e5510c26faeaa51ff724d643dc1f1ec077a6344_Device=CPU_Config=()
+135:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=aeabe9639d6dcd5ab6e09f9905ffa8bdfe7cafcc7f5c8598e20e4ff39bdb50a6_Device=CPU_Config=()
+135:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=30b790c0018dfbf2d699b7518dc62d7b1d8944cfe0375174e03f00dbf33f1c19_Device=CPU_Config=()
+135:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1f71810b04667907bc88c4a1ecc28b9325fde04026b5e56b5eb0e2d6608f3742_Device=CPU_Config=()
+135:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=dc4bcacb769fc4d8f1ef4ff20ca7ba6b3b369d69ea3b1c65733d4cbd2cb0762c_Device=CPU_Config=()
+135:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=753b524e2aad8fde7e7206fa8c3e7ca15c52c49f22f41d48cfb6b4d814cb40af_Device=CPU_Config=()
+135:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0973b76264164ca52a9883a69ff5f7df977e28c33a0dbe9095e7e92acd7854bf_Device=CPU_Config=()
+135:conformance/OpImplCheckTest.checkPluginImplementation/Function=GatherTree_opset1_Device=CPU_Config=()
+134:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=d6d8f4f28ac34b734cc984f83e8f5f6598c063a6955d00ef4c08252d5d05c276_Device=CPU_Config=()
+134:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=24e44f75d91fe4e7e28db6c93870a47d536abeb87240841ff5b7e74b40189e42_Device=CPU_Config=()
+134:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=7293f99e38d76387b64632d06503c539c369e1ab78d9388e1af42d7071d8230e_Device=CPU_Config=()
+134:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=e044b25aa265a98dcd0a5cf5f7132fdac5f36074068dc2210e04dd4c459aad61_Device=CPU_Config=()
+134:conformance_GatherND/ReadIRTest.ImportExport/Op=GatherND.8_Type=f32_Shape=static_IR=58581d0746e5bf56df7df18df87d35371d41ff69ba09c7850c8e96354c7910b4_Device=CPU_Config=()
+134:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i32_Shape=static_IR=d246ad7201844e04821cf31a7d0650c362d6684da5e02f625d28b1afc3789127_Device=CPU_Config=()
+134:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=131fa1ed3ff9df038bbed73979ab906c3d84fea9dd2cf5dedc82b3222d511b1d_Device=CPU_Config=()
+134:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0a2311ddc09b949cceb73fd0e09bbdcc2932c2635fee3a2c411bec27a30e9439_Device=CPU_Config=()
+134:conformance_AvgPool/ReadIRTest.QueryModel/Op=AvgPool.1_Type=f32_Shape=static_IR=3fbff9f870428a19ed434cdf72834eec251edc3dddd149491c94319d63a8438e_Device=CPU_Config=()
+133:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d13d862f7b8306948676388381950639ef433dcc4e38f5a6fa8d50575d1aa814_Device=CPU_Config=()
+133:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=d25e26d9a54a5dc9799e9881e3035bfd5f125d12ea6cb69fb1eb0166e29ec88d_Device=CPU_Config=()
+133:conformance_Subtract/ReadIRTest.QueryModel/Op=Subtract.1_Type=i64_Shape=static_IR=4341385bd87226eb41493c667323e8c1c39817983a48025192209270750eed06_Device=CPU_Config=()
+133:conformance_ShapeOf/ReadIRTest.Inference/Op=ShapeOf.3_Type=i32_Shape=static_IR=87c65c520de106b146e91222609f5b25cd79e96cdd6b942c3293cddb656617ee_Device=CPU_Config=()
+133:conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=d3155499ccf835bc57e4ca19c25ca32fc63ecede0a2c43ab2a3e43ba4a6a4dcc_Device=CPU_Config=()
+133:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=e77468c2881ce0c38c14038151d560ccadc7dcbd5eb5f21b68b8e227c89813a7_Device=CPU_Config=()
+133:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d242e8ecc8ae0239fc2e7773fe0f8a1d50792a71ae4aaac4fd439174e87e95b1_Device=CPU_Config=()
+133:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c54189129febdb864ceaa5447a7a0011c8ccdf3711fcfd87424feca61b44c0b6_Device=CPU_Config=()
+133:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=bfd899e1dd2a03f99d8b55d9fa5ab04c6e4576358c910e9bda97cf497f0418a4_Device=CPU_Config=()
+133:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a7ad8306fe632a2d0c45a492ad2d21dbe40f2f9ea55074d602beb6f8dde17982_Device=CPU_Config=()
+133:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=c8bb865a43a3782b3b85e05c3e86388fac07473697ed45a7b04f60010555a3c9_Device=CPU_Config=()
+132:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ea8fff2db5032f5015f68d53904354d4bdfbe5288224c7f549a1573794455d80_Device=CPU_Config=()
+132:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d0bade0811581a6fae53c343866f1bdb63acfe07776fd60b7e791f8edd3f88b2_Device=CPU_Config=()
+132:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=a7b79789ba2466daa67ce8610753fbd89a2ca372d65e2326802c24cce03f795f_Device=CPU_Config=()
+132:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=6dae5ccb2325826167ff4ec57e51280b4e125801e6405a33f4d95fd9ab9f3fc5_Device=CPU_Config=()
+132:conformance_Elu/ReadIRTest.ImportExport/Op=Elu.1_Type=f32_Shape=static_IR=1cb500b61fe11278cc50fca509be3e7b654190294dd581c7862ea3f108e0c192_Device=CPU_Config=()
+132:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=1409169e395a3eb90f9235b74f2f8c94e0e27a63fae33cda153d991ae1cbb68d_Device=CPU_Config=()
+132:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=970f3f48203f3bd46dcd6ca55ad20f5ff8ad2426c3f6f74377759fdddaaf93cc_Device=CPU_Config=()
+132:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=8c3b386463bd59945d3c15512b26409dee7da9b1940f153e3ff62d47d6f79d2d_Device=CPU_Config=()
+132:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=8484c5c087ca8317588ef676a0cafb63ded379be5bad862e4d0504f43bc6fb45_Device=CPU_Config=()
+132:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=4bedf955c6ec574258a05f59e5397225e1360ba68ea49d4fe105d6a62ccb3e97_Device=CPU_Config=()
+131:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0dafd9117cb3fba3a335f7cd28aaa3fbd9276878383657b357210e135a93d916_Device=CPU_Config=()
+131:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=deada5d69a05cf27af659254f89b4e53e6685c517fdc2bb8a250cb5d4ba0a3dc_Device=CPU_Config=()
+131:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=66abbc2c605a0f866880bd4730865ae6b5401a1f4beb242f346bf6f2f8138eb6_Device=CPU_Config=()
+131:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=447c546ed54e81edcfea77cafa8d18261923bf25c050666029828ea72e3a875c_Device=CPU_Config=()
+131:conformance_Gelu/ReadIRTest.Inference/Op=Gelu.7_Type=f32_Shape=static_IR=8876bc4ad78a178f235f48e06e705a7dbd3f7ca06e3ea5052e6136811da69d20_Device=CPU_Config=()
+131:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i32_Shape=static_IR=c8ec200fa8fd8ec9c185d9d45ee1380be5e0e4a6f3157e5900401e9fce999553_Device=CPU_Config=()
+131:conformance_Erf/ReadIRTest.QueryModel/Op=Erf.1_Type=f32_Shape=static_IR=2e5aed1612da0f720adb051e22460983a3911c38cb09184d812ceb949870f450_Device=CPU_Config=()
+131:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b7983ae70a4e7868ccbf4b25a5d8e795620182c29817ad1151d89f2e932d770b_Device=CPU_Config=()
+131:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2a819b46a29c8bd965ec330a28b5c163dd0a06fa2717d71bd16493ad460e8dad_Device=CPU_Config=()
+131:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=09c1f9f81a463477da73d33f00d1321fa5c1f64a9c3c51c6e3c1344e362d4ced_Device=CPU_Config=()
+131:conformance/OpImplCheckTest.checkPluginImplementation/Function=Transpose_opset1_Device=CPU_Config=()
+131:conformance/OpImplCheckTest.checkPluginImplementation/Function=AvgPool_opset1_Device=CPU_Config=()
+130:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=f32_Shape=static_IR=17472505b59f6bcf4f5570eb83b2225b056a403bf2d165562edabb8501fad1e7_Device=CPU_Config=()
+130:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=8ef4d7ceb7d904a084d93d6ede1c15a64d2511b3bf1312d630792eb21c591408_Device=CPU_Config=()
+129:conformance_Tile/ReadIRTest.QueryModel/Op=Tile.1_Type=f32_Shape=static_IR=6ab37e1d52328b5ce1204cfe13977b06dcfabeb4acff9821d65ffc91bd3cf09d_Device=CPU_Config=()
+129:conformance_ScatterElementsUpdate/ReadIRTest.QueryModel/Op=ScatterElementsUpdate.12_Type=f32_Shape=dynamic_IR=cd6084826e0efefc7f1c9c3c7c9f8c1cb35b9a5f61d1a2c8131ecec5babf1af4_Device=CPU_Config=()
+129:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=de01a0d560bebb0018927f02409922427ef35b59a96f0aef8f18991ee0d9542a_Device=CPU_Config=()
+129:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=boolean_Shape=static_IR=4da22853b6e4b853fa57b9dce8f5a26920d079a74055831d651c10f48ee96e8f_Device=CPU_Config=()
+129:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=4555fb7029260c7e46403e1fbc99a3815a94373b7b08d2408277976173facc37_Device=CPU_Config=()
+129:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9795aaeb71c115680b567eab0877df338c0d8971858b489a2636c4483f3512cb_Device=CPU_Config=()
+129:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=5d522332a7166265867b633721d8bd8ff23a233e7c8bff59a245bbb24d7be234_Device=CPU_Config=()
+129:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=5976ce31ff4cf399e87efd691dce3e75dc2de962241a84c09538593c9865b257_Device=CPU_Config=()
+129:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=243bd2256612673dd04651521ed8d3fa4087c90af7b85e1a4aa381c074bacd47_Device=CPU_Config=()
+128:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f226723f90368b020cf11817ce0a39c002b9c30e07d16ac9297b7e574a010b0e_Device=CPU_Config=()
+128:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f1ffa9874732c1aa88e04fd55fbc864c9c6986877d3d52045fa6ae7f18dba62b_Device=CPU_Config=()
+128:conformance_Slice/ReadIRTest.QueryModel/Op=Slice.8_Type=f32_Shape=static_IR=2055c46f29a25953e331656839e227b0472b10695ea23410b64428d14232345a_Device=CPU_Config=()
+128:conformance_Select/ReadIRTest.QueryModel/Op=Select.1_Type=i64_Shape=static_IR=6590ae34a784f81de25c016454fcc919ae1f9eab672c78c9da0daf83dcdaf1bc_Device=CPU_Config=()
+128:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6a7aa747b98a21c0469c7edf7ef78a050e1279d891b0c69ddc071befafd42c76_Device=CPU_Config=()
+128:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=77d771a04d8618bf32943e460b714076f7bbc34cd1d40f9a90864af976bea30e_Device=CPU_Config=()
+128:conformance_Concat/ReadIRTest.QueryModel/Op=Concat.1_Type=f32_Shape=static_IR=b6417017678573faaf72824d1bec40bcccd73ae0007aef24b089dc3743276b14_Device=CPU_Config=()
+127:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=686b6d84e29d87a91c8177396d2aa5a1fbb88656c79e41af9a0b30b42805f477_Device=CPU_Config=()
+127:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2d38082f8971dd7614234070dc9cb8c9b6b12fee7dc918503f0e256ab32d2fef_Device=CPU_Config=()
+127:conformance_Softmax/ReadIRTest.QueryModel/Op=Softmax.8_Type=f32_Shape=static_IR=c662eb0004f431152ddc69e12826a6c0e7aa66b24be0169acf10ca95f2a63f52_Device=CPU_Config=()
+127:conformance_ScatterElementsUpdate/ReadIRTest.ImportExport/Op=ScatterElementsUpdate.12_Type=f32_Shape=dynamic_IR=cd6084826e0efefc7f1c9c3c7c9f8c1cb35b9a5f61d1a2c8131ecec5babf1af4_Device=CPU_Config=()
+127:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9c66c6a6d93c10149920c3e034d9a0765afbef45dab66083fd5e3d796a57e406_Device=CPU_Config=()
+127:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6e508ca44667fb311f5b6d634584d2751c3fb15fc034626765c90695b7de9619_Device=CPU_Config=()
+127:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1891282a9bf460336bad3c354519aa0d87ba6ef40876d4a07592194d2d678e25_Device=CPU_Config=()
+127:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0b4b74693c2ec96e714901b1acc772655accc3b29170cdb64ae934003338b296_Device=CPU_Config=()
+127:conformance/OpImplCheckTest.checkPluginImplementation/Function=ReduceMin_opset1_Device=CPU_Config=()
+126:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9991a1b4140ee8e6ed0460fb384b7729f681bc1068315a4d970eea59dcc89950_Device=CPU_Config=()
+126:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=3b4dbc6facc24173723b52757e4ee60953d7a639e1fcb6e70236918d6a40b3a5_Device=CPU_Config=()
+126:conformance_VariadicSplit/ReadIRTest.QueryModel/Op=VariadicSplit.1_Type=f32_Shape=static_IR=d1d0510ce6d862a5512bf4c5c588f84548f1aed0226eca6850b5e2d470a5ee84_Device=CPU_Config=()
+126:conformance_Softmax/ReadIRTest.Inference/Op=Softmax.8_Type=f32_Shape=static_IR=670a0d513277b4508e8edcddae6361e98fd03c2fff31293637c36f97e59a6b9c_Device=CPU_Config=()
+126:conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=3688e2a973219245d05c5fa675cebe9036d40777809ebf583c1bae9b9f87eed6_Device=CPU_Config=()
+126:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=45e4a607b0964915174f6a14de049a61a5740f258a4a71817e5aae1b93be5ae7_Device=CPU_Config=()
+126:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=CPU_Config=()
+126:conformance_Equal/ReadIRTest.QueryModel/Op=Equal.1_Type=boolean_Shape=dynamic_IR=0723b6d683bc65225624112929bd8f7a0adde9e9c2265a2ec1a54b10c4433735_Device=CPU_Config=()
+126:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=dynamic_IR=fb6a053d244fc1bdea6fd5e69e0c05025272ac0da2f676e077c598239b6493c2_Device=CPU_Config=()
+126:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=00d6c2465c4fa7ddab80d30c2fd8099a684bcc47cf9bdba89a39560beed737f6_Device=CPU_Config=()
+125:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=ffc3cad64b8bf82ffa4d189a247a9434e71886cacd3582956c5dd98921fd2141_Device=CPU_Config=()
+125:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=51bb427ac8abf618a72159cde1ee840e08518016a09e995f503cd888941f5039_Device=CPU_Config=()
+125:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=883597c2c4e004b0ec1e1ca8d1b75395c714fc6a99cd31e35ca0597d0ccd8f8f_Device=CPU_Config=()
+125:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=469d09b261b88011c82288ea622dde06d63805eb41dc256c901b0d206ac5780b_Device=CPU_Config=()
+125:conformance_PriorBoxClustered/ReadIRTest.QueryModel/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=93f586b65926f2fb89cf5cc3379013f6df6964cb757fb3396060277dd393bb12_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b04f836c4ed5b0403f4b7fdf9c5cb8d11ff9f65105ab9bde39f80191a65f7f17_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=59c0e972ae75900cd8c802aa7be9b6c13c96cb10417ff417eb1aafbc49b891ea_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=3a3886301663fd20cf2c8c0f74c11d80dfe8b74ac39e41652f0eac1ec9bfa2df_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2dd63d58c85301d765882b95995de97f4eff14bbb3c933c4e4b8ee5fbc2e9e71_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=073dca39b0ed99c8af202a5e272db241f95de1f64a7a1611e83853b92e7f7f09_Device=CPU_Config=()
+125:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=023f3573ef77fb592345c68ee5e6a79191b120f9cb68f81194381da2cf68f21a_Device=CPU_Config=()
+124:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=16ccecc11352f2c476db041adea21d67a96e03cf33902b37f4f6855b5113c202_Device=CPU_Config=()
+124:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i32_Shape=static_IR=6eac2d4e0df77b93f566f0d226ce4972da143d2b3fd794f7d316faacce442035_Device=CPU_Config=()
+124:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=1f7f2d40b938416773b13282d8ac09d81a50e4d5d7548f42fc5fd575f84e1385_Device=CPU_Config=()
+124:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=CPU_Config=()
+124:conformance_Clamp/ReadIRTest.QueryModel/Op=Clamp.1_Type=f32_Shape=static_IR=4d14510ef37733d7ca3d69697626c173feb05638f5036c49b060f6a80aea9ada_Device=CPU_Config=()
+124:conformance/OpImplCheckTest.checkPluginImplementation/Function=Add_opset1_Device=CPU_Config=()
+123:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=c90b6f528b750f144ddd29be0059c202d46b3bac799c0d70893f2f4f9f05f64c_Device=CPU_Config=()
+123:conformance_ReduceSum/ReadIRTest.QueryModel/Op=ReduceSum.1_Type=f32_Shape=static_IR=e1d727df48a0a74d8b9865c00e5c39c9d53a5023d83da3c58f281b6b1411b696_Device=CPU_Config=()
+123:conformance_Gelu/ReadIRTest.QueryModel/Op=Gelu.7_Type=f32_Shape=static_IR=4ee688aa25b818f6e6986c7070e544d0eef9ce888124d85c0e5e126802213a46_Device=CPU_Config=()
+123:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=766d904d646b6f43847158972a615db69af2bf66517db0992a19418856bef52f_Device=CPU_Config=()
+123:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=517a5eeb2f1f21304b8a1d5971f89bfc93aa678252180bdb05144657b1a8619f_Device=CPU_Config=()
+123:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0f61e4837d11be2b01f69947cd0b424a45d2e548d9c70ae53b07c43fa1237cd0_Device=CPU_Config=()
+123:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1c73b4d05053065f5c37954586376ae4e1cf9e220959363b7c2cb381f489bee0_Device=CPU_Config=()
+122:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=eeed611756b048927c290a65dd92a5833ad66d347bbc772abddaa751f2016ff1_Device=CPU_Config=()
+122:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=bb610d59221e7c5d8e96f971519b7ef27bda7bbb9be329b873a901a1e749b9cc_Device=CPU_Config=()
+122:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1e04d36f6e56abacf8388fad66368b15355eed9d216d5771b650b0b058db3a76_Device=CPU_Config=()
+121:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2fc01b66086ac5d8272dd81ab731188b62bbe8920bff1efe61bf3261a3a8b3e6_Device=CPU_Config=()
+121:conformance_GRUSequence/ReadIRTest.QueryModel/Op=GRUSequence.5_Type=f32_Shape=static_IR=860decd2bf091a335f6f820b2c6b6acc58618fbb6027e30484470ce899bb1591_Device=CPU_Config=()
+121:conformance_Floor/ReadIRTest.QueryModel/Op=Floor.1_Type=f32_Shape=static_IR=b064511ab38a9a70b4d203e11a12b990f388a03550ba98c65468be1b85c68fda_Device=CPU_Config=()
+121:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9c63b760d92c46d2ba731cb9edc4cf19a96848e4f3c354797f10a7a1bb9edf8c_Device=CPU_Config=()
+121:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=074ab76268ab5d03283f03f4e228a7cf73ab5a18fc0e7366778cf8c45286f18a_Device=CPU_Config=()
+121:conformance/OpImplCheckTest.checkPluginImplementation/Function=VariadicSplit_opset1_Device=CPU_Config=()
+120:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f2293320b7533e95bf000229d2458244fb9af573cd737ca0088a00674df1ac52_Device=CPU_Config=()
+120:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=65a5483c793396983edaf7f2cc2c13898507525bd84a8469e97b2d662b5df782_Device=CPU_Config=()
+120:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=2027d5da17dab73d23b4984fe88696fb770ba2fa479a194b3531d30ac75dc840_Device=CPU_Config=()
+120:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=6e1207753b08d53b18c551ad07a245243197370051be78218db028f3d3b835a5_Device=CPU_Config=()
+120:conformance_Sqrt/ReadIRTest.QueryModel/Op=Sqrt.1_Type=f32_Shape=static_IR=8952b1ce6fc7bfd900e669e12b520b624c02026b458bae41afe28e1f76058315_Device=CPU_Config=()
+120:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=7cbd5676618d9b507238807c281801b8a817202b0ae648a44cfa32fc16c02547_Device=CPU_Config=()
+120:conformance_GatherND/ReadIRTest.ImportExport/Op=GatherND.8_Type=i64_Shape=dynamic_IR=c1cd785825e1b2794d4bc74f6dc257e92a382e95a868a864125da70acc5cdbf4_Device=CPU_Config=()
+120:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=static_IR=cbb80f496fd705f24fdb25f6de3734bb2a2b7f49c984bdb32c4f62ec4640797a_Device=CPU_Config=()
+120:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=dac1d1bb4f11cef03519894a2853742d914abb0e3225b7caa3bc5f23d167cdaf_Device=CPU_Config=()
+120:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=29d8ef1a41f51b6fed0300f97d17a3795a97e4ffb3ef3abda37f790f5f53b389_Device=CPU_Config=()
+120:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=13c78a6d628bed4392d2577f132f924d9e17a7e29a2171dafebc0a596d2ade04_Device=CPU_Config=()
+120:conformance/OpImplCheckTest.checkPluginImplementation/Function=AdaptiveAvgPool_opset8_Device=CPU_Config=()
+120:conformance/OpImplCheckTest.checkPluginImplementation/Function=Acos_opset1_Device=CPU_Config=()
+119:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=f84dc200af2852df01662dfbe891b8ed4abb27db6763f3a2b645ab75324834f3_Device=CPU_Config=()
+119:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=53da49d7aaa81cbb7c3a3dbc8ea938bbffabda14bd106fa6c2b6abe244ba5cda_Device=CPU_Config=()
+119:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i32_Shape=static_IR=1942042c790c3fc6053ad91fa5e45f8ebf3c11bff7e3427a71b8fdc1bc5db053_Device=CPU_Config=()
+119:conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=17472505b59f6bcf4f5570eb83b2225b056a403bf2d165562edabb8501fad1e7_Device=CPU_Config=()
+119:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=78a5e7f340d63660dc0710d0e390dea2d3f68ac98f16e8dbc11b4c28ac0440e0_Device=CPU_Config=()
+119:conformance_Exp/ReadIRTest.Inference/Op=Exp.1_Type=f32_Shape=static_IR=67632b67a0834136cf2f3bcd6b3fbaf0d2f2bbffc1da6c33fd5fce0d0b8a763c_Device=CPU_Config=()
+119:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=dynamic_IR=791be312b2af6da6abd2eadadc6185c7052271efbcf314bb678828313fc58414_Device=CPU_Config=()
+119:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b83a85737c23e279f8878f6795581dc2b003c55e4eb8baadfbfd73fb0e98758f_Device=CPU_Config=()
+119:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=64efb6dd46c36bec02b92148d178bc032417c8c2d999ff7b0a24ba08af365f91_Device=CPU_Config=()
+119:conformance/OpImplCheckTest.checkPluginImplementation/Function=ScatterNDUpdate_opset4_Device=CPU_Config=()
+118:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=f2403b7d119fabadb1609250bbd0959aeef2cd68c62a4036657518ebfbcedf71_Device=CPU_Config=()
+118:conformance_Transpose/ReadIRTest.QueryModel/Op=Transpose.1_Type=f32_Shape=static_IR=564cd54b2564c7e39fda0c5e580c274b7bf99603760f6c66f03b4450f23cc4bf_Device=CPU_Config=()
+118:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=b1477d38842775944964d18c13278454256d9610e0ef880fbce0cc87e5977556_Device=CPU_Config=()
+117:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=dynamic_IR=f01fb29e8f5ddc7562e954e46b1d2bdbe6144d6bbe2ed2a0f16610f2812ac721_Device=CPU_Config=()
+117:conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c359e1ea71a80fc519e8a2dacfc7f52f5a94a1142058641b0434f40866875c12_Device=CPU_Config=()
+117:conformance_Greater/ReadIRTest.Inference/Op=Greater.1_Type=boolean_Shape=static_IR=dce38966c13ac9886c7480261e3483d822355a9bf3835d00795e7627744a60d7_Device=CPU_Config=()
+117:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=3ade42cfc9d970963d8f162b001075864e6967034198986f408ec09ce4093d18_Device=CPU_Config=()
+117:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=ec60ac68ad3b748ccd56a7c91b3a2461510f05d66e4b64e12a2069483d8243ae_Device=CPU_Config=()
+117:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=43a00b4dc097228af52c00054951dd5b57d8e0086207f11a8996e5ac880c8980_Device=CPU_Config=()
+116:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=()
+116:conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=20af9ae4466332a072f3b04c1219146d272daabf2306b66c755980bfd31f2a76_Device=CPU_Config=()
+116:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c0cbd07b1517991754ef075284aedef586dd4b250e2b867379dacebdf99ce1e1_Device=CPU_Config=()
+116:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i32_Shape=static_IR=38d935b0aa7266960b3d349b60c97bb15f535faed953fbe3ff24ae2560828f04_Device=CPU_Config=()
+116:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=263843a2c307b91ff7d59d9b21cd8b2126e985d787fc18f44df3525a6bfd71f3_Device=CPU_Config=()
+116:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=641b1e74512a5cdc87bcd63515a28a409f155a3475fa923e440868e563daaffd_Device=CPU_Config=()
+116:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=57ba21d45369359487dc3b6a8feb0aa2b6fb21ffa328dc8e8eed58ee2896fdad_Device=CPU_Config=()
+116:conformance_Equal/ReadIRTest.QueryModel/Op=Equal.1_Type=boolean_Shape=static_IR=9e166ed18be64949ce2451a1dc981381040fb109ee60e13a7f47308caac73e24_Device=CPU_Config=()
+116:conformance_Cos/ReadIRTest.QueryModel/Op=Cos.1_Type=f32_Shape=static_IR=e5379d72e978c773e9be98561b316a64f76c6015608d87739211e7c0e8b7bba3_Device=CPU_Config=()
+116:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a24dd1485e484f31d0c72f3a0c31f373f883f6ca4a751b1d2ce18132913506dc_Device=CPU_Config=()
+116:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0fb6a7848271e000d49d4966647edf55e65f181523883089f43147c14cfb9871_Device=CPU_Config=()
+115:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=i64_Shape=static_IR=4892263cb1ea7a434b5771aa16f07885c39710f67fa1411dd9235653a6b8622c_Device=CPU_Config=()
+115:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=469d09b261b88011c82288ea622dde06d63805eb41dc256c901b0d206ac5780b_Device=CPU_Config=()
+115:conformance_GroupConvolution/ReadIRTest.QueryModel/Op=GroupConvolution.1_Type=f32_Shape=static_IR=2bb16e2257294e3f7d905f66a483a8210f392ea822836e4edcf8910a7fbb4277_Device=CPU_Config=()
+115:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9c651eeba5c3e7b07a8cd0d4ba479fe8c5aaa2c4df9b18ab022e775ea01dd867_Device=CPU_Config=()
+115:conformance_GatherND/ReadIRTest.Inference/Op=GatherND.8_Type=i64_Shape=dynamic_IR=c1cd785825e1b2794d4bc74f6dc257e92a382e95a868a864125da70acc5cdbf4_Device=CPU_Config=()
+115:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=83d90ef3fac993f7efba4a8ed369781571b1b536af03ceb0267ae979379e1dd9_Device=CPU_Config=()
+114:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=b050ebcbd31acbbc43d657d87a54415e0e52d3e91fa95b57aa1dd0451a5bf50f_Device=CPU_Config=()
+114:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i32_Shape=static_IR=4e2e2e9dd89aad4bc14634b85c94336a7250dbb8ff61cb451c9507753f54a102_Device=CPU_Config=()
+114:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=bf4d5291899ea4eccf6584f62d4ecdfb39de79edd102e509f840664838f59d19_Device=CPU_Config=()
+114:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a7d9ffa60c8d1f330ec303edf6a6c0f8d8e0fe8657c561431bfb91a94c2639e8_Device=CPU_Config=()
+114:conformance/OpImplCheckTest.checkPluginImplementation/Function=Softmax_opset8_Device=CPU_Config=()
+113:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=576ef0e9eaf8fefade547928d4592bc2b341ff1100c3de5104f0a63b2fbeeca0_Device=CPU_Config=()
+113:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0a2b1efb810d1dcf7897c3671f1eef0c36bcdca679e24b8e86f078128b381833_Device=CPU_Config=()
+113:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=70d4da84623a0af3bc8362a828bac5ef13285498b420a3df6bf2e88bf05311db_Device=CPU_Config=()
+113:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fa2eea1b545d6b876282ed0165fb935f0af249c713e3f20fd97cc06118e615eb_Device=CPU_Config=()
+113:conformance_Greater/ReadIRTest.Inference/Op=Greater.1_Type=boolean_Shape=static_IR=aed960e9b7608b89973346cc2ab23c7ff65e72275fa55daa8b13f925a3779701_Device=CPU_Config=()
+112:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=e250a19bfbe236f81b6715a92beb0c259080e4a5d379ea1187892e8c8d9add8a_Device=CPU_Config=()
+112:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=1a9779319a9cc5f21b6005ebb9b4517e0bb1f868ef8e568453a58c44474c40bf_Device=CPU_Config=()
+112:conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b91ccf96246dcf055dd9122c823ccc54ea572f1ad8fcbad3a98c88edb7e454c4_Device=CPU_Config=()
+112:conformance_Squeeze/ReadIRTest.QueryModel/Op=Squeeze.1_Type=i64_Shape=static_IR=168e02701204a8f0e325fa1a2a4407612df10c3218c9431981fa6f1f8300eec2_Device=CPU_Config=()
+112:conformance_MaxPool/ReadIRTest.QueryModel/Op=MaxPool.8_Type=f32_Shape=static_IR=30f4b90114764377dcd8e010019eefe0ec9c21dc6f0503b52323dfe867a51df5_Device=CPU_Config=()
+112:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0c5ac67592b69e8c2b7acbae7a0f877cfed184c572d2fae09eb8fa629e86eeb1_Device=CPU_Config=()
+112:conformance_Gelu/ReadIRTest.QueryModel/Op=Gelu.7_Type=f32_Shape=static_IR=8876bc4ad78a178f235f48e06e705a7dbd3f7ca06e3ea5052e6136811da69d20_Device=CPU_Config=()
+112:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=79e0a530c5a64063a9af26b438f208476e3bbf5a267c28ddded0459019a1d8e1_Device=CPU_Config=()
+112:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1385af2553c7c9b0f9ce2aa4345d8b767d36136a9cd8e2acae79d4970d6b5c8b_Device=CPU_Config=()
+111:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=d74cf8dde02b582dc1efa697474a50738532e0ce5b40831d81d0852a74a94c79_Device=CPU_Config=()
+111:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5953b8e79f64e33e67dd330999ff8e3d8391c8f3fa7eae519b117b1273c8c19f_Device=CPU_Config=()
+111:conformance/OpImplCheckTest.checkPluginImplementation/Function=MVN_opset2_Device=CPU_Config=()
+110:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=984e628a0090ff9d04bf8f41b795f0682dd3083fb78b71397a51cc2efacee247_Device=CPU_Config=()
+110:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=52ee24baa14f302174ce3b13a119ccb6a54994413daa1f052a75464528b07088_Device=CPU_Config=()
+110:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=a6b0532b200874d6d1c57719b46f2b301c368ebc35042df00796dfb87eed618b_Device=CPU_Config=()
+110:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=651e5fbc222577151cf14e9c8e9bdf9e155f1e0d277206887160d65b532caf53_Device=CPU_Config=()
+109:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=1e9f662cfa263a98c546e69de318268918914f2ddd0ee87cba23c2690a81ec19_Device=CPU_Config=()
+109:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=c2539b8a06e5dd0e01933c6861e366f8ed565e5956b8b2546647b55e966e7755_Device=CPU_Config=()
+109:conformance_MatMul/ReadIRTest.QueryModel/Op=MatMul.1_Type=f32_Shape=static_IR=c61a8f259a8b37e49f9267dbc921d88dd60e5766aa30dd05319f423a01c14aee_Device=CPU_Config=()
+109:conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=68c6351cbee22a4783b3c592f69eea3778c17594c48972d5d0d1e9d728f5b47e_Device=CPU_Config=()
+108:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2acd53645519bc460dcc71923563fd462ed997366cc7ae08cb5a30245302a859_Device=CPU_Config=()
+108:conformance_ShapeOf/ReadIRTest.QueryModel/Op=ShapeOf.3_Type=i32_Shape=static_IR=aa757ffed4165beb3074da6ad09422d7823a1d0d6c8a654adc56343d0e43dc66_Device=CPU_Config=()
+108:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9033954b258cdfa9fa858317ee4588b8c92cc946d7eb305bf130d3ca8ee0f1fe_Device=CPU_Config=()
+108:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=185e849a9d8fec26bd81b2098d63bd842d34dc7a8ee7e47086a208e4b8bd9298_Device=CPU_Config=()
+108:conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_Shape=static_IR=8876bc4ad78a178f235f48e06e705a7dbd3f7ca06e3ea5052e6136811da69d20_Device=CPU_Config=()
+108:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=b11ede8f1aee40577413d8bbe89704e02252e3f02805fcc0ded624857ddb8280_Device=CPU_Config=()
+107:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=89dcb383b48e2a4423a7c81461f282b74b1d9ab0f48f0a0427cd4c599672f3fb_Device=CPU_Config=()
+107:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=67ed6a8048424f4e44f40c542faf7a2a2d2419e81aa982fe32a054af05caf309_Device=CPU_Config=()
+107:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=static_IR=a6b0532b200874d6d1c57719b46f2b301c368ebc35042df00796dfb87eed618b_Device=CPU_Config=()
+106:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.11_Type=f32_Shape=static_IR=7798cef9c8734d0908103b3c42fd7fc791806ad61d35dc680dc43d9597c6f1fb_Device=CPU_Config=()
+106:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a4e797de860d6e4dcec00062050168ba9745d3da953b9c644de654f4d2818b77_Device=CPU_Config=()
+106:conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=4fe95284f224758c29c5198a8b2e6f97e8e737435d36cb94b9cdf0bca3c89dc1_Device=CPU_Config=()
+106:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=c98e1e2347c7b6939804dfcfcebbbd57d4c05e8d13b35b2611912290d06107ff_Device=CPU_Config=()
+105:conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=d74cf8dde02b582dc1efa697474a50738532e0ce5b40831d81d0852a74a94c79_Device=CPU_Config=()
+105:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3e2e16f3ba7681bebb6b4c06788f38a40fe24e26fa3ec3accd756c87bee7d62f_Device=CPU_Config=()
+105:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=43ba20ec70e156f4782e1f11a30f02daaaafb2039912a373620d845e995c97cc_Device=CPU_Config=()
+104:conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=9f4d316675c933ea5d6511324e3d664440a8ba287cb2ffe768517f9cbfb613e7_Device=CPU_Config=()
+104:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=997a090766babacae10464bab19af5db238eb28704c6d463cfcba48767a90c8b_Device=CPU_Config=()
+104:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=27d1a1cfdbadd9a8c2d0269f6177d6aabd55320aafe9a0047e90681dcad1cbe9_Device=CPU_Config=()
+103:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=d7fbbe9f8f446b009ea2de8594e4cfaad46432734cba27596e3fa721f04c04ee_Device=CPU_Config=()
+103:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=75aed7fbb8f7d7e8a1281d4a16c4fe2e55160dfb9e6a1bc446913a223c5aa0de_Device=CPU_Config=()
+102:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=45ce409a7078c7e732a092633cee36d6a0aa80fa9249cc98dce44e5b4bfc1693_Device=CPU_Config=()
+102:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=boolean_Shape=static_IR=4da22853b6e4b853fa57b9dce8f5a26920d079a74055831d651c10f48ee96e8f_Device=CPU_Config=()
+102:conformance_Einsum/ReadIRTest.QueryModel/Op=Einsum.7_Type=f32_Shape=static_IR=f3d704d4f0da6c58c39e279d727dd82fe0e59a41dbaf09a3cbaa8f591daf95f7_Device=CPU_Config=()
+102:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1c9d69e1a85d03b8599961a8a1b90af7b3b2d43bc5c4f4a6b8d5da3c22166abd_Device=CPU_Config=()
+102:conformance_Add/ReadIRTest.QueryModel/Op=Add.1_Type=f32_Shape=dynamic_IR=f86f86769ec214942eaf1fdcd312a29e26308676419d8fbd98fdc485c2de0815_Device=CPU_Config=()
+101:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=41c1e13447cce632ccd478ec2bf36f09e510942449b0bffd3271f3b1f0b48d54_Device=CPU_Config=()
+101:conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=i32_Shape=static_IR=6eac2d4e0df77b93f566f0d226ce4972da143d2b3fd794f7d316faacce442035_Device=CPU_Config=()
+101:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=bba92f0e1fe2ee647564aec64223ab2c5b32d3defae9bad5daa5a24df76aac48_Device=CPU_Config=()
+101:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=6964f870fd6bf44d1d5ee5925eee8892230b8928aeee1966db73b6c4fcd5acf8_Device=CPU_Config=()
+100:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=static_IR=3326cf79d426d1a669158c4db8256fdd956fa4725b0d6fb9e8ab5e5206612eef_Device=CPU_Config=()
+100:conformance_PRelu/ReadIRTest.QueryModel/Op=PRelu.1_Type=f32_Shape=static_IR=659cd025e440fdc633859089f52f7f38cab5701c63c79d1e8d1837c217b8cf75_Device=CPU_Config=()
+100:conformance_GRUSequence/ReadIRTest.QueryModel/Op=GRUSequence.5_Type=f32_Shape=static_IR=556de70b55386fc9a264a24a9000d075a07636de6461cc5f4cd41af639b0597e_Device=CPU_Config=()
+100:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a7eb49934c05ef49a453b19adf40a9d4c2ea9477464e8d42858dc9517c30b88c_Device=CPU_Config=()
+100:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a76c4cc0a1f2294a3ceb18dd5d214d842cf37c08d2e34770c66c29b44ee92e48_Device=CPU_Config=()
+99:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=516b04726c16c5c01fbeb1c97f8f9d9376b80e9341d2029c634f7fe4975cc4be_Device=CPU_Config=()
+99:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=37b1b14a23dbc309d75fbd98158648e1a7fd246684b96e1ebb10a75c3f5b03b6_Device=CPU_Config=()
+99:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=e0641a7f9e64123d0d51a75e576fbd0e405105b8ead44a618068e77d2b4bf933_Device=CPU_Config=()
+99:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9650ac479045f70fd763f5c95d0c27c3b3cc4d6fc00b43e8ad627d16f817f342_Device=CPU_Config=()
+98:conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=i64_Shape=static_IR=7c1b4dfda36336bb1a943fec9786d89e220f2a811159fe9cbed7d51186f8fdfe_Device=CPU_Config=()
+98:conformance_Proposal/ReadIRTest.QueryModel/Op=Proposal.4_Type=f32_Shape=static_IR=b169d6330e4006909e4deaaf78b03e789ccd9538c5b59d9d41e05f878bb60704_Device=CPU_Config=()
+98:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=263843a2c307b91ff7d59d9b21cd8b2126e985d787fc18f44df3525a6bfd71f3_Device=CPU_Config=()
+98:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=static_IR=a0cee5b220a433f1d76460a1f452bfc26aae12f7b84983a063605b4a8cd0a5d4_Device=CPU_Config=()
+97:conformance_TopK/ReadIRTest.QueryModel/Op=TopK.3_Type=f32_Shape=dynamic_IR=fb3cc70d8993f96508516aa7a36cdcb9973edd563c78a7d6d5ac5ca9f816e3fd_Device=CPU_Config=()
+97:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fbd54c37e1db9cd3cd3fc7c571117f65c26d9f5ff0674711a326e02ebd3f9d57_Device=CPU_Config=()
+97:conformance_Divide/ReadIRTest.QueryModel/Op=Divide.1_Type=f32_Shape=dynamic_IR=f42d85c8e1388cf2cb69f9efb2970255c6535f1c3f904a9b08cc18cbea6aa6c3_Device=CPU_Config=()
+97:conformance_Divide/ReadIRTest.Inference/Op=Divide.1_Type=f32_Shape=static_IR=99e405218c1a96c5f8af65aa814893d8958e8e991d1ed8dbbbd586efa589df39_Device=CPU_Config=()
+97:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=9ba199e71a3ff06e6bd330e453a1e1103599902893fc267c60da9ae47575a8a0_Device=CPU_Config=()
+96:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=c0c3d43548fe20fc4e63bcfc8ee6d0a70a6076dfc0ee79e31fdcecf6cf35921c_Device=CPU_Config=()
+96:conformance_Reshape/ReadIRTest.QueryModel/Op=Reshape.1_Type=f32_Shape=dynamic_IR=634db7c7a580a605f3375f671b3bcb2a1baf5856b32032d2786a5f8061df63c3_Device=CPU_Config=()
+96:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9cd66958dfe8db471d48d6ea35f1b4547a413fcdc6c61c804a456befcbb09d15_Device=CPU_Config=()
+96:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2386bb6412e51aa72e9426e12f9f2b2646e7074413b33fff8d95dde141ee12fc_Device=CPU_Config=()
+96:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=060423427a9100b6a38aad12a83043441f8af436c1d2502350ae867f45bd721f_Device=CPU_Config=()
+94:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=3dcf824c36c868d06d715e3fe24587c31eb7cad18ae9f9e044c7f6abfd261651_Device=CPU_Config=()
+94:conformance_Greater/ReadIRTest.QueryModel/Op=Greater.1_Type=boolean_Shape=static_IR=dce38966c13ac9886c7480261e3483d822355a9bf3835d00795e7627744a60d7_Device=CPU_Config=()
+94:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=f06ff28476f886d4298a83d39f88aff34399d5cd589e0a6d6395e00b0ad96876_Device=CPU_Config=()
+94:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b9bab6ef11eb6ae637924a902a40dff310a45916d50c8f0a4ec667c8d6bde6a6_Device=CPU_Config=()
+94:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a50644dc2d133df429ff4aa6a19ca9bafbf41d2948522e584fc5f417ad16d76c_Device=CPU_Config=()
+94:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=3209c1cce78c7b856203c0a5676f6fad4d098a3146c7305ee3c0471b3be2e3d5_Device=CPU_Config=()
+94:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=31e75a7408a46928e1a3a8babe3da21bccc6d442f87291c0b2bf57b29e18face_Device=CPU_Config=()
+93:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=i64_Shape=static_IR=f26c1f41ef689dde33e9d61b0a1066788b8397ba6a170f5eb1362726ba9c0868_Device=CPU_Config=()
+93:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=e15d2825807b2c7fda150b7b7b4e2c6914fab2d4af4313e959abaff56dffe6d2_Device=CPU_Config=()
+92:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=0495648ac153ca7bb07160aed49b620b855a89b368d363a22fb45ff3428349eb_Device=CPU_Config=()
+92:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=9fbf4ccaa68a81191afe2432a2212ee1a559df380d602459ebd2d0266053d82d_Device=CPU_Config=()
+92:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=7754523e2d3739481e051eb21a4347f2d157e94db3c37d47f0006ecd8d77d512_Device=CPU_Config=()
+92:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=755b95b2e9c5cb5da4d4cd2c46ced327e10dbfc67a0d934667177b5fab73d431_Device=CPU_Config=()
+92:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1eb25d18fbd1070f2a8ff803d76077d092d493f9e9df80e93e2f58f3621a121f_Device=CPU_Config=()
+92:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=180e9c4ce23626df8010b5b79a28ecc4c6c75b65dea91938fa99025a65411239_Device=CPU_Config=()
+91:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=dynamic_IR=bc8918b82285bb58c2cf1b4b60b023262426de4044e0c2d50ae07f4b22ae0eb0_Device=CPU_Config=()
+91:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=08ba7fbf736896f373ea81dd727940aefae22a39e217e84dfc5617ed62133d10_Device=CPU_Config=()
+91:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d671a241de6d46bd5562def47a92602d2c9ba076568feed303765168433ee89b_Device=CPU_Config=()
+90:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b3cb0ba09807204990d7e1635ef35fc96aa10330de2ffefd95f6483e68dca532_Device=CPU_Config=()
+90:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=718d6ff3b19f498cf4edeb9f7f4a7528fef578dd6fc7edb0796d476505472e46_Device=CPU_Config=()
+90:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=2fda32f5fe8957d151306845ffd0f877b2efad70f7bd4921fab2fd770d78c2a8_Device=CPU_Config=()
+90:conformance/OpImplCheckTest.checkPluginImplementation/Function=Swish_opset4_Device=CPU_Config=()
+89:conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=c7a696f3217515ef4ff5eb46fbd15af6533f0fcd268398fbd434f105c0a11328_Device=CPU_Config=()
+89:conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=6838901bafb44e26f73134e2c0eb2be8f1f777ab794ae340d61b62d891ff3d59_Device=CPU_Config=()
+89:conformance_Gather/ReadIRTest.QueryModel/Op=Gather.8_Type=f32_Shape=dynamic_IR=f73224b14c094974e582d3d903cc332f5c1da138368692e5d0be93127f1bf753_Device=CPU_Config=()
+88:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=4d569fc3e7d2fa1724c99fec62e4f31fb000a6f5c306273c404e2b449761feba_Device=CPU_Config=()
+88:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2c20f6aace24bf601953b848c173ad475502b91b667c903638acf41fb9a67d3a_Device=CPU_Config=()
+88:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=e03d85019ea733c10b7ece4721036f3aeae2e60179d9b044d34e862608fd36a1_Device=CPU_Config=()
+88:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=285bcc240dec2c32e171f3866ea33107a109566fb8ef39f0dd84e99664aaf8df_Device=CPU_Config=()
+87:conformance_StridedSlice/ReadIRTest.QueryModel/Op=StridedSlice.1_Type=i64_Shape=static_IR=edb5dc5a42b36879d5ced77fc2db7d8b331c888534602893ffb277f742da1005_Device=CPU_Config=()
+87:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=eaac8b3d6a4920fa2ac101965805d140502fb409e230821d5c2a370aec15eed8_Device=CPU_Config=()
+87:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b4c737aec2f47947d1afbe26d9d8cd124c6fdd24e30cab1f563d91310d1b62c7_Device=CPU_Config=()
+87:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1180dfe50d43ef6b95980bafd3b84816f6d249f8341b03a6f67d20bd8f8ba6a4_Device=CPU_Config=()
+86:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=9b4725171957a420a98f908742f18062fbcee198871d527ab5b4d939005ac4e6_Device=CPU_Config=()
+86:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=95bbf8a23b19badbde31e9ae7f016aa436d50d797f59bd736e220030f645bd9b_Device=CPU_Config=()
+86:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=547fea7da34d5e65ad7ea069be003753e9ef281110c80dde11520bc350c4ca14_Device=CPU_Config=()
+86:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=dd9fee8f7cd289b97050e22cb465637c6439230d0d3ebcb20452eb544b40617e_Device=CPU_Config=()
+85:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=95ea118f8053f6bd18c8f34bbc475c00921bab5dc3af177492829d5cba16aa39_Device=CPU_Config=()
+85:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=99720c46a11f3e84906fd9327f25b187f328c6910868ac89738bc67ce0d90b64_Device=CPU_Config=()
+84:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=c5c5d09465cec7f1477d5e02f3f1c4cf593c71aa090532c4e43451fedde7c2c5_Device=CPU_Config=()
+84:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=27b03da9a0155039856b1bebe424d10d1b8ad768747cbeb851bfc0463edd5cb6_Device=CPU_Config=()
+84:conformance_Broadcast/ReadIRTest.QueryModel/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=2058e018d32d8a73b2bf6471186e555c47e2c1a15ceb4131bacc43110bc17d30_Device=CPU_Config=()
+83:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=1ceb1c4ba1a45cbb5cabe7cb4b416cbfeb93f24533c8123e4c2315cc7e9f40a5_Device=CPU_Config=()
+83:conformance/OpImplCheckTest.checkPluginImplementation/Function=Relu_opset1_Device=CPU_Config=()
+82:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=26d09bb7dc7ce95aac39023ac90bd083da9101b9e7383af49e7467e4f0571f2e_Device=CPU_Config=()
+82:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0fe2c94f8e2ed43edc0deb92ffe044a089c6920f886dcf6985ee910e7a4ffaed_Device=CPU_Config=()
+82:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=dynamic_IR=327d5120022c693531fe0f1f42429b1ad78f36cd5e414f1c8bab7d0c2ced62f7_Device=CPU_Config=()
+81:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=2435ff5e2ac06afcf99563821fa2a2a5e4a9456cb3f74154b3eb364a6f0e450a_Device=CPU_Config=()
+81:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=42f3f3a5b34aacb93147f9c77ad5709cf7436ae8cad9318434a9b6ff6852982d_Device=CPU_Config=()
+81:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=0edbc14a5d5ac1265a4b880131348aa16e284012547556ddedb36b185d833284_Device=CPU_Config=()
+81:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=04db488d856ff6cf4f04ad155967df95830796ad733e589f42c3862224acd874_Device=CPU_Config=()
+80:conformance_SpaceToDepth/ReadIRTest.QueryModel/Op=SpaceToDepth.1_Type=f32_Shape=static_IR=9296c80cc93d8ab7448140ad2f31b3b47a0759c383d1bc045704985503732195_Device=CPU_Config=()
+80:conformance_DetectionOutput/ReadIRTest.QueryModel/Op=DetectionOutput.8_Type=f32_Shape=static_IR=92c3646daf445784fceeb022afba2831938fed34660bac5445f033a1efdccc34_Device=CPU_Config=()
+80:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=a093f44f22a851366eec46b6ed80fcecd2a4a96ca797c2caf288922a2fae1fd1_Device=CPU_Config=()
+79:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=b63e04695c1e6145a3fa9835130a4919df52ff3a420d3c800bddff65af7dd76e_Device=CPU_Config=()
+78:conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=dynamic_IR=f0ae8e6b136d1db7e5e7748c03eeaed6907460d3d3941fcb1a6651cff61be113_Device=CPU_Config=()
+76:conformance_ConvolutionBackpropData/ReadIRTest.QueryModel/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=27a43bf8c20a81f1e244ace4c53f7cd9343a2603ba2c8b50bb041a4046ae6ecd_Device=CPU_Config=()
+75:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=79a6d2a402cdd74cf1277a57ff95b71d61384da394ad2a4d9ebcf422eb5c3258_Device=CPU_Config=()
+75:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=12e7ea655764a32069a93a3f7ab147983bceeacc8a2bc88fbb2def005a1596b3_Device=CPU_Config=()
+74:conformance_Convolution/ReadIRTest.QueryModel/Op=Convolution.1_Type=f32_Shape=static_IR=d932ccb58823509e768be954dc85ef1162d9456db17138d650a2a883e31b99ed_Device=CPU_Config=()
+62:conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=dynamic_IR=8bc8753f4d26c5d1f2ea481937dcce0f5b78971f18f5ebb258f49d4a0d86a333_Device=CPU_Config=()
+-1:conformance_ScatterNDUpdate/ReadIRTest.Inference/Op=ScatterNDUpdate.4_Type=i32_Shape=dynamic_IR=91f59d10b16e7305a651b8ee9480a0068225d6cd56026139e35ba69b9f84b00f_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=f097978a7f18dafc7577a9dcf2306d82d397faf1bedb106ca3de70b3d9ada557_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=ea63b1a277de19e725624c4d57d7decf2a01f9764510b0849e0b9dc49ad24fbe_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=d84c7cd2094853de1602906a47c4265442c727a532d85199772fdfaaaf7007dc_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=c5ce2b16d47cf93b073c2ba13556fa9fdd1b6f1dbe6387a50b507a40ab1d1c1e_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=af272d91ad67b0c830585f82cd83729fd832744707be8a2be800f76f3faadf6f_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=aed658319c31cdb1d3a47a2a93c7a4f524d9af8540e2019af10e8e1cebc3c2bc_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=a05339454f3f2a599ee9b041f1f01a124bad7d7e5fc1e6d133e00e43d002a086_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=9efb5290056ad2f5ee663d4f67a89edbcc4936e512748bcbc0e9f3935b690b1a_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=93f586b65926f2fb89cf5cc3379013f6df6964cb757fb3396060277dd393bb12_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=8457db0b4ea6829aad99afe4c31b7004b57daef4cd0ae02ca00090cbe5feb72d_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=6b0185f2e61c010924a76c5f136ed90d0e154f507028c500ee78bdc5a7ed65ac_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=570d13e19f312cf288f0f5d651f051c01f0fb65999579c3b06960c2936a18181_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=445aa399303e82b524cce3e0b3522cfdb57200720b3b72584c785fad157117b1_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=3cef1c65fc41c5f96e90007517fb5c911435e8d8ae7db1a1398ae63c2525d6c3_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=30897cde05f349bface3d90a8d730da4c4c3e5133c59495d59258224dcc29ae6_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=1b13b40884ddc8a2afdfc9bf351627746534303122dd4e0c2c5fdeace9e89e7c_Device=CPU_Config=() +-1:conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=100761a180c245ecb5f949d8a3ea0d4e26d7bb15d679ab797362f695bff03be9_Device=CPU_Config=() 
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=f110ef35c9642ecd941cd85a67a12b616353d4a8cd33f9770d532759e2846255_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=f0edc45979b98d4401eea2c345bbcb794721dd3cdbfb3963be5a2842b27ccc5b_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e4baf41ae9a77441993eb0f95c3d7335e9a719e5eac8b1ffaf60d8f515f769a1_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e48a363cfdabe0b62509e21641bb1cc88edaaa7d2eb82bf3ce747cab8355ff3b_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e3a5a7f1a73793457fae9520ae122c6bbbfa92f1daac0ef214e47a2ec7ea18e2_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=d8546655166c322e3049ed3a71725c8e89901212007c44c8029ef8379de96db6_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=d36c5ab59d2ab873aa35b35a952e061568edd4ee8e64c1ab200bea63472a97b3_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=cedd3bc0f0a8e20fe947135bd6ab9515283275867e1b837d36f2fac72363f449_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=cdf79cced0ed380052910c95b09b4022841474c87d06061f29791ea2ad9813a4_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b6669eb568f36e5d649ae67afdecaa481064561d7a71f1aab592968aca7d8bb0_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b0e3e542180f521cfd4651ae18d3a58962751d3c6de9265240be6d4fe9745bf0_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b06553539d6e27195623fcbce51610b5671dd70700bcf61703a1f7a8bbc7c5d8_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=98274ec3fc894754adaacedf83b4b7da373e639a51cfa7dc348412898e45e8dc_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=94b08f3c309048124724d9de0d120698fed90ff0237b07c4a4a2b7ccf843d76a_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=939b665ae35f9a384e3119dc3bdc1904b105de495d262648282c859b0cb4c9e3_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=91b6cdd8a7664759217ce0b84a8baed2105bca0ae9876e9efd01c074aa27039c_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=8de81b7de05bdd151427e1b5b03a8b4222284dafd31f9d4b1c3d0917995e9310_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=8216637727ccef527454bfdea7ab22ccd4e5e29709494bf96dde5af3b4a7eaaf_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=80cdfe1797800671134d77fa9c7032cdc1b19b4905fcefb11399610216f6e623_Device=CPU_Config=() +-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=7cfae687d408da17a0405d88f47e2b6623a608861114dc76018b8a2142453139_Device=CPU_Config=() 
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=67a5010efb429e6dedf35481443b40a77cb01c1b4fb51ec5890fcfcb010fd6f7_Device=CPU_Config=()
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=451a3b635d3882a87cc0d7b3f6f74197c08b708669751bb11fef93da9604e276_Device=CPU_Config=()
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=36f17a498b10c140f8a319d82e5c8f2cc3cdb7eb3be9f82f7ef35d9c9470231d_Device=CPU_Config=()
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=3141ed71fe3efbd7fb026a006824ec24e4673d8b97d23dce275548e92eedad91_Device=CPU_Config=()
+-1:conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=2463ef4b8684fd6b391fca0b123328e1d695b47017fe94ffe5a419a3c22ce93e_Device=CPU_Config=()
+-1:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=68dc9d01cbbb3546ce77dbc77d705f33a6a48cb6dca9a323f5bcf02b9d589993_Device=CPU_Config=()
+-1:conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=17be9a027c25bbfbc08cf4dd106ee25d649680b30d16c74580fb3f8fcab54baa_Device=CPU_Config=()
+-1:conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=7581193e4db43b0e50c6a1a52f8b348d88587040bf38d1b780ac660781e3d3a4_Device=CPU_Config=()
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv
new file mode 100644
index 00000000000000..2a7b644c116539
--- /dev/null
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_API.csv
@@ -0,0 +1,27 @@
+Test Name,Fix Priority
+ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_DEVICES:},1.0
+ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:},1.0
+ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:},1.0
+ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_,1.0
+ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3,1.0
+ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2,1.0
+ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1,1.0
+ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/0,1.0
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:YES},0
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:NO},0
+ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:1},0
+ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={OPTIMAL_BATCH_SIZE:},0
+ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={MAX_BATCH_SIZE:},0
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOADED_FROM_CACHE:},0
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_UUID:},0
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_THERMAL:},0
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_LUID:},0
+ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_GOPS:},0
+ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={ENABLE_MMAP:-10},0
+ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:-1},0
+ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0
+ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0
+ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0
+ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0
+ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=CPU_,0
+ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=CPU_,0
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv
new file mode 100644
index 00000000000000..1cd7e681a499f4
--- /dev/null
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/github/skip_configs/CPU/expected_failures_OP.csv
@@ -0,0 +1,211 @@
+Test Name,Fix Priority
+conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=bf235091db192c86756347e70771b4b00a6ac2c8852b93079749ba718d57d022_Device=CPU_Config=(),1.0
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=77e1c522d9ea4975c3071869b7b485038bb4035c9aae6f5d44291f60ae253a0e_Device=CPU_Config=(),0.575008
+conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=bee11d430236dcbd0fb5efbae712d8d89d84beeb89e0ee60e0ba3ba9512079f8_Device=CPU_Config=(),0.301142
+conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1be99c00176df777bd8cdbd9f74ff064237f55053dc7490050d692274182182d_Device=CPU_Config=(),0.273069
+conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=df8ed5b481f6b03ca63572f2059d20911d3a7757f4c032455bef9933f2c1dc35_Device=CPU_Config=(),0.204151
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=eb966d8fd7e1301280e6ef709dd785d210a35a1346eb88c3f38379bd96036ce4_Device=CPU_Config=(),0.188153
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=90f882a97d637e527900edfb1b7c277b65544832793d08efdf8454be21a2f496_Device=CPU_Config=(),0.136236
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0ffc7d7836be264b6d9f26daa71a8c7100ae6bc6fa1af23614a2736226fbdf0f_Device=CPU_Config=(),0.133748 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=351e48db45e09ca6c4bc54a271eda4cb2ddd69ba43f361b9915a6588913768b0_Device=CPU_Config=(),0.0716183 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ab760f0d90b0fef133a0555cb2a5d40fb525aef88e6568c5387a87d7e82f67f8_Device=CPU_Config=(),0.0684052 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=afc2cb913bcb4e4badd203c9cdf491ea1e6ed4f1cd835e7507889a9bba25b958_Device=CPU_Config=(),0.053864 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=0a16d9d3d8d553c2c747b2c68b12eee3dcc016e29e597992cad8f83aff0aa759_Device=CPU_Config=(),0.0497053 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0f623457008d91f7fcaead549e4a3f90a5ca77dd7c52fba19906f559c34b333b_Device=CPU_Config=(),0.0350581 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c9352ef8b6aae01025051f9c73f023e7b5a13f8987f81bfff4ce0ff9725c21b5_Device=CPU_Config=(),0.0338749 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=469d09b261b88011c82288ea622dde06d63805eb41dc256c901b0d206ac5780b_Device=CPU_Config=(),0.0227255 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=e4523b73661dc593224b91713f8f20f1c87513a62e3b8ee8265e1136eb74f9ed_Device=CPU_Config=(),0.0218253 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=4d2e12e00779d116e2192ca77f2be233d76bdd5ce366ddabcf436cc205a9f811_Device=CPU_Config=(),0.0217789 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=539610c21b2839b71cfecbb15b7b7145f9fee8bfef8ed9e1d73aaad2de661496_Device=CPU_Config=(),0.0198891 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=d77f317dd01a80955f901d0da2930aa1f82531848f4bf22d839c60a84941e6c4_Device=CPU_Config=(),0.0196749 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f977fc239a0230860702f8c1971bd424f10b978bb03937668c37edee6777f12b_Device=CPU_Config=(),0.0193262 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=b08690e29e0249d5a6a30f2ad886ec714067df994bc4d8cbd82d0d02af6335bf_Device=CPU_Config=(),0.0190276 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=d51bc4204bb6079e79da8d0cf95ab8a3454c90a040aee0fc6fedb00f0795c577_Device=CPU_Config=(),0.0180446 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=bca72a16df5bcf81d10dfbbb0e53aceb2a8a70ec94d4247d47333679de7214c5_Device=CPU_Config=(),0.0161398 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=776b4b6d6b102654bbc08df901869e4d16af505a5dff7f2d27686874bd20ccc1_Device=CPU_Config=(),0.0141907 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=969c6a651dc204576d68d7d893ad2dbff1f7c74803b1763857d41aabdd19a72a_Device=CPU_Config=(),0.0133005 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f208ab78a0ef0497856952f499578a17818269d066f4281183ef92ac2f9ce449_Device=CPU_Config=(),0.0119654 
+conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=(),0.0113958 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=(),0.0113958 +conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=ae538f87e4d49bbdc53184fcaa6082eee131a79b480dab9b46e12976d01ea913_Device=CPU_Config=(),0.0113958 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=69c68c20edefc8789e62a7cc8a0f8fe7e649f884649ac30833fb5a2ce43c4098_Device=CPU_Config=(),0.0112611 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=59eaeea8102953f8ffe85ed1ced2a44ddeed77ec237608b45be0573bb32b1104_Device=CPU_Config=(),0.0106392 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=cc2f28d736d3c67fdd13fbea9b8cef7c0b075f06b37034581fc732966421802f_Device=CPU_Config=(),0.0103985 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=017d4b1dac18731e05634414942698ecbc750e306eb86e773ffe5007bfa9feee_Device=CPU_Config=(),0.0100867 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=486cda3fac543c53e385e5b26f0932be2c2c67d937dce02e9376ba2956321e5f_Device=CPU_Config=(),0.00993054 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=caf20ebc8d39cb23a107a03e819e8ee5b2807fbd311fe65453446251e4b6a611_Device=CPU_Config=(),0.00991679 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=3c03ae2ab13dfccc85d9909840eafb6a291b978e9bf859f27886b4a0d3e87ffa_Device=CPU_Config=(),0.00984804 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=05690f7225eecae70805d45641cd02c02c46bc61f9fa4cf91d3ec7ce94f6fd3f_Device=CPU_Config=(),0.00900461 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=b2ca18b9d9f9e7c05f66a1f197b65ef9ca1d59319ed5f30d4eadf6f8befcd9bf_Device=CPU_Config=(),0.00861005 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=12e571ef61251520c35bd8c0429b1ee71277033ae88101f08dd769a300d86c5c_Device=CPU_Config=(),0.00820869 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=f9f031e1fb61fcf87468eb1f4b2005e7cecc5f073eca95c161fe62fbbfc983f4_Device=CPU_Config=(),0.00807685 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=af1f864a9f4bc94bdb713b0fed3f4c39dbd290cf7464f3cee8f1aded11981d4d_Device=CPU_Config=(),0.00585062 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a917525b3e5a37fc2be5f35fd5a3d50b57627cd9b985333e082b169c29f848f3_Device=CPU_Config=(),0.0058006 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=dynamic_IR=2d924ba2d56e6b5c7423c6d622e7bd250ab275e0a0ab4745e232046a3223ce7d_Device=CPU_Config=(),0.00568133 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=910dee337e395f94d7673f664a3e58647ead8bcedf50ea1439250bdfe8da25dc_Device=CPU_Config=(),0.00556665 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2b59c9f67435c46699dc1c66ee7ddbdd333bfa544d0aef7bd1389db2635868c7_Device=CPU_Config=(),0.00499282 
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=37337436d0d481c689caabec3bbc8f21ecec65560c70de4dd1f5b0ed9e444bf9_Device=CPU_Config=(),0.004941 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c1923c409aa2da9da8daf339b8b26be9ec6a106e65098182015c21881b0b5379_Device=CPU_Config=(),0.00482108 +conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=(),0.00426381 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=(),0.00426381 +conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=a3e1aaa7054d4d046bab4614737c931b25a574051a2f8b79799aaf6fbbd2c2e3_Device=CPU_Config=(),0.00426381 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ca3d0cbccca665493e85a757798ab5e12399ad295466cea744c7a2d278c86c97_Device=CPU_Config=(),0.00424488 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c359e1ea71a80fc519e8a2dacfc7f52f5a94a1142058641b0434f40866875c12_Device=CPU_Config=(),0.00401399 +conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=(),0.00373719 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=(),0.00373719 +conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=1a29918856ba0f88e99346fda6e6c21ff2bf129f5599d8a1c8611346ab41f2f7_Device=CPU_Config=(),0.00373719 +conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i64_Shape=static_IR=9402d607ff481567bf322dcea9aa597387a195b9d3756ff46de81c3ac2737a49_Device=CPU_Config=(),0.00251179 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6adce7c66c1630295ec8938bcb429f20b628b0ceed938bf81ac0fca8580f8d34_Device=CPU_Config=(),0.0023531 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c90ac17f02f16c647a0a206326f24ac348a0f8a7787037486e52ecc8c091818e_Device=CPU_Config=(),0.00203846 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=8f622d39d560109549e99d37f3c9cb476f4d69e8525e7a0ad8fce6fe79a6f982_Device=CPU_Config=(),0.00199296 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=9c32e47cd885805256c3e3053412f7d8c448762b4b509507f6e4dd78e2aeb56c_Device=CPU_Config=(),0.00186797 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=87baad85c649084e386ca502375581e9dc47c68c076bacae5e5ac1ddbaaa7830_Device=CPU_Config=(),0.00173828 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=d27e8ca8280dc9219f4b76a2c8f47cf526b32a58710126c7549e2c04026944de_Device=CPU_Config=(),0.00170877 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=34784838e98e93a6b024109ef3a8a5d4e1fc7f89b98ca23c81cf085f19acc663_Device=CPU_Config=(),0.00150234 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=e2da6d928938b6445170cd69fd4a7aab40130a560cef3ffa2d268a428f56fcec_Device=CPU_Config=(),0.0013649 
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=81954ff76e3fd04ec3b3e3c26e28a79ac259c9b255f90ebe3cc0772fb673874e_Device=CPU_Config=(),0.00133616 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f729a1e882f1894319a357f6c5474552e883ae9322cc3dc399b3a292b13e6de4_Device=CPU_Config=(),0.00121233 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5d7273e7772d3578b3c8dcefcce25913c8e843b7a1045722f80f9feed4770ba1_Device=CPU_Config=(),0.00120442 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=aecc8a062c16343ac138f351d774858b523e42d5a09ab67b1b61e64fe62e73ff_Device=CPU_Config=(),0.00111048 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=6095afd484c177267854bcab902c3057a2a1bbf37b2188d3a31fd2cec48de2fe_Device=CPU_Config=(),0.0010979 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=53828d433bfa231cac709949db0e4ff72010e5cf9df167ecda7ac72bd5a69e10_Device=CPU_Config=(),0.00100779 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=dynamic_IR=c5ff38504273a230addadadf4fef517ef73154c5f9f10ef2ace961b1dc3cb794_Device=CPU_Config=(),0.000946854 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=3c200607c5e2b90b5d75a439011d83643ba042c276c3033f58b3409c068faf8a_Device=CPU_Config=(),0.000944591 +conformance_subgraph/ReadIRTest.QueryModel/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=(),0.000907351 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=(),0.000907351 +conformance_subgraph/ReadIRTest.ImportExport/Extractor=fused_names_Shape=static_IR=478861c92198ee8679e3e43476abfe79906c4ead6ee80af975af365829822025_Device=CPU_Config=(),0.000907351 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ae0e669fbddc34e8aaaefff248959e3fe53196e68bc1b3a9e66be16a495d7cd2_Device=CPU_Config=(),0.000833132 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=5c05bbc013fc857a8f2b340df778f3ad5bdbc1b7273cf41b23d6da410205c612_Device=CPU_Config=(),0.000823042 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=35525421f74fa15c49098ff1c7faed4fe65763d72ed13add33c6fe8d4dcfb0ed_Device=CPU_Config=(),0.000813329 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=77dbcc61a98e0bf3c1bdcbec543818a8a959751f10b8ec1489b66570ff4e634e_Device=CPU_Config=(),0.000694344 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=f0ae8e6b136d1db7e5e7748c03eeaed6907460d3d3941fcb1a6651cff61be113_Device=CPU_Config=(),0.000689831 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=5b6503944921be5fa3feb0b7647c6715465af16702c645dec4e2f2556d8d679c_Device=CPU_Config=(),0.00068405 +conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=(),0.000669094 +conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=(),0.000669094 
+conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=cf9b80bd696164fc7c8f077eb532fffc455eaf648589c54943cd1b5668e2f077_Device=CPU_Config=(),0.000669094 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=42afa027ada245d36900a89c54a870ba5fc7fe3cc3bc0fc7dbda23af3e5111d8_Device=CPU_Config=(),0.000662206 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0d74ee98934e32799620ac90fd3ae8335bca026b9225782458949c64139d89c3_Device=CPU_Config=(),0.00064385 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ede6f5f8a1d9bcfd1979965f575c8f267870e0d6a5d3a62d229ea029893525b6_Device=CPU_Config=(),0.000641885 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=0e58762b5cd9926391cba6f63db3c7db49285b900ad0abc93b4d05d4baec800c_Device=CPU_Config=(),0.000588452 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=8978b8e985b54cc12e2cefa8d9097f4a3a03d477129230b6c7e3daf8112e2c0e_Device=CPU_Config=(),0.000557934 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=ade98724a678a73bf789fc539dfa277031242ea3a694227dae29c11b45cdfb9e_Device=CPU_Config=(),0.00054894 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a9c40d7a1ada834400ffbdff779b9970c83bd576891dfa7f637182cadf9e9681_Device=CPU_Config=(),0.000540683 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=c6b8f476c9b5cf1a102cb33d5e68033bb074a520d01e360ff46b3e479addf407_Device=CPU_Config=(),0.00053922 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=35ab7a27cb56964d974f5e1b55c1ed76d7f9443f97da0b977370ca9fc414e093_Device=CPU_Config=(),0.000537253 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=08fa156c3f25fc8836356fd1a8edb73222f9fe2b3476c0ae32a26636b5870247_Device=CPU_Config=(),0.000492921 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=5aaa81d6f07ed880b1e93a0fce7b6aab4c3c88bfb1b4b6cda4ead15eb145af63_Device=CPU_Config=(),0.000487154 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=fd97b6aab7b86b0dd2f8c0ce622601e80f3b864d23d7d4f61d2dfa42195936b1_Device=CPU_Config=(),0.00043813 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7895fea00309326a052d47dbd2f9e562b86bb9d0501f2a2fd8843a0340359b67_Device=CPU_Config=(),0.000427698 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=138e0258106faf2065b52655adfb8b45d49b677f9cd04850bc5ac9335a9d16d7_Device=CPU_Config=(),0.000426517 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=14c8a8bb712c40d63edf76de9a75dd1dcd53a2df8c6098c80ee760119966f364_Device=CPU_Config=(),0.000402047 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=ba28829f211d64d6d4922682b85f1bad6a3c28cc30b4f9651186b1e8fab39fec_Device=CPU_Config=(),0.000383712 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=c0cbd07b1517991754ef075284aedef586dd4b250e2b867379dacebdf99ce1e1_Device=CPU_Config=(),0.00036287 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9730f247ba4a13fb03274850f295de500156107d33db957188846fe49c2f4566_Device=CPU_Config=(),0.000357687 
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=97f8a2367c5590d5fe7e405d32ec48e5318a6cb3c0862f2b0e8705a7842e8105_Device=CPU_Config=(),0.000319329 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=1a9779319a9cc5f21b6005ebb9b4517e0bb1f868ef8e568453a58c44474c40bf_Device=CPU_Config=(),0.000285942 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=9e0cfe97e08c7b2974ef224799ccaa3fa777802a5fd320a089e527f00a594dbc_Device=CPU_Config=(),0.000268331 +conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=05e89f7690a9c7d235c753aa4af28229a44fab527f44ff4832ebcebf0c9debfe_Device=CPU_Config=(),0.000255276 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=7340b50308272b86e1b98e6962ee280e9575fc0d7042b9cc076c530268e2ca74_Device=CPU_Config=(),0.000237433 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=dc350b3fec164adcb096b8fc922e342cf7b0c6f7a4aa25074bec5566225cff01_Device=CPU_Config=(),0.000215236 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=af272d91ad67b0c830585f82cd83729fd832744707be8a2be800f76f3faadf6f_Device=CPU_Config=(),0.000207265 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=9efb5290056ad2f5ee663d4f67a89edbcc4936e512748bcbc0e9f3935b690b1a_Device=CPU_Config=(),0.000207265 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=570d13e19f312cf288f0f5d651f051c01f0fb65999579c3b06960c2936a18181_Device=CPU_Config=(),0.000207265 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=445aa399303e82b524cce3e0b3522cfdb57200720b3b72584c785fad157117b1_Device=CPU_Config=(),0.000207265 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=100761a180c245ecb5f949d8a3ea0d4e26d7bb15d679ab797362f695bff03be9_Device=CPU_Config=(),0.000207265 +conformance_subgraph/ReadIRTest.QueryModel/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=(),0.000189037 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=(),0.000189037 +conformance_subgraph/ReadIRTest.ImportExport/Extractor=repeat_pattern_Shape=static_IR=13f3d097d5e17c2add48d6f9b6f86454a1b521408d7fb8252e3638d9f17ea6fb_Device=CPU_Config=(),0.000189037 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=1e56a3e2379d29d81af93174e56ef91408af41dfc085d4851ff58dbec781b8fa_Device=CPU_Config=(),0.000164938 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=14f15558b2c7699f7877a9e04e1e0e7d2a2d7e1307aaca519a98ea5f39afc415_Device=CPU_Config=(),0.000150034 +conformance_HardSigmoid/ReadIRTest.QueryModel/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=(),0.000138293 +conformance_HardSigmoid/ReadIRTest.Inference/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=(),0.000138293 +conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=08a7845e89900ed725c984b42b6bc262a7f7956ec50e0a7bbdfe8e4a34d584e2_Device=CPU_Config=(),0.000138293 
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=a4ab938f33d0b58425ed98a56789d0ee94beeca13ec7fe3358c9d3751ef136a5_Device=CPU_Config=(),0.000129335 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=3e1e1cd684c1bcfcf06febedcb4eb0f4f62b5c0920098fa0715c828e9a9761a7_Device=CPU_Config=(),0.000104221 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=6289210c93bab9199850c9aef5ac3144ad0a900007dbca3e889a9f875318e9b5_Device=CPU_Config=(),9.94375e-05 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=a1b6d340122e8e3a7a665c69fb11b3c7b460eae79ec81ed3c32e878d10d5c3eb_Device=CPU_Config=(),9.36129e-05 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=0f670e49f962b0a7abc6b4f1fbf9592db592a6a78eb3e083dd4027b9f9607430_Device=CPU_Config=(),8.71821e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2435ff5e2ac06afcf99563821fa2a2a5e4a9456cb3f74154b3eb364a6f0e450a_Device=CPU_Config=(),8.34273e-05 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=789949951bc3afd20fdff943ca2a706f79eb4f95be60086ddf632b43c3e401e6_Device=CPU_Config=(),8.2448e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=57c57d85bad2b76d3d65d88baf2b3677dca6e5d534121e87efd618efbe5b1547_Device=CPU_Config=(),8.05701e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=dynamic_IR=deec30214c79ceb43a503bf521937a2bd554588775195d0e6302c521cd2b55ab_Device=CPU_Config=(),8.00406e-05 +conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=360205b273a323d2cea16c9ac98847c904ed6cabb2412d3b49c27fd2eec52ab1_Device=CPU_Config=(),7.97528e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=f99a212a117855e6e2dc4a338444a8ecee441f989638f7a0700ce24e037d29e3_Device=CPU_Config=(),7.90737e-05 +conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=d296b02cead8f38f8a2c9fa73ab8103d3050549c92fb807b040dd6e3bbd7e2ff_Device=CPU_Config=(),7.86775e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=bbe05f014b2e4602f4e44d9c07795321404d2459bf782d2dd406de14bd2bd523_Device=CPU_Config=(),7.59607e-05 +conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=static_IR=9c57b92a55a929edae54a9705d80d730f7682ef015aa6923bd4658e244e9ca89_Device=CPU_Config=(),7.31894e-05 +conformance_ROIAlign/ReadIRTest.Inference/Op=ROIAlign.9_Type=f32_Shape=dynamic_IR=7260d5fcecb95f9632da5784702239161bdcab6bee60e0c1296a46e5120d5ca0_Device=CPU_Config=(),7.31894e-05 +conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=i64_Shape=dynamic_IR=2058e018d32d8a73b2bf6471186e555c47e2c1a15ceb4131bacc43110bc17d30_Device=CPU_Config=(),7.31894e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=dd575df40c907e85f7561296f2b1b5bb9786bf44bc27f26e33f235ba57391e26_Device=CPU_Config=(),6.61996e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=ea63b1a277de19e725624c4d57d7decf2a01f9764510b0849e0b9dc49ad24fbe_Device=CPU_Config=(),5.24098e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=aed658319c31cdb1d3a47a2a93c7a4f524d9af8540e2019af10e8e1cebc3c2bc_Device=CPU_Config=(),5.24098e-05 
+conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=a05339454f3f2a599ee9b041f1f01a124bad7d7e5fc1e6d133e00e43d002a086_Device=CPU_Config=(),5.24098e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=8457db0b4ea6829aad99afe4c31b7004b57daef4cd0ae02ca00090cbe5feb72d_Device=CPU_Config=(),5.24098e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=6b0185f2e61c010924a76c5f136ed90d0e154f507028c500ee78bdc5a7ed65ac_Device=CPU_Config=(),5.24098e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=30897cde05f349bface3d90a8d730da4c4c3e5133c59495d59258224dcc29ae6_Device=CPU_Config=(),5.24098e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=d84c7cd2094853de1602906a47c4265442c727a532d85199772fdfaaaf7007dc_Device=CPU_Config=(),5.19909e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=c5ce2b16d47cf93b073c2ba13556fa9fdd1b6f1dbe6387a50b507a40ab1d1c1e_Device=CPU_Config=(),5.19909e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=93f586b65926f2fb89cf5cc3379013f6df6964cb757fb3396060277dd393bb12_Device=CPU_Config=(),5.19909e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=3cef1c65fc41c5f96e90007517fb5c911435e8d8ae7db1a1398ae63c2525d6c3_Device=CPU_Config=(),5.19909e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=1b13b40884ddc8a2afdfc9bf351627746534303122dd4e0c2c5fdeace9e89e7c_Device=CPU_Config=(),5.19909e-05 +conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=dynamic_IR=6c91ebbae26ffbeec9778f2db476ad7ecb6eca6710cba24a86d3a2a262f68e43_Device=CPU_Config=(),4.72987e-05 +conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=1c727cc96123227a9fe6c3079a497fd64a04f273bff45b5ea56a3c0d577eca8e_Device=CPU_Config=(),4.72987e-05 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=c078bcf5a6a207fd76d9cddc1a35df577529e71ba0a120b28c7ed17bd12673bb_Device=CPU_Config=(),4.4408e-05 +conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=0bbbd97c4428b9565666e9a1e56acc70035b378e16abafc54559a155583d9e6b_Device=CPU_Config=(),4.4408e-05 +conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=dd9fee8f7cd289b97050e22cb465637c6439230d0d3ebcb20452eb544b40617e_Device=CPU_Config=(),4.12031e-05 +conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=17be9a027c25bbfbc08cf4dd106ee25d649680b30d16c74580fb3f8fcab54baa_Device=CPU_Config=(),4.10145e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=7dcfe3f43645f6b9f3290b524024a1a3d48efa3ce346eacc2330be7e27a046fd_Device=CPU_Config=(),3.9377e-05 +conformance_MatMul/ReadIRTest.Inference/Op=MatMul.1_Type=f32_Shape=dynamic_IR=68dc9d01cbbb3546ce77dbc77d705f33a6a48cb6dca9a323f5bcf02b9d589993_Device=CPU_Config=(),3.63643e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=8c43b49d99c64bec883205ca15c7b2d9dbb47b9fe5140fedaeb8eb7220a36f6c_Device=CPU_Config=(),3.38007e-05 
+conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=2d886a31e22f61d30c33ddd300ba7d8ba1cd9796ee1a4f688db9126b1d8d9c83_Device=CPU_Config=(),3.09972e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=static_IR=0a2b1efb810d1dcf7897c3671f1eef0c36bcdca679e24b8e86f078128b381833_Device=CPU_Config=(),3.01889e-05 +conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=802164adc9e651b0a3ec0b5f96341fc3cbd098042412236b65e0c8f77b5153f2_Device=CPU_Config=(),2.94866e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=98274ec3fc894754adaacedf83b4b7da373e639a51cfa7dc348412898e45e8dc_Device=CPU_Config=(),2.93959e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=94b08f3c309048124724d9de0d120698fed90ff0237b07c4a4a2b7ccf843d76a_Device=CPU_Config=(),2.93959e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=939b665ae35f9a384e3119dc3bdc1904b105de495d262648282c859b0cb4c9e3_Device=CPU_Config=(),2.93959e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=67a5010efb429e6dedf35481443b40a77cb01c1b4fb51ec5890fcfcb010fd6f7_Device=CPU_Config=(),2.93959e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=36f17a498b10c140f8a319d82e5c8f2cc3cdb7eb3be9f82f7ef35d9c9470231d_Device=CPU_Config=(),2.93959e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=2463ef4b8684fd6b391fca0b123328e1d695b47017fe94ffe5a419a3c22ce93e_Device=CPU_Config=(),2.93959e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=28e31d83986a1435f11ba6355b98472025fcf2c3c6e090103283d9486356b5de_Device=CPU_Config=(),2.86055e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=09d4b4ea324f91ba6006bad4c82ca08e723c83c1b862d8075475e986696220da_Device=CPU_Config=(),2.83829e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=df97761393479b4c56cc923a2b89888b7c3fb949f5c3a93f4bba0ac8a44178aa_Device=CPU_Config=(),2.66896e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=repeat_pattern_Shape=dynamic_IR=47423c3e9443249e3dbbf58ee0f5b69b15e677f84de44ddb9d2851d1341dae96_Device=CPU_Config=(),2.4413e-05 +conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=dynamic_IR=cd6084826e0efefc7f1c9c3c7c9f8c1cb35b9a5f61d1a2c8131ecec5babf1af4_Device=CPU_Config=(),2.43685e-05 +conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=d12f2033cdee7e244afad462ca1d9295c314836b593b2a30730861c2a3c8e9f2_Device=CPU_Config=(),2.35027e-05 +conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=f097978a7f18dafc7577a9dcf2306d82d397faf1bedb106ca3de70b3d9ada557_Device=CPU_Config=(),1.99836e-05 +conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=445a2c47e85b116d03e5f6fe43863a39778b78ca5175fba1bb0eec669f7610cf_Device=CPU_Config=(),1.7481e-05 +conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=fc530f5b6bbe8f06808eeaba33889867e705fa69591d01da4dd3dee9515f323f_Device=CPU_Config=(),1.16885e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=f0edc45979b98d4401eea2c345bbcb794721dd3cdbfb3963be5a2842b27ccc5b_Device=CPU_Config=(),1.12696e-05 
+conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e48a363cfdabe0b62509e21641bb1cc88edaaa7d2eb82bf3ce747cab8355ff3b_Device=CPU_Config=(),1.12696e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=cedd3bc0f0a8e20fe947135bd6ab9515283275867e1b837d36f2fac72363f449_Device=CPU_Config=(),1.12696e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b0e3e542180f521cfd4651ae18d3a58962751d3c6de9265240be6d4fe9745bf0_Device=CPU_Config=(),1.12696e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=7cfae687d408da17a0405d88f47e2b6623a608861114dc76018b8a2142453139_Device=CPU_Config=(),1.12696e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=451a3b635d3882a87cc0d7b3f6f74197c08b708669751bb11fef93da9604e276_Device=CPU_Config=(),1.12696e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=f110ef35c9642ecd941cd85a67a12b616353d4a8cd33f9770d532759e2846255_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e3a5a7f1a73793457fae9520ae122c6bbbfa92f1daac0ef214e47a2ec7ea18e2_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=d36c5ab59d2ab873aa35b35a952e061568edd4ee8e64c1ab200bea63472a97b3_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=cdf79cced0ed380052910c95b09b4022841474c87d06061f29791ea2ad9813a4_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b06553539d6e27195623fcbce51610b5671dd70700bcf61703a1f7a8bbc7c5d8_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=8216637727ccef527454bfdea7ab22ccd4e5e29709494bf96dde5af3b4a7eaaf_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=80cdfe1797800671134d77fa9c7032cdc1b19b4905fcefb11399610216f6e623_Device=CPU_Config=(),1.06272e-05 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=e4baf41ae9a77441993eb0f95c3d7335e9a719e5eac8b1ffaf60d8f515f769a1_Device=CPU_Config=(),9.45415e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=d8546655166c322e3049ed3a71725c8e89901212007c44c8029ef8379de96db6_Device=CPU_Config=(),9.45415e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=b6669eb568f36e5d649ae67afdecaa481064561d7a71f1aab592968aca7d8bb0_Device=CPU_Config=(),9.45415e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=91b6cdd8a7664759217ce0b84a8baed2105bca0ae9876e9efd01c074aa27039c_Device=CPU_Config=(),9.45415e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=8de81b7de05bdd151427e1b5b03a8b4222284dafd31f9d4b1c3d0917995e9310_Device=CPU_Config=(),9.45415e-06 +conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=3141ed71fe3efbd7fb026a006824ec24e4673d8b97d23dce275548e92eedad91_Device=CPU_Config=(),9.45415e-06 +conformance_ScatterNDUpdate/ReadIRTest.Inference/Op=ScatterNDUpdate.4_Type=i32_Shape=dynamic_IR=91f59d10b16e7305a651b8ee9480a0068225d6cd56026139e35ba69b9f84b00f_Device=CPU_Config=(),9.17486e-06 
+conformance_GatherND/ReadIRTest.Inference/Op=GatherND.8_Type=i64_Shape=dynamic_IR=c1cd785825e1b2794d4bc74f6dc257e92a382e95a868a864125da70acc5cdbf4_Device=CPU_Config=(),9.17486e-06
+conformance_Pad/ReadIRTest.Inference/Op=Pad.1_Type=f32_Shape=static_IR=fbb53c04f3cfadff9d6543e2fb4eb88d882c3189b4212e77a6ca6e50bdba6e07_Device=CPU_Config=(),7.73649e-06
+conformance_subgraph/ReadIRTest.Inference/Extractor=fused_names_Shape=static_IR=9efd5749a1591709057d6e97334c9b5b89f5864d705c91774e0196d42966d1b9_Device=CPU_Config=(),6.64554e-06
+conformance_Broadcast/ReadIRTest.Inference/Op=Broadcast.3_Type=f32_Shape=dynamic_IR=7562536120d473cca837bb2ad1e3969484868111954ac0b168a5c2805264a689_Device=CPU_Config=(),6.07468e-06
+conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=3df69301c7a4d857a546a30a0d76674c52e3abd819d644ec036636eb7cb92fc1_Device=CPU_Config=(),5.49514e-06
+conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=d11097e7fa04dc0b540bf3b963cde252591b39b7dcbfae66e64ed19cd2b3b06e_Device=CPU_Config=(),2.89071e-06
+conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=287a7562757ef0295cc38442e3d775cff0fb1ea9b27e6897bd456f01ce82d455_Device=CPU_Config=(),2.89071e-06
+conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=f86f86769ec214942eaf1fdcd312a29e26308676419d8fbd98fdc485c2de0815_Device=CPU_Config=(),2.89071e-06
+conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=7581193e4db43b0e50c6a1a52f8b348d88587040bf38d1b780ac660781e3d3a4_Device=CPU_Config=(),7.05081e-05
+conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=5ae2e8ce34957ac812bd04943714d0b0ca6e2098c46caccfd775620d7f373cbf_Device=CPU_Config=(),2.76503e-05
+conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=99720c46a11f3e84906fd9327f25b187f328c6910868ac89738bc67ce0d90b64_Device=CPU_Config=(),2.76503e-05
+conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=b7a53df966d640f075cea7421ca5989ca91ca638e7af16aff33bc275eb7dfe9c_Device=CPU_Config=(),0.000352708
+conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=5b185120e46fc0a2238ff4de19e278888ecda5fbae130c62e1ec21b4883ee61d_Device=CPU_Config=(),6.62629e-06
+conformance_Unsqueeze/ReadIRTest.Inference/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=bda73cc94d837df9fb535743febd300cf0baf7fdf48ff538c079a4a7ca291592_Device=CPU_Config=(),2.89071e-06
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
index d209e60c53bd87..0efdd4aa26df81 100644
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
@@ -54,7 +54,7 @@ def parse_arguments():
     ov_config_path_helper = "Specify path to a plugin config file as `.lst` file. Default value is ``"
     special_mode_help = "Specify shape mode (`static`, `dynamic` or ``) for Opset conformance or API scope type (`mandatory` or ``). Default value is ``"
     entity_help = "Specify validation entity: `Inference`, `ImportExport` or `QueryModel` for `OP` or "\
-                  "`ov`. Default value is `ov_compiled_model`, `ov_infer_request` or `ov_plugin` for `API`. Default value is ``(all)"
+                  "`ov_compiled_model`, `ov_infer_request` or `ov_plugin` for `API`. Default value is ``(all)"
     parallel_help = "Parallel over HW devices. For example run tests over GPU.0 and GPU.1 in case when device are the same"
     expected_failures_help = "Excepted failures list file path as csv"
     cache_path_help = "Path to the cache file with test_name list sorted by execution time as `.lst` file!"
@@ -68,7 +68,7 @@ def parse_arguments():
     parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False, default=NO_MODEL_CONSTANT)
     parser.add_argument("-ov", "--ov_path", help=ov_help, type=str, required=False, default="")
-    parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count() - 1)
+    parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count())
     parser.add_argument("-c", "--ov_config_path", help=ov_config_path_helper, type=str, required=False, default="")
     parser.add_argument("-s", "--dump_graph", help=dump_graph_help, type=int, required=False, default=0)
     parser.add_argument("-sm", "--special_mode", help=special_mode_help, type=str, required=False, default="")
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv
deleted file mode 100644
index d45bbc6e8afce5..00000000000000
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_API.csv
+++ /dev/null
@@ -1,3764 +0,0 @@
-Test Name,Fix Priority
-ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=HETERO_properties={PERF_COUNT:YES},1.0
-ov_plugin_mandatory/OVPropertiesTests.canSetPropertyAndCheckGetProperty/target_device=BATCH_properties={PERF_COUNT:YES},1.0
-ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=HETERO_properties={PERF_COUNT:YES},1.0
-ov_plugin_mandatory/OVPropertiesTests.SetCorrectProperties/target_device=BATCH_properties={PERF_COUNT:YES},1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=MULTI.CPU,1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.LoadedTensor/target_device=AUTO.CPU,1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=MULTI.CPU,1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTestOnImportedNetwork.CreateRequestWithCoreRemoved/target_device=AUTO.CPU,1.0
-ov_plugin_mandatory/OVHoldersTest.Orders/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTest.LoadedTensor/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTest.LoadedState/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTest.LoadedRemoteContext/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVHoldersTest.LoadedAny/target_device=BATCH.CPU,1.0
-ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_OPTIMIZATION_CAPABILITIES/3,1.0
-ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/4,1.0
-ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/3,1.0
-ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/2,1.0
-ov_plugin_mandatory/OVGetMetricPropsTest.GetMetricAndPrintNoThrow_AVAILABLE_DEVICES/1,1.0
-ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/4,1.0 -ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/3,1.0 -ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/2,1.0 -ov_plugin_mandatory/OVGetAvailableDevicesPropsTest.GetAvailableDevicesNoThrow/1,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/4,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/3,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/2,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithMatMul/1,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithInvalidDeviceIDThrows/4,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithInvalidDeviceIDThrows/2,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithBigDeviceIDThrows/4,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelWithBigDeviceIDThrows/2,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/4,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/3,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/2,1.0 -ov_plugin_mandatory/OVClassQueryModelTest.QueryModelHETEROWithDeviceIDNoThrow/1,1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={NUM_STREAMS:3},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERF_COUNT:YES},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERF_COUNT:NO},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT_NUM_REQUESTS:1},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:THROUGHPUT},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:LATENCY},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={PERFORMANCE_HINT:CUMULATIVE_THROUGHPUT},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={NUM_STREAMS:3},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_NONE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_INFO},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_MODE_HINT:PERFORMANCE},1.0 
-ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_MODE_HINT:ACCURACY},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_NONE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_INFO},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERF_COUNT:YES},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERF_COUNT:NO},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT_NUM_REQUESTS:1},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:THROUGHPUT},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:LATENCY},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={PERFORMANCE_HINT:CUMULATIVE_THROUGHPUT},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={NUM_STREAMS:3},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_WARNING},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_TRACE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_NONE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_INFO},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_ERROR},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOG_LEVEL:LOG_DEBUG},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_MODE_HINT:PERFORMANCE},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_MODE_HINT:ACCURACY},1.0 -ov_plugin_mandatory/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={NUM_STREAMS:3},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={EXECUTION_DEVICES:},1.0 
-ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_TYPE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_ARCHITECTURE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={EXECUTION_DEVICES:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_TYPE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_ARCHITECTURE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={EXECUTION_DEVICES:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_TYPE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_ARCHITECTURE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={EXECUTION_DEVICES:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_TYPE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_ARCHITECTURE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={EXECUTION_DEVICES:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_TYPE:},1.0 -ov_plugin_mandatory/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_ARCHITECTURE:},1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=MULTI.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=HETERO.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=BATCH.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_InferencePrecision.ChangeCorrectProperties/target_device=AUTO.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_,1.0 -ov_plugin_mandatory/OVCheckChangePropComplieModleGetPropTests_DEVICE_ID.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_,1.0 -ov_infer_request_mandatory/OVInferenceChainingStatic.StaticOutputToStaticInput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterSyncInfer/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestPerfCountersTest.NotEmptyAfterAsyncInfer/targetDevice=BATCH.CPU_,1.0 
-ov_infer_request_mandatory/OVInferRequestPerfCountersTest.CheckOperationInProfilingInfo/targetDevice=HETERO.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestPerfCountersTest.CheckOperationInProfilingInfo/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3SyncRequestsConsistentlyFromThreads/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsParallelWithWait/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyWithWait/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputDoNotReAllocateData/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetOutputAfterInferSync/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputDoNotReAllocateData/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.secondCallGetInputAfterInferSync/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetTensorWithIncorrectName/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetOutputWithIncorrectSizes/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForOutput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetNullptrForInput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.failToSetInputWithIncorrectSizes/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetOutput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canSetAndGetInput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutSync/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithoutSetAndGetInOutAsync/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithSetInOutBlobs/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferWithGetIn/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.canInferAfterIOBlobReallocation/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedOutputTensorThrow/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.InferStaticNetworkSetChangedInputTensorThrow/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorTest.CheckInferIsNotChangeInput/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u8_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=u16_target_device=BATCH.CPU_,1.0 
-ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i8_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=i16_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=f16_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=boolean_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u8_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=u16_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i8_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=i16_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f64_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f32_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=f16_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=boolean_target_device=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=u16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i8_target_device=BATCH:CPU_,1.0 
-ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=i16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=f16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=boolean_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=u16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=i16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=f16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=boolean_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=u16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i8_target_device=BATCH:CPU_,1.0 
-ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=i16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=f16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=boolean_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=u16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=i16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=f16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=boolean_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=u16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i8_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i64_target_device=BATCH:CPU_,1.0 
-ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=i16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f64_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f32_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=f16_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=boolean_target_device=BATCH:CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelInferRequest/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelBeforeAsyncRequest/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCancellationTests.canCancelAsyncRequest/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCancellationTests.CanResetAfterCancelAsyncRequest/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.syncInferDoesNotCallCompletionCallback/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.returnGeneralErrorIfCallbackThrowException/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.canCallAsyncWithCompletionCallback/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout/targetDevice=BATCH.CPU_,1.0 -ov_infer_request_mandatory/OVInferRequestCallbackTests.ImplDoesNotCopyCallback/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=MULTI:CPU,1.0 -ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=HETERO:CPU,1.0 -ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=BATCH:CPU,1.0 -ov_compiled_model_mandatory/OVCompiledModelPropertiesDefaultSupportedTests.CanCompileWithDefaultValueFromPlugin/target_device=AUTO:CPU,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.precisionsAsInOriginalFunction/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.loadIncorrectV11Model/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromSplitFunctionWithSeveralOutputs/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputsFromFunctionWithSeveralOutputs/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.getOutputFromFunctionWithSingleInput/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputsFromFunctionWithSeveralInputs/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.getInputFromFunctionWithSingleInput/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCreateTwoCompiledModel/targetDevice=BATCH.CPU_,1.0 
-ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModelAndCreateInferRequest/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.canCompileModel/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfoAndCheck/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetOutputsInfo/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfoAndCheck/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVCompiledModelBaseTest.CanGetInputsInfo/targetDevice=BATCH.CPU_,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=MULTI.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=HETERO.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=BATCH.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelPropertiesDefaultTests.CheckDefaultValues/targetDevice=AUTO.CPU_properties={PERF_COUNT:NO},1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/7,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/6,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/5,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/4,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/2,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/15,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/14,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/13,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/12,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/1,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest_MODEL_PRIORITY.GetMetricNoThrow/0,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_SUPPORTED_CONFIG_KEYS/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_OPTIMAL_NUMBER_OF_INFER_REQUESTS/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_NETWORK_NAME/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetIncorrectPropertyTest.GetConfigThrows/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigNoThrow/3,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/4,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/3,1.0 
-ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/2,1.0 -ov_compiled_model_mandatory/OVClassCompiledModelGetConfigTest.GetConfigFromCoreAndFromCompiledModel/1,1.0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_AUTO.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_HETERO.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_BATCH.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_MULTI.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_BATCH.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_MULTI.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_HETERO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_BATCH.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_AUTO.CPU,0
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_BATCH.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_HETERO.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_MULTI.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_AUTO.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_AUTO.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_MULTI.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_MULTI.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_BATCH.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_MULTI.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_AUTO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_MULTI.CPU,0 
-ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_HETERO.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_BATCH.CPU,0 -ov_plugin_numeric/CompileModelCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_AUTO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_MULTI.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_HETERO.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_BATCH.CPU,0 -ov_plugin_floating_point/CompileModelCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_AUTO.CPU,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/4,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/3,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/2,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_STREAMS/1,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/4,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/3,0 
-ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/2,0 -ov_plugin/OVGetMetricPropsOptionalTest.GetMetricAndPrintNoThrow_RANGE_FOR_ASYNC_INFER_REQUESTS/1,0 -ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/4,0 -ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/3,0 -ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/2,0 -ov_plugin/OVClassModelOptionalTestP.getVersionsNonEmpty/1,0 -ov_plugin/OVClassModelOptionalTestP.CompileModelCreateDefaultExecGraphResult/3,0 -ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceUsingDevicePropertiesNoThrow/4,0 -ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceNoThrow/4,0 -ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDeviceNoThrow/3,0 -ov_plugin/OVClassModelOptionalTestP.CompileModelActualHeteroDevice2NoThrow/4,0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={INFERENCE_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_MMAP:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_MMAP:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_HYPER_THREADING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_HYPER_THREADING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_CPU_PINNING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={ENABLE_CPU_PINNING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={COMPILATION_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:NUMA},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:NONE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:HYBRID_AWARE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={AFFINITY:CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={INFERENCE_NUM_THREADS:1},0 
-ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_MMAP:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_MMAP:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_HYPER_THREADING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_HYPER_THREADING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_CPU_PINNING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={ENABLE_CPU_PINNING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={COMPILATION_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:NUMA},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:NONE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:HYBRID_AWARE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={AFFINITY:CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={ENABLE_MMAP:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={INFERENCE_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_MMAP:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_MMAP:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_HYPER_THREADING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_HYPER_THREADING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_CPU_PINNING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={ENABLE_CPU_PINNING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={COMPILATION_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:NUMA},0 
-ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:NONE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:HYBRID_AWARE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={AFFINITY:CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:PCORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:ECORE_ONLY},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={SCHEDULING_CORE_TYPE:ANY_CORE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={INFERENCE_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_MMAP:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_MMAP:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_HYPER_THREADING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_HYPER_THREADING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_CPU_PINNING:YES},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={ENABLE_CPU_PINNING:NO},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={COMPILATION_NUM_THREADS:1},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:NUMA},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:NONE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:HYBRID_AWARE},0 -ov_plugin/OVCheckSetSupportedRWMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={AFFINITY:CORE},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_properties={OPTIMAL_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=MULTI.CPU_properties={MAX_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_properties={OPTIMAL_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=HETERO.CPU_properties={MAX_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={OPTIMAL_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=CPU_properties={MAX_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_properties={OPTIMAL_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=BATCH.CPU_properties={MAX_BATCH_SIZE:},0 
-ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_properties={OPTIMAL_BATCH_SIZE:},0 -ov_plugin/OVCheckMetricsPropsTests_ModelDependceProps.ChangeCorrectDeviceProperties/target_device=AUTO.CPU_properties={MAX_BATCH_SIZE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={LOADED_FROM_CACHE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_UUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_THERMAL:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_LUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=MULTI.CPU_properties={DEVICE_GOPS:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={LOADED_FROM_CACHE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_UUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_THERMAL:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_LUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=HETERO.CPU_properties={DEVICE_GOPS:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={LOADED_FROM_CACHE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_UUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_THERMAL:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_LUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=CPU_properties={DEVICE_GOPS:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={LOADED_FROM_CACHE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_UUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_THERMAL:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_LUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=BATCH.CPU_properties={DEVICE_GOPS:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={LOADED_FROM_CACHE:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_UUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_THERMAL:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_LUID:},0 -ov_plugin/OVCheckGetSupportedROMetricsPropsTests.ChangeCorrectProperties/target_device=AUTO.CPU_properties={DEVICE_GOPS:},0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_MULTI.CPU,0 
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch2_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u8_batch1_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch2_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u64_batch1_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch2_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u32_batch1_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch2_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_u16_batch1_AUTO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_MULTI.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_HETERO.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_BATCH.CPU,0 -ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch2_AUTO.CPU,0 
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i8_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i64_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i32_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_i16_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f64_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f32_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_f16_batch1_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch2_AUTO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_MULTI.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_HETERO.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_BATCH.CPU,0
-ov_plugin/CompileModelCacheTestBase.CompareWithRefImpl/ReadConcatSplitAssign_boolean_batch1_AUTO.CPU,0
-ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={ENABLE_MMAP:-10},1.0
-ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={LOG_LEVEL:-3},1.0
-ov_plugin/OVCheckSetIncorrectRWMetricsPropsTests.ChangeIncorrectProperties/target_device=CPU_properties={COMPILATION_NUM_THREADS:-1},1.0
-ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=MULTI.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=HETERO.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=AUTO.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=MULTI.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=HETERO.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=AUTO.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.InferDynamicNetwork/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_2/OVInferRequestDynamicTests.GetSameTensor2times/function=SplitAddConcat_inOutShape=(((1.4.20.20)_(1.2.20.40))((2.4.20.20)_(2.2.20.40)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferUpperBoundNetworkAfterIOTensorsReshaping/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorUpper/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferOutOfRangeShapeNetworkWithGetTensorLower/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=MULTI.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=HETERO.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=AUTO.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=MULTI.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=HETERO.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferFullyDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=AUTO.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithSetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithLocalCore/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkWithGetTensor/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetUnexpectedOutputTensorBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkSetOutputShapeBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkGetOutputThenSetOutputTensorPreAllocatedMemoryBeforeInfer/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetworkBoundWithoutSetShape/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.InferDynamicNetwork/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request_1/OVInferRequestDynamicTests.GetSameTensor2times/function=AddTwoOutputEdges_inOutShape=(((1.4.20.20)_(1.4.20.20))((2.4.20.20)_(2.4.20.20)))_targetDevice=BATCH.CPU_,0
-ov_infer_request/OVInferenceChaining.StaticOutputToStaticInput/targetDevice=BATCH.CPU_,0
-ov_infer_request/OVInferenceChaining.StaticOutputToDynamicInput/targetDevice=BATCH.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=MULTI.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=HETERO.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=BATCH.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicOutputToDynamicInput/targetDevice=AUTO.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=MULTI.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=HETERO.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=BATCH.CPU_,0
-ov_infer_request/OVInferenceChaining.DynamicInputToDynamicOutput/targetDevice=AUTO.CPU_,0
-ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetOutBlobWithDifferentPrecision/type=bf16_target_device=BATCH.CPU_,0
-ov_infer_request/OVInferRequestIOTensorSetPrecisionTest.CanSetInBlobWithDifferentPrecision/type=bf16_target_device=BATCH.CPU_,0
-ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromSplitFunctionWithSeveralOutputs/type=bf16_target_device=BATCH:CPU_,0
-ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputsFromFunctionWithSeveralOutputs/type=bf16_target_device=BATCH:CPU_,0
-ov_infer_request/OVInferRequestCheckTensorPrecision.getOutputFromFunctionWithSingleInput/type=bf16_target_device=BATCH:CPU_,0
-ov_infer_request/OVInferRequestCheckTensorPrecision.getInputsFromFunctionWithSeveralInputs/type=bf16_target_device=BATCH:CPU_,0
-ov_infer_request/OVInferRequestCheckTensorPrecision.getInputFromFunctionWithSingleInput/type=bf16_target_device=BATCH:CPU_,0
-"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:1}",0
-"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.canCompileModelWithPropertiesAndCheckGetProperty/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:10}",0
-"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:1}",0
-"ov_compiled_model_AutoBatch/OVClassCompiledModelPropertiesTests.CanUseCache/targetDevice=BATCH_properties={AUTO_BATCH_DEVICE_CONFIG:CPU,AUTO_BATCH_TIMEOUT:10}",0
-ov_compiled_model/OVCompiledModelBaseTestOptional.checkGetExecGraphInfoIsNotNullptr/targetDevice=BATCH.CPU_,0
-ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoBeforeExecution/targetDevice=BATCH.CPU_,0
-ov_compiled_model/OVCompiledModelBaseTestOptional.CheckExecGraphInfoAfterExecution/targetDevice=BATCH.CPU_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.readFromV10IR/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=HETERO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.ovImportExportedFunction/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkParameterResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetworkConstantResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=HETERO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedIENetwork/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionParameterResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunctionConstantResultOnly/targetDevice=AUTO.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=MULTI.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=HETERO.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=i16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=f16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=boolean_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=BATCH.CPU_elementType=bf16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u32_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=u16_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i8_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i64_,0
-ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i32_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=i16_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f64_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f32_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=f16_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=boolean_,0 -ov_compiled_model/OVCompiledGraphImportExportTest.importExportedFunction/targetDevice=AUTO.CPU_elementType=bf16_,0 -ov_compiled_model/OVClassCompiledModelSetIncorrectConfigTest.canNotSetConfigToCompiledModelWithIncorrectConfig/3,0 -ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/4,0 -ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/3,0 -ov_compiled_model/OVClassCompiledModelImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/2,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_HETERO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_u16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i32_batch1_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_i16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcat_f16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_HETERO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_u16_batch1_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_HETERO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_i16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranch_f16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch2_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_u16_batch1_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch2_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_i16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatNestedInBranchNestedOut_f16_batch1_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_HETERO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_u16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch2_AUTO.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_i16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SplitConvConcatInputInBranch_f16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_BATCH.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_u16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_BATCH.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_i16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_BATCH.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f32_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SingleConv_f16_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u8_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u64_batch1_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_MULTI.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_HETERO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_BATCH.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch2_AUTO.CPU,0 -ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_MULTI.CPU,0 
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionRelu_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/SimpleFunctionMultiply_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/NestedSplitConvConcat_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/MatMulBias_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/KSOFunction_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvPoolRelu_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/ConvBias_f16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_u16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i8_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_i16_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f64_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f32_batch1_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch2_AUTO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_MULTI.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_HETERO.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_BATCH.CPU,0
-ie_plugin_numeric/LoadNetworkCacheTestBase.CompareWithRefImpl/2InputSubtract_f16_batch1_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch2_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f64_batch1_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch2_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f32_batch1_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch2_AUTO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_MULTI.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_HETERO.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_BATCH.CPU,0
-ie_plugin_float/LoadNetworkCacheTestBase.CompareWithRefImpl/TIwithLSTMcell1_f16_batch1_AUTO.CPU,0
-ie_plugin_AutoBatch/CorrectConfigTests.CanUseCache/target_device=BATCH_config=(=_AUTO_BATCH_DEVICE_CONFIG=CPU_),0
-ie_plugin_AutoBatch/CorrectConfigTests.CanLoadNetworkWithCorrectConfig/target_device=BATCH_config=(=_AUTO_BATCH_DEVICE_CONFIG=CPU_),0
-ie_plugin_/CoreThreadingTests.smoke_QueryNetwork/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_,0
-ie_plugin/VersionTest.pluginCurrentVersionIsCorrect/targetDevice=BATCH_CPU,0
-ie_plugin/InferRequestPreprocessTest.SetScalePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0
-ie_plugin/InferRequestPreprocessTest.SetScalePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0
-ie_plugin/InferRequestPreprocessTest.SetScalePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0
-ie_plugin/InferRequestPreprocessTest.SetScalePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP32_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP16_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInputInfo/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP32_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP16_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetPreProcessToInferRequest/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanValuePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.SetMeanImagePreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP32_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessGetBlob/netPRC=FP16_targetDevice=HETERO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.ReverseInputChannelsPreProcessSetBlob/netPRC=FP32_targetDevice=AUTO_CPU_,0 -ie_plugin/InferRequestPreprocessTest.InferWithRGB2BGRConversion/netPRC=FP32_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessTest.InferWithRGB2BGRConversion/netPRC=FP16_targetDevice=BATCH_CPU_,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 
-ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP32_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=1_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 
-ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=1_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=1_oLT=0_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=MULTI_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=HETERO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=1_setIBlob=1_setOBlob=1_target_device=AUTO_CPU,0 -ie_plugin/InferRequestPreprocessDynamicallyInSetBlobTest.Infer/netPRC=FP16_iPRC=0_oPRC=0_netLT=NCHW_iLT=0_oLT=0_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP32_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=U8_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=U8_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 
-ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NHWC_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NHWC_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=1_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=1_target_device=BATCH_CPU,0 -ie_plugin/InferRequestPreprocessConversionTest.Infer/netPRC=FP16_iPRC=FP32_oPRC=FP32_netLT=NCHW_iLT=NCHW_oLT=NCHW_setIBlob=0_setOBlob=0_target_device=BATCH_CPU,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/4,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/3,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/2,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithInvalidDeviceIDThrows/1,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/4,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/3,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/2,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithDeviceID/1,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/4,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/3,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/2,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkWithBigDeviceIDThrows/1,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/4,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/3,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/2,0 
-ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithDeviceIDNoThrow/1,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/4,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/3,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/2,0 -ie_plugin/IEClassQueryNetworkTest.QueryNetworkHETEROWithBigDeviceIDThrows/1,0 -ie_plugin/IEClassNetworkTestP.SetAffinityWithKSO/3,0 -ie_plugin/IEClassNetworkTestP.SetAffinityWithConstantBranches/3,0 -ie_plugin/IEClassNetworkTestP.SetAffinityWithConstantBranches/1,0 -ie_plugin/IEClassNetworkTestP.LoadNetworkCreateDefaultExecGraphResult/3,0 -ie_plugin/IEClassNetworkTestP.LoadNetworkActualNoThrow/3,0 -ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDeviceNoThrow/4,0 -ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDeviceNoThrow/3,0 -ie_plugin/IEClassNetworkTestP.LoadNetworkActualHeteroDevice2NoThrow/4,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/4,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/3,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/2,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkMULTIWithHETERONoThrow_V10/1,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/4,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/3,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/2,0 -ie_plugin/IEClassLoadNetworkTest.QueryNetworkHETEROWithMULTINoThrow_V10/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithInvalidDeviceIDThrows/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithDeviceIDNoThrow/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkWithBigDeviceIDThrows/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkMULTIwithHETERONoThrow/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROwithMULTINoThrow/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithDeviceIDNoThrow/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/2,0 
-ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROWithBigDeviceIDThrows/1,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/4,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/3,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/2,0 -ie_plugin/IEClassLoadNetworkTest.LoadNetworkHETEROAndDeviceIDThrows/1,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/4,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/3,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/2,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_STREAMS.GetMetricAndPrintNoThrow/1,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/4,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/3,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/2,0 -ie_plugin/IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS.GetMetricAndPrintNoThrow/1,0 -ie_plugin/IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES.GetMetricAndPrintNoThrow/3,0 -ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/4,0 -ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/3,0 -ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/2,0 -ie_plugin/IEClassGetMetricTest_AVAILABLE_DEVICES.GetMetricAndPrintNoThrow/1,0 -ie_plugin/IEClassGetConfigTest.GetConfigNoThrow/3,0 -ie_plugin/IEClassGetConfigTest.GetConfigHeteroNoThrow/3,0 -ie_plugin/IEClassBasicTestP.getVersionsNonEmpty/1,0 -ie_plugin/IEClassBasicTestP.SetConfigAllNoThrow/3,0 -ie_plugin/IEClassBasicTestP.SetConfigAllNoThrow/1,0 -ie_plugin/DefaultConfigurationTest.checkDeviceDefaultConfigurationValue/configKey=PERF_COUNT_targetDevice=HETERO,0 -ie_plugin/DefaultConfigurationTest.checkDeviceDefaultConfigurationValue/configKey=PERF_COUNT_targetDevice=BATCH,0 -ie_plugin/CorrectConfigTests.CanUseCache/target_device=CPU_config=(=_),0 -ie_plugin/CorrectConfigTests.CanLoadNetworkWithCorrectConfig/target_device=CPU_config=(=_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=MULTI.CPU_,0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=HETERO.CPU_,0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 
-ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig/target_device=AUTO.CPU_,0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=MULTI.CPU_,0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=HETERO.CPU_,0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=BATCH.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=THROUGHPUT_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_PERFORMANCE_HINT_NUM_REQUESTS=1_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_config=(PERFORMANCE_HINT=LATENCY_),0 -ie_plugin/CorrectConfigCheck.canSetConfigAndCheckGetConfig/target_device=AUTO.CPU_,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork_MultipleIECores/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy_SingleIECore/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 
-ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetworkAccuracy/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=MULTI_config=MULTI_DEVICE_PRIORITIES=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=HETERO_config=TARGET_FALLBACK=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTestsWithIterations.smoke_LoadNetwork/targetDevice=BATCH_config=AUTO_BATCH_DEVICE_CONFIG=_numThreads=4_numIter=50,0 -ie_plugin/CoreThreadingTests.smoke_SetConfigPluginExists/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_SetConfigPluginExists/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_QueryNetwork/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetMetric/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=MULTI.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=HETERO.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=BATCH.CPU_config=PERF_COUNT=YES_,0 -ie_plugin/CoreThreadingTests.smoke_GetConfig/targetDevice=AUTO.CPU_config=PERF_COUNT=YES_,0 -ie_infer_request/InferRequestWaitTests.returnDeviceBusyOnSetBlobAfterAsyncInfer/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestWaitTests.returnDeviceBusyOnGetBlobAfterAsyncInfer/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestWaitTests.canWaitWithotStartAsync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestWaitTests.canStartAsyncInferWithGetInOutWithStatusOnlyWait/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestWaitTests.FailedAsyncInferWithNegativeTimeForWait/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestWaitTests.CorrectOneAsyncInferWithGetInOutWithInfWait/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=MULTI.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=HETERO.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=BATCH.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Remote_Device=AUTO.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=MULTI.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=HETERO.CPU_Config=(),0 
-ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=BATCH.CPU_Config=(),0 -ie_infer_request/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=AUTO.CPU_Config=(),0 -ie_infer_request/InferRequestPerfCountersTest.NotEmptyAfterSyncInfer/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestPerfCountersTest.NotEmptyAfterAsyncInfer/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestMultithreadingTests.canRun3SyncRequestsConsistentlyFromThreads/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyWithWait/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestMultithreadingTests.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.setNotAllocatedOutput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.setNotAllocatedInput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.secondCallGetOutputDoNotReAllocateData/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.secondCallGetOutputAfterInferSync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.secondCallGetInputDoNotReAllocateData/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.secondCallGetInputAfterInferSync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.getAfterSetInputDoNotChangeOutput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.getAfterSetInputDoNotChangeInput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetUninitializedOutputBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetUninitializedInputBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetOutputWithIncorrectSizes/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetNullptrForOutput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetNullptrForInput/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetInputWithIncorrectSizes/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.failToSetBlobWithIncorrectName/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canSetOutputBlobForInferRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canSetInputBlobForInferRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canReallocateExternalBlobViaGet/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedOutputBlobAfterSetBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedOutputBlobAfterGetAndSetBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterSetBlobSync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterSetBlobAsync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterGetBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canProcessDeallocatedInputBlobAfterGetAndSetBlob/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canInferWithoutSetAndGetInOutSync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canInferWithoutSetAndGetInOutAsync/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canInferWithSetInOutBlobs/targetDevice=BATCH.CPU_,0 
-ie_infer_request/InferRequestIOBBlobTest.canInferWithGetOut/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.canInferWithGetIn/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestIOBBlobTest.CanCreateInferRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCancellationTests.canResetAfterCancelAsyncRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCancellationTests.canCancelInferRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCancellationTests.canCancelBeforeAsyncRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCancellationTests.canCancelAsyncRequest/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.syncInferDoesNotCallCompletionCallback/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.returnGeneralErrorIfCallbackThrowException/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.canCallAsyncWithCompletionCallback/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.LegacyCastAndSetuserDataGetUserData/targetDevice=BATCH.CPU_,0 -ie_infer_request/InferRequestCallbackTests.ImplDoseNotCopyCallback/targetDevice=BATCH.CPU_,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/4,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/3,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkThrowsIfNoDeviceName/2,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/4,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/3,0 -ie_executable_network/IEClassImportExportTestP.smoke_ImportNetworkNoThrowWithDeviceName/2,0 -ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/4,0 -ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/3,0 -ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/2,0 -ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/1,0 -ie_executable_network/IEClassExecutableNetworkSetConfigTest.SetConfigThrows/3,0 -ie_executable_network/IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported.GetMetricThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS.GetMetricNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS.GetMetricNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS.GetMetricNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetMetricTest_NETWORK_NAME.GetMetricNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigThrows/3,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/4,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoThrow/1,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/4,0 
-ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/3,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/2,0 -ie_executable_network/IEClassExecutableNetworkGetConfigTest.GetConfigNoEmptyNoThrow/1,0 -ie_executable_network/ExecutableNetworkBaseTest.loadIncorrectV11Model/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.loadIncorrectV10Model/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.checkGetMetric/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.checkGetExecGraphInfoIsNotNullptr/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetWithIncorrectConfig/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=MULTI.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=HETERO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNetAndCheckConfigAndCheck/target_device=AUTO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=MULTI.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=HETERO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canSetConfigToExecNet/target_device=AUTO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=MULTI.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=HETERO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableWithIncorrectConfig/target_device=AUTO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableAndCreateInferRequest/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutableAndCheckConfig/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canLoadCorrectNetworkToGetExecutable/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=MULTI.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.canExport/target_device=AUTO.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoSerialization/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoBeforeExecution/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CheckExecGraphInfoAfterExecution/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CanGetOutputsInfoAndCheck/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CanGetOutputsInfo/target_device=BATCH.CPU_,0 -ie_executable_network/ExecutableNetworkBaseTest.CanGetInputsInfoAndCheck/target_device=BATCH.CPU_,0 
-ie_executable_network/ExecutableNetworkBaseTest.CanGetInputsInfo/target_device=BATCH.CPU_,0
-ie_executable_network/ExecutableNetworkBaseTest.CanCreateTwoExeNetworksAndCheckFunction/target_device=BATCH.CPU_,0
-ie_executable_network/ExecutableNetworkBaseTest.CanCreateTwoExeNetworks/target_device=BATCH.CPU_,0
-ie_executable_network/ExecGraphUniqueNodeNames.CheckUniqueNodeNames/IS=(1.2.5.5)_inPRC=UNSPECIFIED_netPRC=FP32_targetDevice=BATCH_CPU,0
-ie_executable_network/ExecGraphSerializationTest.ExecutionGraph/TargetDevice=HETERO.CPU,0
-ie_executable_network/ExecGraphSerializationTest.ExecutionGraph/TargetDevice=BATCH.CPU,0
-ie_executable_network/IEClassImportExportTestP.smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName/0,0
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv b/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv
deleted file mode 100644
index cbfbfde443f9a2..00000000000000
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs/CPU/expected_failures_OP.csv
+++ /dev/null
@@ -1,1134 +0,0 @@
-Test Name,Fix Priority
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=673350fadcd262cd7a62cde83e5fd9249c623abced2eb67197b730895bf0767b_Device=CPU_Config=(),1.0
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=ee9526ee9d1aaeec99a0c59b65b9ea92c5578739e34ec6337d30472b2c1fc62e_Device=CPU_Config=(),0.999773
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=289d303553fbee0e0ab09eb7c9e022257af26a2ee5404009e07495fdce5e5cc5_Device=CPU_Config=(),0.901468
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=3b12339cb1eb1fb0f5b717aa08e14ed1035bdee31e925738c09450e8d201f0e4_Device=CPU_Config=(),0.832254
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=e6072c00650315f4b1d496473d1aa4cd29d398de13cd06101b48a76585a1ce0d_Device=CPU_Config=(),0.672244
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=f16a04a44dad091a2091f9f22697ad9a697490d233e99afa7718cb3b7e8b4a26_Device=CPU_Config=(),0.671828
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=641909d407f76c57ef31c99795284255ab110959b2c06d0d0105a40350ab3697_Device=CPU_Config=(),0.665562
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_Shape=static_IR=23e75ebc73f2be90e07a59af3d3c482c1ffd7b0b6c02d737029ab461e6fb3690_Device=CPU_Config=(),0.665409
-conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=947d3f49818bd1a73f7a998de4eee0da3d386c740999bce5eba85be60d7a7994_Device=CPU_Config=(),0.581056
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=435723c35b233b67f5af5af4c09172deeb94d45650f3d7dfb43a3f33f594015c_Device=CPU_Config=(),0.378778
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=be811dcfd3d370d4bf74c8a96bc4bedc3abb3eadca0269c60d69cff1d86a6c76_Device=CPU_Config=(),0.373302
-conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i32_Shape=static_IR=6ed49f949dfeb680604d7abf27a330885f7c733692f7747e9457bf7641c61822_Device=CPU_Config=(),0.332705
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=db733dcfab287c327cba66fe3d3a3534d8d19ad9ab88e1b501bcefcc6f585ebe_Device=CPU_Config=(),0.332705
-conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i32_Shape=static_IR=eca73c8b50e0d6c12f3b1478ef02df65119bca2277e3d1c15ee61cc666c0fe01_Device=CPU_Config=(),0.332705 -conformance_FloorMod/ReadIRTest.ImportExport/Op=FloorMod.1_Type=i32_Shape=static_IR=95a4e5ec2946cc4464b3d5c079d701277daa69e895e1b21f25833a4d7345f2b8_Device=CPU_Config=(),0.332705 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=02070b77af7b11928cb8ccc284b1491b8d51c8479f74b2900246585638456d21_Device=CPU_Config=(),0.332705 -conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=i64_Shape=static_IR=79d866eba77de1c58b283aa573c9e4dcdbe98baa80e6efd9154e4f4f7ba99e38_Device=CPU_Config=(),0.331205 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=a3b348df69cc23edf064b4df507e25fade0ec84e55a19ae4f34f6cbf9d15da0b_Device=CPU_Config=(),0.298373 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=75f0ca24cdc5be0b8bfdeed80190dcc9253fa515947aefb8c1d5db0de709a930_Device=CPU_Config=(),0.289196 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=bc5429e365164c21ef4b57c1198dfb5d387242edb5fade35b7a99ad62747426b_Device=CPU_Config=(),0.274148 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=750445fbeb8ad10861a677ede8b45a09a66924be75ac830e8760e4bfd2d02b21_Device=CPU_Config=(),0.271262 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=6b6b7a0e007e741f4ec85184693a560ea80ac146e87e9d3e4cfa41bb2105ed5c_Device=CPU_Config=(),0.246682 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=58fa35690be29b683722f4d37ef0c6da4b201f5693001e9bc5807a8f044dc9f2_Device=CPU_Config=(),0.246682 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=959e7e2e931ab2d4afa5af6dfe811ea8673d66658a3f4f06268a88d5c42ee2e9_Device=CPU_Config=(),0.246669 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4955b0047c769357dd8dbdf9612cd61c4882443087f0be34936f6feb804d696e_Device=CPU_Config=(),0.219421 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b07cbf9a298168f96b4e0d0648b6a2dbf28994a0e3f6f925a3f01f5ad86d119b_Device=CPU_Config=(),0.213845 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=4baf3e2f4f21767bc3e2cabdedd5f73bea933198f5856f28a7f271c0d92e0b2e_Device=CPU_Config=(),0.209451 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7c483857da60c84265f78cd2725fd7517163e3bba61a2735696c19a36610f186_Device=CPU_Config=(),0.204316 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=7efcc0b11ccdc19094aaeb153c7b79023783aa683cbe12df25a65c2470dc17c3_Device=CPU_Config=(),0.186312 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=81f8c41d64c2b0b3f05f5e279efddcfd2d95baae8bac0ad4a5e60e64b5509e96_Device=CPU_Config=(),0.181998 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=3bafd2af6c5aea1c35751e1ef647259f09a3e77d43671074f976da67dfccf0a0_Device=CPU_Config=(),0.180103 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=32de11a5f1655b7c483ab5aaf23a91d5130a3ab5316505b07ac34d6ba5f0dc31_Device=CPU_Config=(),0.178089 
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i32_Shape=static_IR=9a463ace58ac105170c8caabfab1686634d342d4cf3cda95a2f8048acc934253_Device=CPU_Config=(),0.166352 -conformance_SpaceToBatch/ReadIRTest.ImportExport/Op=SpaceToBatch.2_Type=f32_Shape=static_IR=45b8453f72bf7feb3851f9a53e4396c782373d48ed6d44c9ba927833f4b4e36e_Device=CPU_Config=(),0.166352 -conformance_BatchToSpace/ReadIRTest.ImportExport/Op=BatchToSpace.2_Type=f32_Shape=static_IR=9f40615a720ffea87e596553df6edf33b0be19d5045366507ea1ae38774e6d9e_Device=CPU_Config=(),0.166352 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=bea6472f16de5cd4d0d2f92c1a6726afd8301ba1a1fd0cd81245a65601e7894a_Device=CPU_Config=(),0.135704 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_Shape=static_IR=c655670d7f95e1c1a8687f6dd6d9871119510507ea60d3f767d382bd12683e4c_Device=CPU_Config=(),0.135383 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i32_Shape=static_IR=84f41b6615c2092dc715f00b76538eb2bbf1e948bff938268acc13dfb12c1719_Device=CPU_Config=(),0.134246 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=495a58f5c44e76df384c0fd03fcf059b2c855737e50935185d19ea4cb267a68c_Device=CPU_Config=(),0.132957 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=3baf0478996bfee873f9347dd16397f469979fa96e9d86d9046f80cc31d8c10b_Device=CPU_Config=(),0.12194 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=015eee57022b8981adcc83819d26d2e380536c2d501a1cb00a52eb22de483773_Device=CPU_Config=(),0.118997 -conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=8983b94a29122c128467eaeba13701417ba9c5791479bb6a4a3c0ec377dbab9a_Device=CPU_Config=(),0.0986761 -conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=dynamic_IR=116e9cb63683baba544cd4250799395a5e1ec9406ade0e893c86b22d9f4bb3fd_Device=CPU_Config=(),0.0957363 -conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=8b5855402774fe64888224c96d51405ba201e85f560c8a945efcf0cc8b2c9270_Device=CPU_Config=(),0.0931045 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=8b5855402774fe64888224c96d51405ba201e85f560c8a945efcf0cc8b2c9270_Device=CPU_Config=(),0.0931045 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=7757f03df98458ac7581cee89b96c46d0e40e358cce06083cd51abe45ac09aea_Device=CPU_Config=(),0.0931045 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=8c0f296e7a87e896962c8bd80ba45b52d06c4003f43014d691747db2261b63f9_Device=CPU_Config=(),0.0880612 -conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=012525d1c0f56e6da11f52eeff3e568dda3f560cca9c722d74ae11dc9aa8d7c1_Device=CPU_Config=(),0.0810536 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=515cc9ba645c2d222c6aacba8e931f2905ff072365f7e985ebc88a8fbfad45af_Device=CPU_Config=(),0.0795413 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=921061115928610953f85bf7f35f39d2625cc3416324eaac1b864785b70ba077_Device=CPU_Config=(),0.0776437 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=93da330692e3e2eb8ced7f94ad11c77ed4670da278a163269176fe1b47697e24_Device=CPU_Config=(),0.0744718 
-conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=6c6e5f1549c61483dffdbc6d56b827636b6be45dadaab7b13d73aae27b1419da_Device=CPU_Config=(),0.0707316 -conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=dynamic_IR=8ba9d67f24be8d1129847c020cfdfc396f5027c26671ac180cff7b8e10d4fa62_Device=CPU_Config=(),0.0696201 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=7a278b3a3acec45758a464c87efa19e2c09fd53c6191d552f45f8b53ed1405ba_Device=CPU_Config=(),0.0688829 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=d05229ad04d9027cde364cb214386e8994e3b61253b3a9e38de74615c9b1e014_Device=CPU_Config=(),0.0652603 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5c97baea94607deb7ec7273ca73be24708bf795cdcdf5e95db5a8b7bb482c781_Device=CPU_Config=(),0.0644895 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e8f5e44d24a7c48953fab68bc27364ac2c603945ec50dcb127452450a7327aa9_Device=CPU_Config=(),0.0589244 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i64_Shape=static_IR=91274090503dee360ed6957185820290ae03c632c3dea6479fbc712af0bc1594_Device=CPU_Config=(),0.0570404 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_Shape=static_IR=c2920e603bf0c091c56afa12bd7c50adc2d6d41c0875c65bf0913e5389b9eee4_Device=CPU_Config=(),0.0569168 -conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=i64_Shape=static_IR=79292d461bc7d40630332e279109a859a695a4212a04178a217673b63d8dab53_Device=CPU_Config=(),0.0569168 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=de3441e1c462b6cb4108e95b475dead8d2560cbafd9c02523ae1af84c276f4e9_Device=CPU_Config=(),0.0569168 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=7f49206436a90afd655edbae79c15cfcb70d1668cab626bffcd722c2db6dc010_Device=CPU_Config=(),0.0569168 -conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=1deb7c8c4c65eb8bbc6aa29fc35be018f9487ef869eefa144898dbbb0fb85996_Device=CPU_Config=(),0.0563675 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=static_IR=d8c7ffedc1e0fcfe4fef6b027d51b30f56a9215d356811a858c96fdf80705211_Device=CPU_Config=(),0.0558155 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=96aa325c6b2fd9d3d121c2d5c3b4ece42f387ca9db81acafac9d0d4b50501806_Device=CPU_Config=(),0.0537137 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=56c74da508a2e5caa5252406596ee8eb19dde42d4b4d1252a019dca73f7738a0_Device=CPU_Config=(),0.048913 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a42d3e87f577469a5265c21941c8ead0947472bc310a78b91f142521408259bf_Device=CPU_Config=(),0.048853 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=da1e0da18417d2ed2ce8b54142bde883d2f9b407cc4cc9a505999ccced3c8fec_Device=CPU_Config=(),0.0477481 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=c49f1a91331b478cad0ecdfb910bf25118db70698d78097fa23ae0114924cd64_Device=CPU_Config=(),0.0428098 -conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=2e413e64305e10fc63031011c51a7b7770e41d7ba73e36c93bb2a324f507a0ee_Device=CPU_Config=(),0.0427882 
-conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=c2f6dc18a8a51d936cd53abf903dc784183366a50034b5cd30d4367dc0c190f6_Device=CPU_Config=(),0.042676 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=5922124ba06a65ec3cdda85b1b13802914221acca21c10dafcc6083e48db45df_Device=CPU_Config=(),0.0421306 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=4bd7161dddb8220a2f3c78190cacaf1e7958fa67b45ef8ef823afd56e4e5b3b3_Device=CPU_Config=(),0.0421082 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=9b083ad61b4eabec6071d5f747aab982428ba468409571227940d8b21e2b5a68_Device=CPU_Config=(),0.0420677 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=156f9ddcae5546475fd827dcbb5fe9bb009fe2e93cc3daa519412ab1de96342a_Device=CPU_Config=(),0.0418705 -conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=9651b0191204a425cdf8fe72b04e0f4ec201bd9be7b03aeb0dfa28b938482cb4_Device=CPU_Config=(),0.041569 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=af7ccfd01fa7510e09b2c59d6a4955f3025b0d3d8881d51b81e17ad511e54365_Device=CPU_Config=(),0.0407652 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_Shape=static_IR=493543671b825e6d048b8ea0af7b49bf2e940f776aa518342e02f95e06da6558_Device=CPU_Config=(),0.0398763 -conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=2c36962e7c6fff7a6d504a6a5a8315432d023c5f33954e490d353b34e99a28bd_Device=CPU_Config=(),0.0398188 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=a08770a83a9619323ab2db111996acee7f11b247ac4cae9a75b387b7eaa153de_Device=CPU_Config=(),0.0393199 -conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_Shape=static_IR=6edaa734e215f7c4ad6ba69798731a96fdf2bab7fe1c50f5bea84c36bb9e9453_Device=CPU_Config=(),0.0387517 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=62bf39c23da0f2840ab9e5a91a2c0cf7afba070aa66090bdaaa094b8cae573ea_Device=CPU_Config=(),0.0373961 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=static_IR=7d99619da324fe7940479a1e74bb78dbff2d978279d4ee97472632fcc0b58903_Device=CPU_Config=(),0.0373756 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=92844ccbd8c494e23b839b10339e16f38ea98423989e53a0bfcdc0eff6d640f0_Device=CPU_Config=(),0.0335313 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8d81a71edde523e9fd3d8a05af75bd2f93ef77372c3eb77f3dcc507625ae929a_Device=CPU_Config=(),0.0328868 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2f8c29ce317db520ec217ba6bf1903193cf3ad239073528a06b536eb1a0eceba_Device=CPU_Config=(),0.0328868 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=i64_Shape=static_IR=effde21c71b10c54b9a75982d2990604839e673e202e0201d3ed1cd98228f452_Device=CPU_Config=(),0.0314886 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=4277adf0897c541911c5195f1940aa8cb1c553a0c169796309ba5540ca572f38_Device=CPU_Config=(),0.0314323 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=6248a7a36da5bf2d22ec987a5a7a9546f99dc7fb105cbe1bdaa9c7f09eac06e3_Device=CPU_Config=(),0.0313963 
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=181677be9e90edcb02f53b9b5bdc3a7b78d05a590bd1794045d6e4fc37827295_Device=CPU_Config=(),0.0305188 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=287cf907287e73809a08e7668b8e8a221b6729c239a61bcdc13e7c2e72466f99_Device=CPU_Config=(),0.0297306 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=18119d6ae879f263f5c7e40c7afa6bdb04604ea91eda8b484c5669493133d951_Device=CPU_Config=(),0.0297306 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=5ba67bedf28f65266a777765f39eae6f23972fb76ecb75ad79b77708e8847a1d_Device=CPU_Config=(),0.0297045 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=24e4b673007068a0740c75c5acbb44d3b50b78161a2c63983860ef923c233f2e_Device=CPU_Config=(),0.0281836 -conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=35792850a7dd5ad4a02683e1081bb4545a217b763ae1d96a1b23e10824115120_Device=CPU_Config=(),0.0281836 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=c1af66622df4ee3bab6949ae7d9997b10279370807ebd36b604c4e39ccedd1f4_Device=CPU_Config=(),0.0281836 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=69be0539fcba434eab3fecde8ed60c58ff9484aa9613f3851948080651aeaa26_Device=CPU_Config=(),0.0275631 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4ee6c09b308afe04fb7463a0fefc5d7213c7ea6d70d2b8c3ada6240567ecd2f3_Device=CPU_Config=(),0.0268406 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=662130dbd301b809acaa39a553fdcfaf52d5d67c4e286f3c25e26ceee6bac36b_Device=CPU_Config=(),0.0253782 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=d8188607ce80ab08ebd10fa9538e11d19413aca4db7e3f63b49256d097bfd89f_Device=CPU_Config=(),0.0250005 -conformance_Swish/ReadIRTest.ImportExport/Op=Swish.4_Type=f32_Shape=static_IR=f7ddbfc1322949a31ce9a4577e1019ee5855b47bde5272735093b60e3a4e3c5c_Device=CPU_Config=(),0.0247045 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=f9766ba3e833aad7e53270bbf40a31021b9c97cbb58b03d31efef04414978258_Device=CPU_Config=(),0.0246585 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=42c894ffdf419c0d71fe6bfa20664d552394d52963b5ae2bca69193867c6731e_Device=CPU_Config=(),0.0242879 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=411ba8c994b992fe5f486e5632010cc64c780b4560623fabdcce7eaeae914a49_Device=CPU_Config=(),0.0242879 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=10d10c74f3b358661571be269594f003f240506d0b2df84e20f1181011124010_Device=CPU_Config=(),0.0238858 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=Softmax-8_596_Device=CPU_Config=(),0.023874 -conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=9519f0f007d875179d5a4236590f4254f0a2954c8ed453484a475b2b3a9d2d6b_Device=CPU_Config=(),0.0236502 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=dynamic_IR=1e90f15ac4cccc70721a3d177d9a1c54da3d8a9534595d40fdb21c89594b0a8c_Device=CPU_Config=(),0.023276 
-conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=dynamic_IR=5b7ebaab99946e71bcb8aafe28711160144a7aa3c17fdf014304711c7bf62bcb_Device=CPU_Config=(),0.023276 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=a21b8641bb78c7aa79914b64afd28d63c5b503bf6ab15d823b280071e5660ff2_Device=CPU_Config=(),0.023276 -conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=dynamic_IR=90fb30f8c780dbd8137e3aa5e6b5a575bbe658259931f44c7084809804e15b52_Device=CPU_Config=(),0.023276 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=dynamic_IR=90fb30f8c780dbd8137e3aa5e6b5a575bbe658259931f44c7084809804e15b52_Device=CPU_Config=(),0.023276 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=3de649a3fe1e333f3722ec17f8e4f518aa87d7e08a2bfd1ba21399ad3d469888_Device=CPU_Config=(),0.0229607 -conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=59cecddb7722c2a12a869435c38863e27e9b3ead7fa47a268241d0cb7b524d9b_Device=CPU_Config=(),0.0228497 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=9da6d7f7669f9f09c739496035cc760162d1371e31aec96752e0628f6f80d87a_Device=CPU_Config=(),0.0228497 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=621fc8f6cb7ec36474fd43d8658886d20ada78b8ff90f21a1dc5f44ef87a5797_Device=CPU_Config=(),0.0226964 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c11c12ac7fe4b82c554799ef0a56b4605c3835ee505a5ae4e6f0f79231d43b4f_Device=CPU_Config=(),0.0218955 -conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=dynamic_IR=37b3937023f6db3dcb7e0c10452bd06d11bbd6d40e83b89bf04185a347a085a8_Device=CPU_Config=(),0.0218805 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=0f85dcbf41aeeb2642d5084973552e963986ebdf6ff4243b7e2b40465fb07786_Device=CPU_Config=(),0.021815 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=a88f475a3b278f5f7af50d8ccca29d647b2d84101fea5f894aa02f6edb7e8fa0_Device=CPU_Config=(),0.0211883 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=5933593e09765497380873b7bfe96f7c5f609673d5943c7e810931c1617146e3_Device=CPU_Config=(),0.0206648 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=b2b3d853a37428ee35fa950ecc1e6dbd2fb70b62e49dba54329a464953002301_Device=CPU_Config=(),0.0203433 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=69288cafa02066780e732eed8dfab50ebc3575e4d57fe4e2e4f55db87decef72_Device=CPU_Config=(),0.0195235 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=2eded39dfd7ac5046f51ef6ef6e33e759ce39c06d2641d877895d0a43cb4a6d2_Device=CPU_Config=(),0.0166151 -conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6c8eed6a6895ce60fa6b86361e8eee4cec9c8f85128ffa2f913df16c48839191_Device=CPU_Config=(),0.0164502 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6c8eed6a6895ce60fa6b86361e8eee4cec9c8f85128ffa2f913df16c48839191_Device=CPU_Config=(),0.0164502 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c47e995bd374df7c18e3a95107fd4caff3615eda26c422b91ffa96c6418d4006_Device=CPU_Config=(),0.0162285 
-conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=aea02f3c3b0404dcab9cdb13a0fe007af01e6be80d807ad870ec241d2d49f65a_Device=CPU_Config=(),0.0159963 -conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=a390802bd8c25df38450ff2c8149a560200a2b125c1fea04b09535f2c71dc012_Device=CPU_Config=(),0.0159963 -conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=dynamic_IR=3ae8829c4d6055ffe442b07c447b815ea50238534f49ddb610ce0feaaf769473_Device=CPU_Config=(),0.0159241 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_646_Device=CPU_Config=(),0.0159167 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=f027755690bedff8f2782bb1993d2d92bcec4981b3a0e4c1d9c6dd3b10a00c2f_Device=CPU_Config=(),0.0158331 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=37b3060eea0781b1d483e181cccd1a361570fbbdcf0ddbe5c281a7c541aec6db_Device=CPU_Config=(),0.0158015 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=25f2e85c4559b20da7e5d98431dea3bbf4afa3cfcea581c25d3f754d3fc01d2f_Device=CPU_Config=(),0.0158015 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=65239e34c3c31c17a90305271e6d182d345b334d3731fd53ef96de1894c6859f_Device=CPU_Config=(),0.0151353 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=5ec27755fb04cb98f9e65c0279beb3b390ba06a409e5b117df85052fdf9e9dc5_Device=CPU_Config=(),0.0150812 -conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=fae78534370da36a4babd3fb038590501102f83d9ef2964e5a07ff15d99e3c3e_Device=CPU_Config=(),0.0149938 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=681a46c0635072ade7d9d4a91834513ebe215af64b390b1962365ee16bf7c38c_Device=CPU_Config=(),0.0148906 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=000c81a34f81c3efda11ab91a9bc7be609c2f46a8f9e7921f39337666e1ffdd0_Device=CPU_Config=(),0.0147421 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=0d293ddd339f696ae48e8a5fff22c95de3c06b84307f6116a2bb0103905e6b31_Device=CPU_Config=(),0.0146742 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=22778eceddc10aa01f8c55f51e290e067b136653b797c0a62986ca31e92e8a36_Device=CPU_Config=(),0.0145297 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=04fe7a6d7151d405093cb00d55390a280fa94987869ad5129f01c4b96b28bca1_Device=CPU_Config=(),0.014321 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=a81eeecb2ca6b3bea6a3e2e61bbc5c7f07fa3253e9ba19b9b53a7a3b1746cb2a_Device=CPU_Config=(),0.0142721 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=448efd2be628c2184cdbc37975b906144ff875bf252ff3218ae85ce3d9525084_Device=CPU_Config=(),0.0142515 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=907fd7700664738545f3f2f277c50406d767fc101701ba4d41dc532090114903_Device=CPU_Config=(),0.0142371 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=static_IR=3a90c9a2040b33603eee7a2b4e03215813089680ca26b7184450b38bfd1b5ca8_Device=CPU_Config=(),0.0142091 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=880a326f16f31b6ad99e7e1ebb7b1d68237b626a4d08e9093b830025dacc13c4_Device=CPU_Config=(),0.0134903 
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=ba6cda6f3967c05d846e67774df2b39815a21279658eb64503e8d60379d9b0d5_Device=CPU_Config=(),0.0134014 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=87a6c487fa875ad12aef43b6bf87c0673810054173312e16534b2988fc80cdfb_Device=CPU_Config=(),0.0133972 -conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=i32_Shape=static_IR=a732f273066d374acdca416747507d2faae556883e889e9b722fb66f4ee2b57a_Device=CPU_Config=(),0.0133972 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=99034f2132c9d40f5157baf60481ef43c25bdba6b8878541d8bc32d9f4b6b662_Device=CPU_Config=(),0.0130759 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=626feb9f9600951976b771100e07f28dddcf8d13f68b29dc48c707346f9cb698_Device=CPU_Config=(),0.0130047 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=affe5eee2813d0c7a9403233d62243b060eecc4b7bd5ee8948cbc7da77c98514_Device=CPU_Config=(),0.0127628 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=704ba6fbcc16d8f88a04decf46127516ab809befed2e9cf94bc746322913a961_Device=CPU_Config=(),0.0124724 -conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=56d6a6e8decfcceeb35bc53e605ba6c83594056d26af7ea48f022a1b60fd103b_Device=CPU_Config=(),0.0124537 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=98a922389230aac2aff43a0a380e67d9f14b9d091ab61e90a37105529d0a211d_Device=CPU_Config=(),0.0122012 -conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=Clamp-1_31_Device=CPU_Config=(),0.0121839 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=07dcad9b29dddad274496815af8a4e58170fb15188d6a6299a4a975f8c03c05b_Device=CPU_Config=(),0.0117967 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=59929a87c852e17a6ae08c2d55a8d2591897c22d08977b9b6d843a3fd90a3908_Device=CPU_Config=(),0.0115026 -conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=dynamic_IR=ed1a9abf3e73e2bc5df473132c64b3ed1795e33d18c76607fe3b5842edd88276_Device=CPU_Config=(),0.0114804 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=3b2426ad5064ebc5ffdae90c7f895562ce3d046ecbf6c6fd8733245c6aed2066_Device=CPU_Config=(),0.0113197 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=Softmax-8_283_Device=CPU_Config=(),0.0113045 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=75318a34c705d8c8f5e4861fdb4e6e9c0834d77dd5ffd3dd55b3f01cac70891f_Device=CPU_Config=(),0.0112084 -conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=11cacdd465d7fe0e32568f8a17f8b623f4f2b58de34f30ae709d9643a00afc9b_Device=CPU_Config=(),0.0111659 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=95f51a08c808163b0e7c072480dc341da6243e4a7a0dd59af57c5d4f5e24acf9_Device=CPU_Config=(),0.0110416 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=0bd257d3c0af5fa7cab5329c930be9f7a4b249b367f1ca78810627caf6ffe230_Device=CPU_Config=(),0.0110416 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=d48fdd131bef036852bc118ebfc179b359e7db7856d6369347588a2a26231204_Device=CPU_Config=(),0.0109442 
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=e48ed87de5febdc5c1300e68dad4c51d7e97f0ea34b7f9c73949926634b95685_Device=CPU_Config=(),0.0109401 -conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=dynamic_IR=83e5261201c85bdc708176dd4631bd12875f7a197428f4b79e8c9877efe33139_Device=CPU_Config=(),0.0109401 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=c87da4885b3edcecc40038d053baf9059f674ba4a9e598f20fa83b61f7e9ec91_Device=CPU_Config=(),0.0107847 -conformance_SoftPlus/ReadIRTest.ImportExport/Op=SoftPlus.4_Type=f32_Shape=static_IR=95dc29ef42853ef728462a5a72b472018c058011b6faa222daed717cd72d7881_Device=CPU_Config=(),0.0107018 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=644ec28c2d27db943c11c8802bf28aaf699eebdd1712f1ab6681d8a0cb0896bf_Device=CPU_Config=(),0.0106568 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=0940386130c9b189a4e1cac6c8798e1f6fd05cb2f9098204e74378df9a4c2b5a_Device=CPU_Config=(),0.0106568 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=64e8a6ee56262645f17af6a3f334bf00e56e3712be5ef66f9f10cd32f80258cb_Device=CPU_Config=(),0.0106118 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=150df8d12195c8e144a3eef6338a3ffbd6b6f3fbbebfc66757b1f79270f9bcb3_Device=CPU_Config=(),0.0105383 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=dd5a14fcf166ed7f3e9b173cb3a4e89a48962e865d2a4025a7209b47325ec6c1_Device=CPU_Config=(),0.0104435 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=8edd2105c3fb600c9ffdf53b2949d3d98fdf682457508081805faa5b25447be2_Device=CPU_Config=(),0.0104219 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=3236962e2389af3c1af3caccbd56411e3b230a6d760ee4ed3c9fbe4daa22625d_Device=CPU_Config=(),0.0102603 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=7f0978a6173bc967f1a56da2aa7ac9a8ea817f6988fb5ed96511af2d799dfd00_Device=CPU_Config=(),0.0102283 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=59d92654e12b73b99b4fd5bf34b64dfdbad6d1b34dbb5ad67e5baace21490bb4_Device=CPU_Config=(),0.00987668 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=baa686f705a406660a8e77cc04ea3bcd35351489b50b2f8fadbbffa502f694c0_Device=CPU_Config=(),0.00978874 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=e00d5b13c12c8d618de0de7a11dadd363e1d47bf3fd8499562cba78ccd5c81c3_Device=CPU_Config=(),0.00978274 -conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=6dc0431d82bc80d48dfc97fbab04605e5cf8ada37f024b101153620df62424b9_Device=CPU_Config=(),0.00976088 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=b49875760194a46f0ec6cc449f39e549851b25f14b11fff14c13e8194e05974f_Device=CPU_Config=(),0.00915451 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=de1a57ba5fe4c6fcb783470723e90b7fa67c5b91d9fc3d0ac3563ba0ea854eb6_Device=CPU_Config=(),0.00895819 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=b564b2276303fad81c752012ef1b3324bb2dd79518cb1512c5e0bf353301ac43_Device=CPU_Config=(),0.00894717 
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=78120d308d75322fb4a249d40ebde4d59cfaedfc7aa96504ba759ac5e1a76ffe_Device=CPU_Config=(),0.00887317 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=48383fceab6e40aa7658c277d564254ebe0dc2ad97add147355963758ab63ccb_Device=CPU_Config=(),0.00883933 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_707_Device=CPU_Config=(),0.00883452 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=1598476e13d17642505eaa9c1b8e70883e75a5304abd407434941c9487a96cb3_Device=CPU_Config=(),0.00845929 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=33719d2fc9285c40f5a7ef6a679d3d7cd6e1776a2af022bfd37d27fe14cf7711_Device=CPU_Config=(),0.00844865 -conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=0bde620ed06caaece32792dedc44f685d5e25da07c429196c033637fa2cce99a_Device=CPU_Config=(),0.00825316 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=7e14ebac4d39514f21befc50dd0038b8de7a34b27847c80f0dbf802816a425b5_Device=CPU_Config=(),0.00824875 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=d2b71d56aa3ceb9e89ae7d8f85d95490898c5d70732f550e9342591928348dc0_Device=CPU_Config=(),0.0081504 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=b67a5e877b0a643550bf856cf873a5c49d4b25619052963a168027b55c961df4_Device=CPU_Config=(),0.0081349 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=15199c5cf33e6d47dbacc3f6996130e104a06e73fac4635e8ae75c0cc0aeb46f_Device=CPU_Config=(),0.00807368 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=5caed40ca7c65cc5ce6f0958876237fefb1ee5e99a97230d52d373fe7deca7c9_Device=CPU_Config=(),0.00807368 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=00712fc27968887da2b8c8305fd95d17ca73243b0a8199786409df03cbe894a8_Device=CPU_Config=(),0.00796827 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=61dfdb414561907fe7bbeb6a76956faf210c2e51a7253cc1423220452bf784ce_Device=CPU_Config=(),0.0079656 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=i64_Shape=static_IR=f956cf5fb56bf0ace1f7759a931579cf7857a5672bc0e43876e2640cdeb9c1ee_Device=CPU_Config=(),0.00790587 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=684bc4ab91e211f69d642714baf6e16602736f54fcb7afa9d03f29e90c41aa92_Device=CPU_Config=(),0.00782781 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=f50865599f7260e05c92e4c4af2b23673d9c72228397e36a1e3fb694d01bd2b6_Device=CPU_Config=(),0.00782781 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=cb5b2e21d2218773d120651ce7fa0513cc4cceb20f85eb6a5575a385dc377bb4_Device=CPU_Config=(),0.00782781 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=db82fcef6ae0e96d00ff5802813f136082c26c9830cf60131cfc9dd9edf97fd7_Device=CPU_Config=(),0.00782609 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=1edf476069174dfd3a7cd3e44accf10c3307ae06a1e79531c87e2847daf3268c_Device=CPU_Config=(),0.00769562 
-conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6e425463657c7ada080914883df93e6f76afb14da959dd90d60ffb32f4e40d30_Device=CPU_Config=(),0.00765294 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6e425463657c7ada080914883df93e6f76afb14da959dd90d60ffb32f4e40d30_Device=CPU_Config=(),0.00765294 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=3e5e29399ace21ea387d6e9ef94266b16549271a55badc376ef9a42b024fcf23_Device=CPU_Config=(),0.00761644 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=u8_Shape=dynamic_IR=02078fa4aeea941f80b3d41995fe889c3d50696fbb83b00c7c2e1fd26eff0d22_Device=CPU_Config=(),0.00761644 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=d02c304f5dc9d0e0e3b7b1bf59d67a2e9b25b55afc4137c38c2da33b6922df77_Device=CPU_Config=(),0.00761644 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=7f826e9cff9d0bdfbe349b216151765ebb6bdf40d8f2e443f2d9412a16388af5_Device=CPU_Config=(),0.0073883 -conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_Shape=static_IR=4127903b253038cda36eece48daf11e7255ff9bc6daaad10cf763a0c528f9287_Device=CPU_Config=(),0.00735097 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=701c8cac9901694d74715cf8c855a3a399b021a7478e4a4e6c99ae2989b1162a_Device=CPU_Config=(),0.00731518 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=1c610a7e945be537f80fb5dae18eccedfb650940acc12469588621ef4b9d46dc_Device=CPU_Config=(),0.00731309 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=aa611e5d03e752c161f6b8f51a6fdd5f96870b93b3d6ed4ea3e2c91cf190ef4b_Device=CPU_Config=(),0.0072154 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=2aeee98715d34a6d195fb40b33a05a60b14970f766ddfa8436bfb703c24661cc_Device=CPU_Config=(),0.0071632 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=fdeaed3237a0794ce3ba6655c8e21590e6aeb35504730b0ea5f5ded0acb73f45_Device=CPU_Config=(),0.00716217 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=MatMul-1_823_Device=CPU_Config=(),0.00715919 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=15e5b0e0515bc578b9cf0f3b33f1fc2049749b99ffe1525b27f82973f69e8c59_Device=CPU_Config=(),0.00714277 -conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i64_Shape=static_IR=d4367bfd7a56153cf1a3ade9af680d7c66dc5296f57a150d47238f0212259ce6_Device=CPU_Config=(),0.00706339 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=9728dcf8550f62af4b4908e2467a03606982e94ec695582c10a86d75e5199a3b_Device=CPU_Config=(),0.0068293 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=660303cf2026018794a020306c6cdce198d2a7b1297ca36b94dc3de152c2121c_Device=CPU_Config=(),0.0068293 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=static_IR=1b127181c8972ffed5fcb3b6d7bae2b1232ef1d287341c30dd2bca8029ee01e1_Device=CPU_Config=(),0.0068293 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fd52395f8ed9e7833d5f9d723eafb990fc83ff45577b0df389e3d0975422713f_Device=CPU_Config=(),0.00666364 
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=1eea2d24d41037f619efe5fd5159170ae7de93a26c9da9add795008cfd2395d9_Device=CPU_Config=(),0.00655082 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f552ae7f024f140c80fc762b702bbfc97992935834acf40e4e1c1a4634b8166c_Device=CPU_Config=(),0.00644546 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=dynamic_IR=510005ff9ba3a023fcba5026f3e289f98f4ad7a8ece1f058a573143acccee262_Device=CPU_Config=(),0.00640201 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=76b583a30803001c17f27be1594d5f2591b55983398ab16f1ba143cecbf65e7e_Device=CPU_Config=(),0.00638788 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=816214015175abe6cd3aada5e209a5107811de5010bfe8ee1a2ebcd998c0c444_Device=CPU_Config=(),0.00626376 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=260d12b9a5e8a71c726df95d4b8fb30bb764e1ba82a50fbe4091e386a280a4ac_Device=CPU_Config=(),0.00626324 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=960183c0bc2aaff513e4e49e65f056479572bd2c3a7e5d58a230273a8eb255f0_Device=CPU_Config=(),0.00622342 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=d65f51bdade5ded2bbb821d50954070419ca1ade57ebc69efb6e20105cf8707e_Device=CPU_Config=(),0.00607089 -conformance_Range/ReadIRTest.Inference/Op=Range.4_Type=i64_Shape=static_IR=e7b10e4cadc7aac08f4c2275f37ef0aa95351ddceb40e48422ea3c4893f69764_Device=CPU_Config=(),0.00589991 -conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_Shape=static_IR=e7b10e4cadc7aac08f4c2275f37ef0aa95351ddceb40e48422ea3c4893f69764_Device=CPU_Config=(),0.00589991 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=6f68825fed94645e4a5c23338a98770d770be8f9600566ab93521bc858733911_Device=CPU_Config=(),0.00576172 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=ca090e13d7734505d44fb6281626c442a5740326a0575175de68b1f210bdca07_Device=CPU_Config=(),0.00576154 -conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=abc38c684591dfbc656b1c9aa0553b107f2b6a593665cfb1d802d1812177518d_Device=CPU_Config=(),0.00573863 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=05bc655a96a41f363fff6e5d86bb46b1d81d52aa40eaa494aba10bc94a6ffb12_Device=CPU_Config=(),0.00564171 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=ce9b4ef17f734c1e93c026b42e90c7fd23f03bb1b3acaf125a689e9ebc99b910_Device=CPU_Config=(),0.00563516 -conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=static_IR=674f918d6dc4e58697845798361884f78757bfd77105e77c2ed9a3373bafd26b_Device=CPU_Config=(),0.00561033 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=533c387f9897d975697f04ecebefd862bb6d00ae0c0d9e7a44c2e8cd15520358_Device=CPU_Config=(),0.00549911 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=e9ecdac1f1bbf6bb39fe2793cc4483eafecb7f19ce9d4cde3aa59b677b70270e_Device=CPU_Config=(),0.00545125 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=3ac96e01f3190a6709a0e4708c2022350d132352cb72c04aaccfe5a2f57a715f_Device=CPU_Config=(),0.00539298 
-conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=94fc3ad4faaa80acf61d6ba93e2bb5ebebd64368c70f1e9f000030acac74f428_Device=CPU_Config=(),0.00536907 -conformance_Mish/ReadIRTest.ImportExport/Op=Mish.4_Type=f32_Shape=static_IR=46c99592c04d5338e2fb1133e3523a64116533d5a3bf2861b1c483e5ca1dfd8e_Device=CPU_Config=(),0.00530756 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=c3926b158f4f047c751083863a119cf9d9cf7312cbb15622bfe803af9caeab68_Device=CPU_Config=(),0.00521133 -conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=d963dcc3864672daa6b46f5ca80e74ee69849d1d63cf73a3e68430be6a6873d9_Device=CPU_Config=(),0.00519159 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=63cea76f3465d3438fe3fab7c354b67fe089d3587fa65df90ffda23427acb61d_Device=CPU_Config=(),0.00510348 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=71051cba4c9a998a5cc0b0b0dcac8c1aaa21cd74ae3922114ec450f9963335d5_Device=CPU_Config=(),0.00509136 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=fe082a13b0104870535aa4114b1e2ad1250f1a7f0eab1a85a0ea1137a3a9560f_Device=CPU_Config=(),0.00508787 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_415_Device=CPU_Config=(),0.00508375 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=7cd317501bbb5df948ba37f6938e6f98e79650dbd366568ab48b63c5363de438_Device=CPU_Config=(),0.00507665 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=Convolution-1_691_Device=CPU_Config=(),0.00492155 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=3b455db06e1767c713c41d6a018939f5e54a6e331d12f22a9fc6e6879d5ac733_Device=CPU_Config=(),0.00492155 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=178ef8e30ed115607129b39d0da9f7ba3a3f3b636a720cd69a96ecddce06670f_Device=CPU_Config=(),0.00467613 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=930f47a3f43d08eaa383c40c23d6477de2bacf1017d925516afe545889e5a3be_Device=CPU_Config=(),0.00464813 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=5e378ae29726d3ab15b6b1c87d668695d13001fdcac0e4e7b12dcf46382f1aa0_Device=CPU_Config=(),0.00452309 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=dbd5259502eb92838d5c9623fa4c498318cb6802bdb56d7dc91d48c144db5ea8_Device=CPU_Config=(),0.00435978 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=81a5cba7b8f166a3fe060fa1f663918a91f9c20d222c31e18ca015db4876b07c_Device=CPU_Config=(),0.00433097 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=09dc61efcefbafa700aadf5407e1910665d8503804995a4986fcafb5266f5619_Device=CPU_Config=(),0.00418814 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=ab6ab6b03327d74e1bd90ec8aef8ecc04ce78003619df2a93e501db9519535ae_Device=CPU_Config=(),0.00411937 -conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=d73496134933cf2b817367232cc3b8d772f8fff2f6c0a3309d669ac946f4a4d2_Device=CPU_Config=(),0.00404628 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=458fd4f9fc9c2480c5fd282c704d3fdd67bafeb7e47e44596a1f7bd0d9d6d11f_Device=CPU_Config=(),0.00399299 
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=18275c839f2d0626c30db50e1dc88ce16387d6585e27fa846e18f06f6a71e95a_Device=CPU_Config=(),0.00397768 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_Shape=dynamic_IR=8e7ad7a4f3edddf289d7e31c4e20b1bd9d5f9908d6b3d1b5ac5560277515d04d_Device=CPU_Config=(),0.00395025 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=u8_Shape=dynamic_IR=0a81684b0e1516061a9da90c8109a3886601b5338e995ac21efce7d56624f552_Device=CPU_Config=(),0.00395025 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=a1455b163f00eb1dd765b3a90c98bc2b86671e0a0d0c7bc835aead2db0ba497b_Device=CPU_Config=(),0.00391343 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f51172a1ec5f3abbd052e4117052d377c5f2be29ab33ba9bec96eae457788e2a_Device=CPU_Config=(),0.00389879 -conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=i64_Shape=dynamic_IR=5369e1bf0be90823d2b1fbe6c33bb53e5e472d9bd468280463ffee061f730242_Device=CPU_Config=(),0.00387919 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=dynamic_IR=5369e1bf0be90823d2b1fbe6c33bb53e5e472d9bd468280463ffee061f730242_Device=CPU_Config=(),0.00387919 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=feb78daf38562ff738b472995782cf2029436b9141ae3347267f5036e5dde521_Device=CPU_Config=(),0.00380596 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b9fb74824bf09cb294553346aea1d0bd832f937710195682483b047548624a02_Device=CPU_Config=(),0.00375229 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_173_Device=CPU_Config=(),0.00375163 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=538bfd91d81c5037b702fa4ef55ca80ca95adc806d7d0ef4a156e63b814645f1_Device=CPU_Config=(),0.00373727 -conformance_ScatterUpdate/ReadIRTest.ImportExport/Op=ScatterUpdate.3_Type=i64_Shape=static_IR=85934ecf09fdd72598fc1990e56ba62425449c729acba542b2fe5c4cf88ae95c_Device=CPU_Config=(),0.00372432 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=0665ac1959a4ddeb6837093c940c3a20c999ac5823a5b265da5eab0d7f99f466_Device=CPU_Config=(),0.0036044 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=ccd548c5a022f7e8b502e80ed6d1d9e4fb6d890fa965969be13c385bd3a21fdb_Device=CPU_Config=(),0.00358025 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=4e31bc41f44952d1e204d953202075064726498abb59eaa3c5eec875b57d11a8_Device=CPU_Config=(),0.00356738 -conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=861cbf1fb134c16c3e3d261d4bc24623005a9bc5717ca1beb94c59d4d94a7a57_Device=CPU_Config=(),0.00355216 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=dynamic_IR=861cbf1fb134c16c3e3d261d4bc24623005a9bc5717ca1beb94c59d4d94a7a57_Device=CPU_Config=(),0.00355216 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=68efffe08a572361f909108557384609e81e7af2d16d88ccfab2f0c1e1464e00_Device=CPU_Config=(),0.00352679 -conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=d99cf84d9427bc9cfb033acaac6483a575ad2ad6223313530830ee62b4fbc71f_Device=CPU_Config=(),0.00349326 
-conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=25341a0e07b6326f7f6f39c2c219e79f5d47107002c8e43081608d4aa0a10d88_Device=CPU_Config=(),0.00347893 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=43f6a5f5df1d35e809d906ff3e090c1ad553210529150c5abea04ae6b0ad407e_Device=CPU_Config=(),0.00347893 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_357_Device=CPU_Config=(),0.00342249 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=f2ebc2722659946861b815e5297e0fbf9a0ae6371c8ccc19b7e29046d5d8c84f_Device=CPU_Config=(),0.00341457 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=db442fe6323dbfc05cea296323949b8501a1d76ea48f923a6c014726c7c335d5_Device=CPU_Config=(),0.00341457 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=2eb305b9f755a12e33ae641a197e8c8db0f5c691df1bfd70ad450f49847030a0_Device=CPU_Config=(),0.00341457 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=cdd6639b63587f2aa8eab5bb2be5cc035faa5651b9f00abc05bfe5d551785527_Device=CPU_Config=(),0.00336187 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=fa67b55cff69e125f4c8428fc96638757022850b5c4400ff9c3ab5c60f4ad2b4_Device=CPU_Config=(),0.00334917 -conformance_Gelu/ReadIRTest.ImportExport/Op=Gelu.7_Type=f32_Shape=static_IR=6f583d3cfb1ca982039b721b059f4ec51a3e6458999021d5c724bbd4a7d6b205_Device=CPU_Config=(),0.00331519 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=e600e9ae6b48eb34f84102989a83e27f66a505de2d89029a93106c2f46b65784_Device=CPU_Config=(),0.00315934 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=62cc8f60ebb09be607c0a916bb4ae8c0e11e2f760aeae7ed788229b2251e23b4_Device=CPU_Config=(),0.00304211 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_552_Device=CPU_Config=(),0.00304203 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=9fddcbe0bc871e4a3b396aebb62df0fd1d47add6c71a42de4f0af7981c91d112_Device=CPU_Config=(),0.00304197 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=da68667f0a4399a6a11d4965f89a43ad427d6a78205c94c80cd80b01f8e3c9fd_Device=CPU_Config=(),0.00301216 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=9fa54c3adb08e981c96e0e55f79617f264b982018cc5d5a93b734cdd9f4b2f3b_Device=CPU_Config=(),0.00295612 -conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_Shape=static_IR=3c623601469fbdda6999501e41873f23bedad2ddce6c577483133547edf9c67e_Device=CPU_Config=(),0.00295612 -conformance_Abs/ReadIRTest.ImportExport/Op=Abs.1_Type=f32_Shape=static_IR=1e46e99909597b0493b0184f359c42a59a8f5e1f5c0faf0e86a6fb41482fab4c_Device=CPU_Config=(),0.00295612 -conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=dynamic_IR=56088c6cb719f2acb0fecc391361473ed932a73d208b8a93d6f28d82bcdb578f_Device=CPU_Config=(),0.00292031 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=db59590787f5c6556484ba1e5f630ec3ab69c9653e1e307c5d47ac823a021ee2_Device=CPU_Config=(),0.00292031 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=939f70db639ab8103259decc5d4aa610919116bd0b0c6742a2422f5ee0fe801a_Device=CPU_Config=(),0.00289925 
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=c1b3d6c7677bcbecbf5a9e3563a599f5d36b6113327cf118b48588009b973ede_Device=CPU_Config=(),0.00288947 -conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=ROIPooling-2_361_Device=CPU_Config=(),0.00288503 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_218_Device=CPU_Config=(),0.00288503 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=7f484b471cd794bc3149e37d69371ac7db90ec0f9c4ad44d337cc20aeebbab36_Device=CPU_Config=(),0.00279896 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=b4ff4ca4ddcdbaa749dd4317b457c474cb7f88877dc40464109216441c331187_Device=CPU_Config=(),0.00278428 -conformance_MVN/ReadIRTest.ImportExport/Op=MVN.6_Type=f32_Shape=static_IR=28427b222b58c0a643337e767c42d0a8f8218fc1a1c8f4b4d4831e15a2a304f0_Device=CPU_Config=(),0.0027644 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=58cfbdfb631546bf631afe078eb03f065f0bcd4128116ba247237954d05db814_Device=CPU_Config=(),0.00276389 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=1509c59ddbc083a013eea8367035705ffa19d64f7d7de0f81ff7a4a33792060d_Device=CPU_Config=(),0.00274947 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=fee152f9aa9e58118b3c20338fbd7260a0242669fcdf3e1b5609e5ba59883901_Device=CPU_Config=(),0.00273145 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=cde993e07f256cfa6dd9812818758938a8d4988b5f294ef45ee363db1611b51e_Device=CPU_Config=(),0.00273145 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=be41be0fb7469fab4d57619133ac8c394851a1d4a2507e15cfcf921a1b6a3476_Device=CPU_Config=(),0.00273145 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=abdcd60fb7cbb8e0b829a58524a530fd79b8267aedc4b991f819892ddd78a837_Device=CPU_Config=(),0.00273145 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=86385a3e20e9fedf62fe83adccff86912bf26c0a0b31434ee88e28e173395610_Device=CPU_Config=(),0.00273145 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=554183f144c4ebe76374c9f648ad34ee9c05276a19414b1c6566f2e1da3ee643_Device=CPU_Config=(),0.0027285 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4941019c334ec49feaa36ac991e897b45eedbcbdde0f54b81101d27bfc978b6c_Device=CPU_Config=(),0.00270038 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=0029548ee6faf2332d990bf282ea8e234afc74a89103071d6b36efa248ff0b29_Device=CPU_Config=(),0.00264197 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=2822148946a36a04db9a47e2483ca082436672e5413671e8e63740c7945b6789_Device=CPU_Config=(),0.00263962 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=6287113070f51e9657625f6137684c029078e795ff376daced90b412d8b9a6fa_Device=CPU_Config=(),0.0026359 -conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=b3747c5c51a2d8c135479cce6f248fa18f4d0ddf7dbfea0f7fedb4234f821e46_Device=CPU_Config=(),0.00261496 
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=600d7d5357fa360c5721df3091682a4186c5bb9a4daf62ec08cb4ad53996cbee_Device=CPU_Config=(),0.00255958 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=3f5a6ad6cf5a08c7d5c512f503b0887e2d82f1f27207f24b018f6d9036d9c0cd_Device=CPU_Config=(),0.00255106 -conformance_BatchNormInference/ReadIRTest.ImportExport/Op=BatchNormInference.5_Type=f32_Shape=static_IR=0438a6d95025d6988703cb0fa507541364e63315a3808ec28332ae3f4298aed9_Device=CPU_Config=(),0.00242287 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=20427a621b7176ec0040abfe0de9e8463aa0630b838a6218036c3e1f8417c86a_Device=CPU_Config=(),0.00241738 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=ea04e03272f14274e58391356f85bfca758d0b6b2a3a57bcf43c5fce4faa646d_Device=CPU_Config=(),0.00236311 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=54ce3e3e2c10b4a7464ec1cc97b284c6d33da77d2647b722f58712f173e64b75_Device=CPU_Config=(),0.00233153 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=cef57da724640624033aa9fe59d8112fbb9e0b453f7a76d2476ca4a5b840aa13_Device=CPU_Config=(),0.00230965 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=de3425001e84345da4e6d36f0131bc37961d8e10501615884da15d1841137c0f_Device=CPU_Config=(),0.0023093 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ead04f1f792dcc7223266f1fd50a315cf3ef9581069a6f7ff058496fc00f67d2_Device=CPU_Config=(),0.00228101 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b06875f1369c3f5b24934294396dcfa467cc15da4241b76c535063d8d32ff541_Device=CPU_Config=(),0.00227426 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=96607c0244d5ab1e0b8898dac797e275557e6a16f31635ffd6fddf145e4b9bd4_Device=CPU_Config=(),0.00220675 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=67e6efe4d3f9a640fa481bbbd9ab914f53391c70757a69f82f29ccba42586516_Device=CPU_Config=(),0.00220357 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=6371e50a4f696c471b5ae89585bd3f55b01591bdad1f050d3223eca0f8ab8be4_Device=CPU_Config=(),0.00216075 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=e09652edfbe32d6b619e1805e0d9eb7ef017137fcc07ee0b9d8ec00387a1386d_Device=CPU_Config=(),0.00216075 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=652fd8c7d8c062dc9ea784a8d0e189b1e0cf15d3fe5d03ad31853f83bce829d4_Device=CPU_Config=(),0.00207407 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=0f365811024474cc556b000b46b320e5b6119d0e0d0a3b4ff6ab9f94f863c9a5_Device=CPU_Config=(),0.00206661 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=dynamic_IR=e3c3f39010a384196def7feced98455dc41ad1242974df3dedd428b944f335d1_Device=CPU_Config=(),0.00206538 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=aca3fbdcfe4768148cc6ba26d50232f880c1e06bdbcc4156c20c9b1159c72e58_Device=CPU_Config=(),0.00206275 -conformance_ROIPooling/ReadIRTest.ImportExport/Op=ROIPooling.2_Type=f32_Shape=static_IR=ROIPooling-2_359_Device=CPU_Config=(),0.00206206 
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=fac4323fbc883646eaec76d38ada76e383732b45632cb58c9ab9609f2eea9c9d_Device=CPU_Config=(),0.00206206 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=123784c58cdc556baa03367b4693966ab4b260066d5cb3592e9fd96c02daa026_Device=CPU_Config=(),0.00206206 -conformance_Sin/ReadIRTest.ImportExport/Op=Sin.1_Type=f32_Shape=static_IR=efca25341ca2e6f359b9d2fb54aa226439c70e4093c0461c3b06e7159339b109_Device=CPU_Config=(),0.0020382 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=d23c45343d51bcafc990238f0326291d893135e55c509dd66388b372353b0f1a_Device=CPU_Config=(),0.0020382 -conformance_CumSum/ReadIRTest.ImportExport/Op=CumSum.3_Type=f32_Shape=static_IR=203272d8b86300e50b9888a77ba70023f4841a46912390e1df1b20c20fa6b17f_Device=CPU_Config=(),0.0020382 -conformance_Cos/ReadIRTest.ImportExport/Op=Cos.1_Type=f32_Shape=static_IR=220eed11dd447f3daa78dae29066e78d8dcd57f55ae05e4e6d8e7e4ace361206_Device=CPU_Config=(),0.0020382 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_276_Device=CPU_Config=(),0.0020382 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=134e05436470da9789928f69ff10f1b79e579e0a51908590ed827cc0c68c717b_Device=CPU_Config=(),0.00203457 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=a8e4944e942b6e72d5aca3dff6c48341017c497953cfe1e076267bfde01e5617_Device=CPU_Config=(),0.00202756 -conformance_Select/ReadIRTest.ImportExport/Op=Select.1_Type=f32_Shape=dynamic_IR=356c2dba567ca294280bd00e21d233c7665f498430d0aee13237d80f687e9f25_Device=CPU_Config=(),0.00201829 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=34f915c4d9ad464fcaf3d1c7f796907a5fae626812a1f16b817fea9f8799eeea_Device=CPU_Config=(),0.00201829 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=b7fab07303568f60e61fded86f3e62a80ffe344c106fbe53b1a05149ed38e75a_Device=CPU_Config=(),0.00201829 -conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=f2163daa6d032cbf90453d98931ce5161ca11d893faefe94e8c36ca8e8b470d8_Device=CPU_Config=(),0.00199584 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=f2163daa6d032cbf90453d98931ce5161ca11d893faefe94e8c36ca8e8b470d8_Device=CPU_Config=(),0.00199584 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=66aeab923c3f0a040d2f6ec872f2751ffc9110624bd0cfb41cb1caeaa8d1c45c_Device=CPU_Config=(),0.00196649 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=Softmax-8_447_Device=CPU_Config=(),0.00196368 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=a7032427adec32a9a4246c6669029be566632be7fe11623777107e2d248769f0_Device=CPU_Config=(),0.00195404 -conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=static_IR=045faf67aa648ff30404a7cad86dbe8460848ce7e95376da7ef546d1671081dd_Device=CPU_Config=(),0.00194466 -conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i64_Shape=dynamic_IR=df149b95dfb03c2e4299b6d5eb2b069d3a87781cf75fb2c9e86c9eecc3ec53e6_Device=CPU_Config=(),0.00193951 -conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=f357e6bfb7a9d33be8f8d5a19a380acae1e10fed80a818335793633a5c6d7e07_Device=CPU_Config=(),0.0019359 
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=5fea1d99cdd3b53ea9f19ea7eeb47d7f16333ce4057c4ccdccea297e512cea01_Device=CPU_Config=(),0.00193442 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_661_Device=CPU_Config=(),0.00193224 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_852_Device=CPU_Config=(),0.00187206 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=641c0e19add62dea6db1975963ec68dca6a309043eb4651650e6b8920a3babd2_Device=CPU_Config=(),0.00187206 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_50_Device=CPU_Config=(),0.00185189 -conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=3b642bc0b7f41144ee2f97bfccca523bb892b945055c8f48349ceee1687e4145_Device=CPU_Config=(),0.00183344 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_635_Device=CPU_Config=(),0.00181953 -conformance_Erf/ReadIRTest.ImportExport/Op=Erf.1_Type=f32_Shape=static_IR=f2865ee0550a6ff85c769deb985feb0c58976d87fc46ddd8fbaba3635c94d2dc_Device=CPU_Config=(),0.00180203 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=5c9d6dde3c9e9e600ff744ca698cceb3e40d9ddddf4dec674bdece39600cea38_Device=CPU_Config=(),0.00179768 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=static_IR=d515cb30db019ddd10dfc0eabe8bb48f6564697323f5c6df0a169e56d086dad7_Device=CPU_Config=(),0.00179207 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_253_Device=CPU_Config=(),0.00178775 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a53ec485395da84a687a69fa91d8a622e99ec1701d46e500e303ca47d8c6d8a3_Device=CPU_Config=(),0.00178323 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=cb2522d361c805056518707f0ae07309c6bad78389cf9a36170c9afe2e0bed04_Device=CPU_Config=(),0.00177128 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=afdbbf6d31fa18a3db2795bda00b1aa63eabe1239ab56c3fd2794c5343d5da5e_Device=CPU_Config=(),0.0017421 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=a75cc90501633eb1dde02ec0cf499c1e17209fb81bac98c98815602538e3e850_Device=CPU_Config=(),0.00173194 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=6e77e818e3e26a8d23120ef257d4318a4fa16bb7d4b6c29f20c1b856cf7c78d6_Device=CPU_Config=(),0.00173194 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=3078fdc470a094b5feaa5c2499b9248320f7fd2addb4ed0358dd53215e8fba7b_Device=CPU_Config=(),0.00173194 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=Softmax-8_616_Device=CPU_Config=(),0.00173194 -conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=b0a0127148b9869a80213c43a1d272656332a96b24dbbb9780e1a6d63d6fa553_Device=CPU_Config=(),0.00173194 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=9a84cc33f7c6ae74d44807e271e00cd6761e5286365ac8feae37ac61b5372313_Device=CPU_Config=(),0.00171604 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=10e5bfcf914927f70ebfaaea34b4db42054442cf84461442bc8e562d0b232412_Device=CPU_Config=(),0.00169767 
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=64f87102b398d9ef37ca87e6f68b01662b3a73e64830af196aa241ca3bdb071b_Device=CPU_Config=(),0.00167393 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=32a7c4639e898f827c9c946cc80e62d42e9dc2cd70484f96f274c2438aa63445_Device=CPU_Config=(),0.00167313 -conformance_Elu/ReadIRTest.ImportExport/Op=Elu.1_Type=f32_Shape=static_IR=964ee358c09b8802eafae2cbba15bade7b0e2d1b00561505d996edde6eaf2189_Device=CPU_Config=(),0.00165184 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d64fcecd88fb1043823dce2bc3d43bb0dbe7b20cd2c92bf303a7df3577499a07_Device=CPU_Config=(),0.00163771 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=739aabae66d0e83243943f0325f4b4a77f277812fc72e516235c457f9a225d0f_Device=CPU_Config=(),0.00163711 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=93b9ad574d297981f1756a96ef67b5a1bd61474db4ebdd0a83457e114d332602_Device=CPU_Config=(),0.00152861 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_347_Device=CPU_Config=(),0.001525 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=4b71ff0f18300d06c8c51fc34b0d02029a1f89622828e1cee4529d1c98e9db1e_Device=CPU_Config=(),0.00152226 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=09284efccf665ceffb2dcf0f852fc0487fb5d67f0db632b588758f53e8c867e9_Device=CPU_Config=(),0.00150378 -conformance_HardSigmoid/ReadIRTest.ImportExport/Op=HardSigmoid.1_Type=f32_Shape=static_IR=bbfbd3996fba98536af25155f2eef85d0a0487b38373a6ccc72b44794929b2c3_Device=CPU_Config=(),0.00148146 -conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=static_IR=f47e85c7d3e4249ac4a7b5f4797cbf087f988535ab65e38031a1e32c9e31d3b2_Device=CPU_Config=(),0.00147797 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_786_Device=CPU_Config=(),0.00147797 -conformance_ScatterNDUpdate/ReadIRTest.ImportExport/Op=ScatterNDUpdate.4_Type=f32_Shape=static_IR=5a03de8f6d5cce2605d26c224b95fbf0db576372f23978439ecfbf24bc2a785f_Device=CPU_Config=(),0.00146304 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=8c2b69cbcef565a426e65d50dec6a2b054cc89b9db122e136b6eb5309397f853_Device=CPU_Config=(),0.00145598 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=c4147b30fdbae4c10e488fdc534e1d3c441c0d2d6b22be772d7312e5eeb02af8_Device=CPU_Config=(),0.00145583 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=ae48356df3666dec6a60ee8f49cf62936e4bce68a3854c2f57a7a404a9f9d378_Device=CPU_Config=(),0.00144508 -conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=403fdd72f12115bc7bb4ac9d7802b20ee48a24c5f9864f6aa9f669528c35c2f1_Device=CPU_Config=(),0.00142068 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=3b696e9ae674f5a9e011f47a0041f2aa2c435a87014c78a4b895693dfc5bc500_Device=CPU_Config=(),0.00141267 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=bf632fb70e238b952f8fb95e75a81cd08742773eca647145ce1c93eccd6d3d7e_Device=CPU_Config=(),0.00138186 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=8671d875fff11f60ce7a3897bff8e03f5ed0ed96f4493380530b122073a76384_Device=CPU_Config=(),0.00136261 
-conformance_Floor/ReadIRTest.ImportExport/Op=Floor.1_Type=f32_Shape=static_IR=614e0755eb064a22a496f2b4fcf211a6691b6f70c2a870b66eadee2cb084eb7b_Device=CPU_Config=(),0.00136261 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=1cc3aea68e6ed44b77c3fbdd16df191781d3af9846b3c2f0b3ebe6f49a378f5f_Device=CPU_Config=(),0.00135254 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=801496d05617ee288fb7b3544f7708845c162a9bea88113b6d31e13ab1ba8c4c_Device=CPU_Config=(),0.00135199 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=9d0a442f5a469d186d024ae3f86385a1eef9a0807851c2c9db651e9872aa29f8_Device=CPU_Config=(),0.00134779 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=ed07d5352a805e13266b73df0608be7a40eb8fc2b34ea7151248f9a8a0734eae_Device=CPU_Config=(),0.00133205 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=121081232c8cc31b4b548e9d87ab6a8a3bdadc29817f21e62ad3fe30b86bddaa_Device=CPU_Config=(),0.00133168 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=a4a56bdfc2aefd644d2c0731ba1d6ead715f2fbaf7a3a8f2cbdbf7b54c38363e_Device=CPU_Config=(),0.00132733 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=76621e332c0f1ab69f243793c338c59e8ca06ca4c65efac68d70d19b97ed49ff_Device=CPU_Config=(),0.00131472 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=86f11a576f71831388fd8b0c048a8301c64efaa7d425055aff724225ce648fc0_Device=CPU_Config=(),0.00128359 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_671_Device=CPU_Config=(),0.0012797 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=642f70339e67182227ed09635978146168c2ad9af72c09e529aa53af7d431a84_Device=CPU_Config=(),0.00127853 -conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=i64_Shape=static_IR=1eda6db3e52c5d066c2da1643f862c488e125a64d4188c735d662d12c6ec5725_Device=CPU_Config=(),0.00127395 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=44dc53add3776593c70c89d823a8f3c4a0af2ff09dda542c2523f3409d749117_Device=CPU_Config=(),0.00126188 -conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=dynamic_IR=56afbcf969ecf89775ce352d4a892959ad64c97bb99e445eeaf32e4caa7e0918_Device=CPU_Config=(),0.00124781 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=00bfc760c075c84ac8a9600fc2320bcccde0c56bc784295ddf69a6011c82864f_Device=CPU_Config=(),0.00123639 -conformance_Range/ReadIRTest.ImportExport/Op=Range.4_Type=i32_Shape=static_IR=112647c3c470208a9dd0fc0fed993e322d5532760428d674033f3c26da355992_Device=CPU_Config=(),0.00123391 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=43cd567a0b8aeaf8cb7d80f4231a2784af92dbbec7edbbd5e5fdab2e69ca36bb_Device=CPU_Config=(),0.00121789 -conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=Proposal-4_941_Device=CPU_Config=(),0.00121031 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=541ce61ee51c3aec940984d4308b740a812c3948cf64ea2f0976933627cee9d9_Device=CPU_Config=(),0.00120833 
-conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=ae148b767c0755950ff8624cd181229a89622fbb48bcf157596b35f92ca8e517_Device=CPU_Config=(),0.00120164 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=cb86c4cb1a3c95ed76e841de54dd50432e0a759ad3f2ef20acd6f71f682b781e_Device=CPU_Config=(),0.0011807 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c48021eb54c56dc966a179cca81966eccbd4acf01d1910fa3a9aaa9e479d07f0_Device=CPU_Config=(),0.00117641 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=a7dd6c61f34e039c376899b0c94c4d06c4c14172112ae0e25976876fef781825_Device=CPU_Config=(),0.00115753 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=Reshape-1_990_Device=CPU_Config=(),0.00115753 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=0309d7155bfc64e1eab204d76dbd6340d3146276ec57db16e59957fbf757eb82_Device=CPU_Config=(),0.00115753 -conformance_ReduceProd/ReadIRTest.ImportExport/Op=ReduceProd.1_Type=i32_Shape=static_IR=b2710dc13ec76c15875b4fcc0231b99d5c59dfa73e7957b726a769b17250f959_Device=CPU_Config=(),0.00115753 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=ac860e8f9af8a45c088a6fbaf2aa1e1acfae94f7691543469d6a9cbb85b3e2af_Device=CPU_Config=(),0.00112143 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_688_Device=CPU_Config=(),0.00111959 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=bdc91572009f19aa348cb4b2d3e0ab6262061cbc94a4be9c794c9c840ba028b0_Device=CPU_Config=(),0.00109399 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=286917561e64e7099017c0977dec3771de03b78b87fcf7370ceb95296f513d35_Device=CPU_Config=(),0.00108758 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=99f16766bd5311b2da02f2547838b0e19dc7a315866f9fa00ee7abda0a7f2f21_Device=CPU_Config=(),0.00107534 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_97_Device=CPU_Config=(),0.0010558 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=668daeca5ceeb15d60fc10bea4f6ccb2e183e52bf6f485efa5f0a5cb8b4ec22d_Device=CPU_Config=(),0.00105317 -conformance_LogicalNot/ReadIRTest.ImportExport/Op=LogicalNot.1_Type=boolean_Shape=static_IR=cf4e48f8253e7128ce7cad1d197a3509ded9dc504bd7a84747a94b8095fc7a65_Device=CPU_Config=(),0.00101901 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=boolean_Shape=static_IR=e854a5cf31021d197ff3e77c01d04865864dd64a82473502c32e3d9eb6fb8cc3_Device=CPU_Config=(),0.00101901 -conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=70a5db6d59043db04d67a285c78c664ca1fe7a0715e0174c7454871dadbc5dca_Device=CPU_Config=(),0.00101901 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=static_IR=70a5db6d59043db04d67a285c78c664ca1fe7a0715e0174c7454871dadbc5dca_Device=CPU_Config=(),0.00101901 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_860_Device=CPU_Config=(),0.00101901 -conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=7105f832c72fd33def244b3670ccee9c7e2031dee5f551a5fb3a28af46e1bfb8_Device=CPU_Config=(),0.00100889 
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=df0572a295bb708ac5ef18811fe0b04e7c36cbe0b5fcc37e235c2e3f602d6168_Device=CPU_Config=(),0.00098975 -conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_Shape=static_IR=ff2f03cd964e107514e96ff8f7290272689d0c5439fecdf0cd40d96c642e98d9_Device=CPU_Config=(),0.000958856 -conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=2be485f9d7041b425c832a7bac72acecde6b2dad8ca5b4a779ea2cd7e17582e7_Device=CPU_Config=(),0.000917405 -conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=2be485f9d7041b425c832a7bac72acecde6b2dad8ca5b4a779ea2cd7e17582e7_Device=CPU_Config=(),0.000917405 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=99fd0e62c96fb4c7d42d009574da8af983bdc62525cc6d79c4830d95fb0ff2bb_Device=CPU_Config=(),0.000916719 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=962afe68df7ca93e98315b255faab467fd30cd16dddfc9f603a43843466fdd37_Device=CPU_Config=(),0.000916719 -conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=Einsum-7_432_Device=CPU_Config=(),0.000916719 -conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=01d20201de4cc725f09d2b4995114e12213d56c1337f53c42cd4bcf9ffde0467_Device=CPU_Config=(),0.000916719 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=2e5f0490b6cb1240d0eaba036b4f0d8b4bbda03034f3be3b3c2dba1d10ffaad7_Device=CPU_Config=(),0.000916375 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=246259acfac3eed0ccc776bcbdebff2748e12f2961248fc01c42eda61ec9eb1e_Device=CPU_Config=(),0.000909166 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ad73c638b382ed50dc3c8bb44cf9c10d3a79b404bb667521aa93e856fcb095f8_Device=CPU_Config=(),0.000836392 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6a4d890c129ea292c49f6046944f10ae1f16beeefa76be31ef5c09919ac3fc0c_Device=CPU_Config=(),0.000836392 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f4eca4552a5488fdd594ab673ebf2d1f210b67337f300c48740cb0615fc8b7f0_Device=CPU_Config=(),0.00083459 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=static_IR=8639142b9cf5b3a191cad6533f04017c8ed33df26bffa1cdc5d53809f2afb0bd_Device=CPU_Config=(),0.000832988 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=dcd65481f62b4db1c8c0c0e965d3bedf255a6355e904fc619e94bb9f04b936e1_Device=CPU_Config=(),0.00082884 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=da75200540256321df4761c0ed0a302ce92314b2194c47c59b30645b8f014b74_Device=CPU_Config=(),0.00082884 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c437af0c9031dfaec3bc3ce59a4f6a64ddc2595803a713e8ae28431a3e1fcb56_Device=CPU_Config=(),0.00082884 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=ab4e17e8b640cae726d4c0dbbe8813188c417cec8031c8671abbc77fdd7be280_Device=CPU_Config=(),0.00082884 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=285a48370fe7ef4dadb07d2a34b8ebab3648bd66519707f498238c12d32944cc_Device=CPU_Config=(),0.00082884 
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=408c0c1b5cfebf617f8d7784d9571bdbf7fdf898c43c8fdf47365148007cabb3_Device=CPU_Config=(),0.000816396 -conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_Shape=static_IR=cdd1a0d6159d160d51d18f70532cb6000f852703cd3ab7551591eaec32a7ebc5_Device=CPU_Config=(),0.000807557 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=1f1a50ee1a8e8faeea20a6bd028cbcf47e3eee195157e894a3c1ebe0c5fcb31e_Device=CPU_Config=(),0.000790994 -conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=8ab43c6fb2385cd2857489bb1a23e8373982b219d21a8b008b8d918667d361cd_Device=CPU_Config=(),0.000789706 -conformance_ReduceMax/ReadIRTest.ImportExport/Op=ReduceMax.1_Type=f32_Shape=static_IR=93691c8886ef3f112b45bef52825b34eea27fa63d0a3f67660a3d35d27a1f9fe_Device=CPU_Config=(),0.000789706 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=4cd45eacad61238eed70a5028193aa42b07f185f4f2854bc4f5bf5d440d528a7_Device=CPU_Config=(),0.000789706 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=f9240fe29c4597973ebe8f9f9afdbfa8402db46e3392a110bee4eb80577c19b0_Device=CPU_Config=(),0.000787561 -conformance_Exp/ReadIRTest.ImportExport/Op=Exp.1_Type=f32_Shape=static_IR=fdca6a3cfdc4ec92dbb61494041b1a50de26de4422602ecf915ba9244a1ee3c2_Device=CPU_Config=(),0.000775346 -conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=dynamic_IR=5778bee9c208564def20231652b21f21ce970821efca07c5960000e8e1b203bc_Device=CPU_Config=(),0.000767222 -conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=8891e077700b85423f6f4d90172fa9e4228490c5bdfb0d69c21dec91b1c36c83_Device=CPU_Config=(),0.000766793 -conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=2b1bfaf54f3cf8dc0f37ea22e9f72a434251984e94cd0ec9ed8c11e7f2a3c478_Device=CPU_Config=(),0.00075515 -conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=a713927fb5a7b807cfdeea2e61fef2424083294d7216bfd0734f04c16b9e4061_Device=CPU_Config=(),0.000747913 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_Shape=static_IR=f69265c471b368ef753c42d4ef6db2fe226ccb02bf7737a1717367fb7599bbe8_Device=CPU_Config=(),0.00074079 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=b2925f2bb50555f09e1016521b935243e9823528cdeaf9de34816eefac8eee00_Device=CPU_Config=(),0.00074079 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=eee08ab9eb6d2d63076ba113ddb1ff1dc9cb0f15cff063856c6166a58b66cd18_Device=CPU_Config=(),0.00073873 -conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_Shape=static_IR=ca74155424ab492e4023d73c155b7c5d5cdef69750e748b525631e396b88d35c_Device=CPU_Config=(),0.00073873 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=31a8083ea0dbb05e751147fe8b1dc619722f25749cf4ed25d9b2363dfbc94b1c_Device=CPU_Config=(),0.000732437 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=010f04872452e7220273dcad65e15739a49244327d6226f8087bfadc4256ae70_Device=CPU_Config=(),0.000729147 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=0f498cbacb0a71f82e05e89360b804c32294019db0f606d09ac6f2b173f8fca4_Device=CPU_Config=(),0.000727917 
-conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=6f480e27dfb82f1184f796472baf79edb84968d33bdc9564dcaf5a72b9361f45_Device=CPU_Config=(),0.000723454 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=Convolution-1_60_Device=CPU_Config=(),0.000722768 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=0501f73ab57313a4fb00d025091c52258f644d85e4b93c73a9b8e805c7e2e477_Device=CPU_Config=(),0.000715216 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_768_Device=CPU_Config=(),0.000712298 -conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=13c8ea70958355a75fa25b4307cc222d544ff8dbaffca01b1fb203278c6d7cd5_Device=CPU_Config=(),0.000702171 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=ecae8fe0a14fbb0e17dd711a46e05fa4e6901bc3ba360990c54713775074d6b9_Device=CPU_Config=(),0.000699482 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_403_Device=CPU_Config=(),0.000695592 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=2702db0d741d74e9b7a040b9b861f7367f7c69193545cecd8b1acc9ea46bf13b_Device=CPU_Config=(),0.000683663 -conformance_Concat/ReadIRTest.Inference/Op=Concat.1_Type=f32_Shape=dynamic_IR=9b7a1d1050acbadeb479db70fd51cdb0ccc5ba34aa0d1f7d77e814eca0893a22_Device=CPU_Config=(),0 -conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=830ab428e39124866276e2c0c352123f124a1167a4199c7acb1cc587cd952cf7_Device=CPU_Config=(),0.000675739 -conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=cbffaa2c9b5d9fe202f3f5bfaa43689b38a22adc631604a2c600205907a50655_Device=CPU_Config=(),0 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=830ab428e39124866276e2c0c352123f124a1167a4199c7acb1cc587cd952cf7_Device=CPU_Config=(),0.000675739 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=962075102f4aee372758414155ba95227bbf15a567d8dc02e3b4dbdf5fe913b3_Device=CPU_Config=(),0.000671362 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=647d1908985889e0e7bce82731a516b6f920f1773c6f22509d1955de50dbeac5_Device=CPU_Config=(),0.00066238 -conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=061ff5946f7a5f9e40ce7689b2c3e116af129f43ceb1ac786a1406b0abf30ebf_Device=CPU_Config=(),0.000660492 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=8214e394631fc0d96a080e8541b828323c69b8d081d449a19acb1eed7f2dddbe_Device=CPU_Config=(),0.000654284 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=8328e8a781bd1156b3fc3d483594e6c27578a4b4de7b941f3cd7aafd8ff05bb6_Device=CPU_Config=(),0.00064842 -conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=d5862889b4e4be84be17bace10560cd83176d11402f0a7ac1fd9b4451b557ba6_Device=CPU_Config=(),0.00064656 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=6b8cdc6a29ffdeac883e9f5a04fa196808935ccbff06a4ce4a1c5826c5783584_Device=CPU_Config=(),0.000645588 -conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=5c2ae0b57c4fedf886e428e33677bdd54a65b7a67b215e6b8f59bcae0db0a881_Device=CPU_Config=(),0.000642698
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=00458bb2b45f7c3058550c78c0dc4c2f19525a8cf2771b62602a37b8f0a1e156_Device=CPU_Config=(),0.000639981 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=1f6080ebf34bf005a256a1841fc8063677103ad8745179ff9b8f10d61ce651b6_Device=CPU_Config=(),0.00063918 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=eab33d79c162b6248c9cffdff23a0c48955f9ec1a0ca94408f5e5347d85dce0e_Device=CPU_Config=(),0.00063918 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=50520c3fd16e490767a758125e793a11c4adab8fc75a60152d5c346b8679ecb1_Device=CPU_Config=(),0.000634488 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=64788cc4a297a9786df29f718709fafefdb95b2d40eb57c25543a407d2c87b76_Device=CPU_Config=(),0.000628138 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_Shape=static_IR=e96df787e0c2200272830e58140ef50288f42f732959d107cdbbf0802b4b6337_Device=CPU_Config=(),0.000626564 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=22ae8d3d0f4ba99130074a080593d4bfce691ea1fecc6069063b40aca63cf7b1_Device=CPU_Config=(),0.000624934 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=84e7f1f68f735b20bd5ccb15dacda98a8f96410e4fd1d43ead2a1776474894c6_Device=CPU_Config=(),0.000621959 -conformance_Relu/ReadIRTest.ImportExport/Op=Relu.1_Type=f32_Shape=static_IR=c23dbfffae465b4ffaf41bbfb4e306e6f3a1c8809e8ccc477d3c5c48020cacae_Device=CPU_Config=(),0.000619127 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=551b9e0519966cf813111a910a021565dbd4b630822dba6bfacc6cde4c7f59d1_Device=CPU_Config=(),0.000615494 -conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=9b63204476c966a4968c13381d07c1f3c456912aa2cfc3e4a87e50fa84b677e5_Device=CPU_Config=(),0.0006088 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=da1d2303796268309dffa2b8a8c01a35b93615057fb8b4da4e1bca1c14366f13_Device=CPU_Config=(),0.000592237 -conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=14f7b821b76cfde7d2a57c11e3893796801f3e66937398b7c4bd83765e6b6aed_Device=CPU_Config=(),0.000592237 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a7da11a30afaccbcb8126e1cebe65f8734aad1af688c59a10f6f366c7a2dd897_Device=CPU_Config=(),0.000587059 -conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=dc0b460b468ef0fffeb9b87c3558d18cd4877ac3b70e86913fb8564a0f31ffe5_Device=CPU_Config=(),0.000578677 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=8088c1abf5b3c4d881a12a448637a987d97112bad5019777605008d6ee9ca681_Device=CPU_Config=(),0.000578677 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=0ea53d2b34565ced48ee8ebe364b1447642cd0ccc7d3374c5ecb5035e55f3598_Device=CPU_Config=(),0.00057719 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_759_Device=CPU_Config=(),0.000549413 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=1d4839f1256644ae80cd916aaa147a57771fb96ec7c69bde5ea24dc96a8942af_Device=CPU_Config=(),0.000544607 
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_897_Device=CPU_Config=(),0.000544607 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bf57b4e9289c0ffde6e49845d76a449bb5b607c68efb33ab6cd16835cd71087b_Device=CPU_Config=(),0.000537399 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_1046_Device=CPU_Config=(),0.000535911 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=c0902531ada30f4b5374061c2fd290d58039d7dbdfe1b1249f8fdcc50ef99b6b_Device=CPU_Config=(),0.0005265 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=70b94cd2a10562313f9d639d00a54f3840a6ad1c0a22f5652212d3575a1d6521_Device=CPU_Config=(),0.000525813 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=4cbc7c94cc9f5d5dcb24d9bc6cc649efba704ba21d9520f69c7243100ee750ac_Device=CPU_Config=(),0.000525813 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2329af04f96c783d2d315c115f2613bc4e2c2a2a3d554a1f9ffb5794bc8e4fe2_Device=CPU_Config=(),0.000522981 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=1d48f93dcc3cfffbc47832be5c7274da52f64770d341f795db3ff2de82718dc2_Device=CPU_Config=(),0.000515086 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=677b7a13707e6bc973cdafd14bbbedc582e4b9697d9b3a35233d505e2cce5e0c_Device=CPU_Config=(),0.000506761 -conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=b25cb8d3723c79b8267499bbba42dc17fe39d542940fe5ead1583c8cc4c47800_Device=CPU_Config=(),0.000495176 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=40ca9ed9b04e22a430cbfc79d1e30148450ab2f3a02214f6e2dae3dfc4a9efb7_Device=CPU_Config=(),0.000488081 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=72163ca8c8cac0c54fa744eb28c7bf024b510e1ae5eab238cd176c6b1a80d8e5_Device=CPU_Config=(),0.000483361 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_504_Device=CPU_Config=(),0.000478956 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=7ee4eacfda52ab676947d0168408d9ad03bd161b387e828074f20a329fa10d59_Device=CPU_Config=(),0.000477411 -conformance_GroupNormalization/ReadIRTest.ImportExport/Op=GroupNormalization.12_Type=f32_Shape=static_IR=8a965eb3a8e3a6f552b40228d13e82e580646c6c9d8ea2657e6329a1ac46c59b_Device=CPU_Config=(),0.000473263 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=621317783c379d8d86a795d11b2c0425a5be5f1a624a7a3155df38e1a0b8c910_Device=CPU_Config=(),0.00047046 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_719_Device=CPU_Config=(),0.00047046 -conformance_ShuffleChannels/ReadIRTest.ImportExport/Op=ShuffleChannels.1_Type=f32_Shape=static_IR=d78c6268923c55edff78ca4364650b2bdd0f15604082b8315dab7cef4e84823f_Device=CPU_Config=(),0.00046943 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_749_Device=CPU_Config=(),0.000461592 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f64cc20fd2689c60b15d687f3c98ce4a284baf86788110e220486143720e7e8e_Device=CPU_Config=(),0.000460505 
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a0939f5059458ce87550050263316a7648ee216ec71af09e67ad2efeecfa703c_Device=CPU_Config=(),0.000460505 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=12a024b3d2381e824ab65a317ccb0663206f09c1437b200b0b66b5d2da27a4fa_Device=CPU_Config=(),0.000460505 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_454_Device=CPU_Config=(),0.000460505 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=4fcb7c7f3a7fbfec41cb76d79275a66a20457772ef18b59e01def5cc213ce385_Device=CPU_Config=(),0.000458273 -conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=MatMul-1_890_Device=CPU_Config=(),0.000454412 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=47a56832a86e27182f8ec8b5b48ca6bb403533a8ed06fe472b5f4a9b9078377a_Device=CPU_Config=(),0.000452466 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9655ec9d7324c92aa5de8d98c5d69fd11ba1579220d7d2874d93ab55eca9381a_Device=CPU_Config=(),0.000444685 -conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=ed3c88d0209c6e97748fffe50da9971de78bad79cf5242b56d63f7b696534e59_Device=CPU_Config=(),0.000443513 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=0a3c2e701575d2475ab18c7debc2f2f0bfae153d00aab98113760acb43399842_Device=CPU_Config=(),0.000436247 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=560d2c1c973533bf122b820b6915dc7939c6886bda0656cf39d31eb4f3d50ab5_Device=CPU_Config=(),0.000435646 -conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=92fe0846ddac9179f9b9d91165c2f031619e6d57742b5daeac23f0e15ec16dab_Device=CPU_Config=(),0.000433272 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f3c8b0137ff20b748dcb5a258599046e22e81f9d75b50d98b965db993efaaf45_Device=CPU_Config=(),0.000422229 -conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=8dc7b330f0d148b3433e309002080bda3ba23f8f9eea73a42e4c5f6c0080341a_Device=CPU_Config=(),0.000421657 -conformance_Einsum/ReadIRTest.ImportExport/Op=Einsum.7_Type=f32_Shape=static_IR=dd86f424d9e476be060057c64d9eeb4b4c99e73a82eb785b6e284939ad8dfa72_Device=CPU_Config=(),0.000421371 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_317_Device=CPU_Config=(),0.00042017 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=91d53b3a00a0e6988bbd3f9aa8aef53e8d922b2802570b142bd9c157fe49529a_Device=CPU_Config=(),0.000415879 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=56561bd3bbea1fe67a6a6ac11a36708539344dcb79ee605c803a7830f0365685_Device=CPU_Config=(),0.000414935 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ec95298d085a695e52022ab8345eb5647f088059c20edad38dcfd7bb8b854562_Device=CPU_Config=(),0.000413133 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=6a10a5e0075c066d4526d1dc4d8f8d39b5d73a9c4125dc36bd349afa7d3172a4_Device=CPU_Config=(),0.000412217 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_762_Device=CPU_Config=(),0.000408327 
-conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=c771bce3afe248393d58eabb4a523588e44b53177e53b7002b25ca84d8094b2a_Device=CPU_Config=(),0.000406725
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=c771bce3afe248393d58eabb4a523588e44b53177e53b7002b25ca84d8094b2a_Device=CPU_Config=(),0.000406725
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=e4e1b2348695d533974a4b152c0259188508d71407682171cf8c407496ab3490_Device=CPU_Config=(),0.000405724
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=89f0ef44b3130bf3309d8761d095b2cc3ca909532cab7fd7547f0915d5c2e404_Device=CPU_Config=(),0.000403693
-conformance_Log/ReadIRTest.ImportExport/Op=Log.1_Type=f32_Shape=static_IR=ca164c0e2c8f96341bc57628210d5d1cbab6c5adb6adbed7693cdccfbe59c9c1_Device=CPU_Config=(),0.000403693
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f8c9b4286fd521e95ceac2df65b1669fb6fa508b583bd5bcb1b677fa45e11271_Device=CPU_Config=(),0.000403693
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=cfc66fb0ad297eddea5f5e24d243d4b4fde28919b81adcd1758e54336f3e629e_Device=CPU_Config=(),0.000400002
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=8db18ffd723cea90bfedf7d68f08b2c71033b2fb6c53302c77ca41169cddb168_Device=CPU_Config=(),0.000398515
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=56609ec82c3af7c86a1de83a63142bfaf0e18b117953f1504f5e5d1ae1aa7236_Device=CPU_Config=(),0.000395912
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=d1739f6c802b95c2864cf3cf181d7f13cab3fa06886508fa04f2a8ed1ed498ab_Device=CPU_Config=(),0.000393795
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=52f27fe4542c1356109ef0becccf728a71703c3eea628b7ab3577cddff680f0f_Device=CPU_Config=(),0.000386014
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=3c29b0a89a412615c1d830c9438ead870602337d0594d8d7568f4948bdc9ec88_Device=CPU_Config=(),0.000383525
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=MaxPool-8_945_Device=CPU_Config=(),0.000383525
-conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=dynamic_IR=aec39964f15dd1a3114e6b362335950e33e45dc1198759d754610967c50508ec_Device=CPU_Config=(),0.000383525
-conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=dynamic_IR=48caaa749863b7da1913c346efef34f58352c84264aeb35e8f1898a557db3937_Device=CPU_Config=(),0.000383525
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=dynamic_IR=4186c44cc2765159ad9672c627c3ff958bd921c09245c9db6ea6ca1dd228874f_Device=CPU_Config=(),0.000383525
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_340_Device=CPU_Config=(),0.000379892
-conformance_Tanh/ReadIRTest.ImportExport/Op=Tanh.1_Type=f32_Shape=static_IR=5562799ab3f8d6a09393d5c3d68490369ae7bad1b2a49dbf146bc308d65f90f1_Device=CPU_Config=(),0.000369565
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=88dc07aaac63f6117484febae9e5dd33cf08edeb8d76eaf2988bbcea21c13a5e_Device=CPU_Config=(),0.000369251
-conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=4715e012355b7124610625b2ea4b21cbf394620e2c4385f9e597c2a333b97613_Device=CPU_Config=(),0.000362814
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=a72df254a1280e38288bea31975868d97cefb7f83c316ca5c41c4ca1cb0514bc_Device=CPU_Config=(),0.000359667
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=1a1dfb32ab587686a22cb652d5cd5b18659ec7105f1f355844dea8cfeb82fdaa_Device=CPU_Config=(),0.000357121
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d368661d3c3950c5d56a371fe0eb30511b3d3230dc290a8db492ad6e62e2c7c3_Device=CPU_Config=(),0.000351
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=ce4703247246707b9d75641a013cdb2daeee225c4ab62fcae0dc69fd496a5c21_Device=CPU_Config=(),0.000349512
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=3eaf72e4d66b8bb05d491cfa30f3fbe8e6fcc0a98ff3dfb09b552920e8e5f902_Device=CPU_Config=(),0.000347138
-conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=8c14eb91c50415fa8537c2e3b537a25748d464a037612705bf3d73f77e4ad155_Device=CPU_Config=(),0.00034688
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=cdcd76daca683207b94b713c1bbfa838f668a51f4ecaf5282f28229c0b7ac58c_Device=CPU_Config=(),0.000340187
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=b8f76cdf8664f45fd096d38023d3b4266aaa31c625999ec9542d3935335443e4_Device=CPU_Config=(),0.000340187
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=a4624404e0c659c08940baba2b66a40dcd08e33147eb09c508337321c418c968_Device=CPU_Config=(),0.000340187
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=6364c0bf3e5223331912ea8b7924a023779a767c11cbf4f2d6ee314430dc3a6e_Device=CPU_Config=(),0.000337927
-conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=fd373104c5b513c4965fada2486ee2f05263d6cba159c9b07c1e929d207edae2_Device=CPU_Config=(),0.000337784
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=fd373104c5b513c4965fada2486ee2f05263d6cba159c9b07c1e929d207edae2_Device=CPU_Config=(),0.000337784
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=a06622f6a28fc09117a3d1ab248564b5d99fa302f9948e81c78c72159351f21b_Device=CPU_Config=(),0.000336239
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=4be020bc729de9d030ecf3b5220016b690cb7506b1149cfd0ed73bb6558a8d83_Device=CPU_Config=(),0.00032574
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=static_IR=972c836e1653f221ec18bd2a2875bb01332cb939b3523e6884ea6c463090d307_Device=CPU_Config=(),0.000324596
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=96a70c2f1eb49e348da2b172e948f973510d3bdfd17c747a578f5aa98e48db64_Device=CPU_Config=(),0.000323967
-conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=98d105d2cfe1892e817fa8918ad3a0187488eb0bfecd9d4e3c99f6eea349adba_Device=CPU_Config=(),0.000323967
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=a0388351376c544acd9d4b28135430ff0be36e74ff5cbddcfa484c5a5a20c903_Device=CPU_Config=(),0.000319504
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=boolean_Shape=static_IR=13abc55c839ad896259a0766d0b394ed6921cd5d7b53bd9a4d7d67fc8c221af0_Device=CPU_Config=(),0.000319504
-conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=2b2e6496b2a4dc6619574cf29d0238090d7444f2a3d42147710d133a4a47183f_Device=CPU_Config=(),0.000319504
-conformance_Convert/ReadIRTest.Inference/Op=Convert.1_Type=boolean_Shape=static_IR=4b165e4f5a31fc0b54e4e915f6abdd02eac89d24637e72e961c506eac340be97_Device=CPU_Config=(),0.000319504
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=1083b7c18bf9592652006600446a8f6c94d7c29f6b2cb553f8c606659e79bde5_Device=CPU_Config=(),0.000319504
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=boolean_Shape=static_IR=4b165e4f5a31fc0b54e4e915f6abdd02eac89d24637e72e961c506eac340be97_Device=CPU_Config=(),0.000319504
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=1dc819c29d5bc4fab0a4d5476af7cc520ff9d45fce2ed8301cfd560ad184cd5e_Device=CPU_Config=(),0.000319504
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=1d39365843e3a7e456949cfafc377f2ce1f616bf3ccc12e8f36ee56c9b8e9847_Device=CPU_Config=(),0.000319504
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=boolean_Shape=static_IR=4e9e1af1e95fd42b13a376a18989a3dbfa08d06a2aa20f7f1827e5d14cf38f23_Device=CPU_Config=(),0.000319504
-conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=fb2e627ff021b61561195fe303874f81198ceff114d020bc056d9cc6467df7bd_Device=CPU_Config=(),0.000319075
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=b63cc54c1780d2d7bcd1875984f0ce103d56907789712aab662b1d4fb906a3a6_Device=CPU_Config=(),0.000315871
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_951_Device=CPU_Config=(),0.000314098
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_760_Device=CPU_Config=(),0.000314098
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=7eebdc50ae1f58bd7113358784e1c937bfb6951f43050153012de385d9ef8ae0_Device=CPU_Config=(),0.000313039
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=565b7f5e6c5ac64a57fdc6e254f7ebf5d88973818c9d03be29fcfe9fba36abcd_Device=CPU_Config=(),0.000312896
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=dynamic_IR=b9a8f78fb3ecc1624fea0392e4d98d87822ca8436f996b263783cbc7049e3e59_Device=CPU_Config=(),0.000304314
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=dynamic_IR=e3f2f931b0a4f62359e785f6677164eef6091e939d84fd7d488fcfa188528fb4_Device=CPU_Config=(),0.000304314
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i64_Shape=static_IR=b8d8474b1297fec242565afa9967e0842f9d47892f725da2bc455cb3727b49ec_Device=CPU_Config=(),0.000304314
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=dynamic_IR=9b7a1d1050acbadeb479db70fd51cdb0ccc5ba34aa0d1f7d77e814eca0893a22_Device=CPU_Config=(),0.000304314
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=cdefaa0b4af627cd31d029ee6896781fb0d6a44a034924fe8dcde0d53d3efc9e_Device=CPU_Config=(),0.000302083
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=76c9b3d6cdf875ef90e14f24184aa387215cadf3bc82d3fd800f2a6d2675d251_Device=CPU_Config=(),0.000302083
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=a367e2320bb7edac76c3428a478a7b3b3b07f4facb7eaeefd3083e9ca229a8a2_Device=CPU_Config=(),0.000302083
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=c8418694929dce79ca0283f9dee6398a83117dad3a0acf9211d85c939d8536a5_Device=CPU_Config=(),0.000302083
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=85abdf2f5fbdf1ac7af2487a5e93e4856a1656bfa217e00a342fe34d31ad40ca_Device=CPU_Config=(),0.000300595
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=0937667ef842992f89bd87954bd5a95ca1fd223d9c9c4bbb18e0d9133a148e91_Device=CPU_Config=(),0.000298908
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=0937667ef842992f89bd87954bd5a95ca1fd223d9c9c4bbb18e0d9133a148e91_Device=CPU_Config=(),0.000298908
-conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=75a3f0c6c0208d78f1c5bcb8828b148e4e801880d2ec2021dfeaa02d08176fae_Device=CPU_Config=(),0.000297105
-conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=ab315d896d8ec3193a1db909975a3d8adf1c0bdf8c7900f540323544b897d3ed_Device=CPU_Config=(),0.000297105
-conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=d43b433b5bf8f5d8c23343277b4c42ef4ac604cd7671cc0ae097b52a896e9439_Device=CPU_Config=(),0.000295732
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=f544f4fb4101da0b04e07b04281b77dc25bf1b98a7de9dd34983f13ede85e4a8_Device=CPU_Config=(),0.000294245
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_662_Device=CPU_Config=(),0.000289096
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=GroupConvolution-1_458_Device=CPU_Config=(),0.00028778
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=fc363e7829f71b44f99c2686af079bc3e28173b0f2aaa14237daf5de1fa5944e_Device=CPU_Config=(),0.00028715
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_501_Device=CPU_Config=(),0.000282945
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=1cd3726a34717b86df72b6553b724af8b526677a5adfd3d4ac3ec4dbd83d4e71_Device=CPU_Config=(),0.000277224
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=f8d2c8614467d60c83295e3cc81ac84ad751ba066c2251af7a4bedd08fa1fb0d_Device=CPU_Config=(),0.000266382
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=7d1e3b5d9cd07ee2aa5968add906e8f3ce187e7ac5ec3cbd8e3e6eb53069b5be_Device=CPU_Config=(),0.000266153
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=static_IR=4e336499274d6d73904a51ae124460aa4ac4b76a80aaed1fbe2c224c3c83fada_Device=CPU_Config=(),0.000261405
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_700_Device=CPU_Config=(),0.000258315
-conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=Interpolate-11_999_Device=CPU_Config=(),0.000255626
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=Interpolate-11_999_Device=CPU_Config=(),0.000255626
-conformance_HSigmoid/ReadIRTest.ImportExport/Op=HSigmoid.5_Type=f32_Shape=static_IR=5b60ab2deaed89dd893d7bf679de386221ccf31caa48d222a42ece14da21f4fc_Device=CPU_Config=(),0.000255226
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=22f9497b5b657c45977748d1ef69f29edb86e53c7018bd8fa3eed29908737aaf_Device=CPU_Config=(),0.000255226
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_530_Device=CPU_Config=(),0.000253824
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_419_Device=CPU_Config=(),0.000249304
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a46c2aceffd7408b204aef16b3f0d48cf20acb96f1ffd77ac0b99927b2d092b4_Device=CPU_Config=(),0.000249304
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=i32_Shape=static_IR=faa706deb6c5c900e174d8d6d20ad66730ec5c4a43f545006988166df438609a_Device=CPU_Config=(),0.000247731
-conformance_Pad/ReadIRTest.Inference/Op=Pad.12_Type=f32_Shape=static_IR=702023aaf5c1730c643432167b54c07077107363935c2dd88dd08b73599f840b_Device=CPU_Config=(),0.000247502
-conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=702023aaf5c1730c643432167b54c07077107363935c2dd88dd08b73599f840b_Device=CPU_Config=(),0.000247502
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d3f30426638d16816674cb04b2a4ee1b0863092ab8ca8a6abb0c6b91ddb52bb2_Device=CPU_Config=(),0.00024733
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=7dc07057973536b87885655e2b2eab3e79f4988c59f344acaf8101601d8245d4_Device=CPU_Config=(),0.000245614
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=static_IR=50dce9c881bbb8d134f10c4da03a5d23cceee7b9e43484a0c0fac415a2033b77_Device=CPU_Config=(),0.000242696
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_249_Device=CPU_Config=(),0.00023174
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_391_Device=CPU_Config=(),0.000231397
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_836_Device=CPU_Config=(),0.000229566
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=846d4ef316a40a73209fa88972fc433460f3e17a4233d2e4466dce9599cf08bb_Device=CPU_Config=(),0.000229137
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=846d4ef316a40a73209fa88972fc433460f3e17a4233d2e4466dce9599cf08bb_Device=CPU_Config=(),0.000229137
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=b9842b102a4983c1e2d355a1616f7176dfb4444c5ac933958fc820413287cf88_Device=CPU_Config=(),0.000228965
-conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=e2bca0617c1689a669d557fe2fb1f73ec18800c8de37eed99a19bcc3876d5f29_Device=CPU_Config=(),0.000228965
-conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=86f8d8715306c949c6ddd8fc809c57c038918b1899bb7a10ffa6b08e69a46a41_Device=CPU_Config=(),0.000228965
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=8406b5a3375089a0acb2738f25d275925223880c3ed6928ef4e0a90292d52f59_Device=CPU_Config=(),0.000228965
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_Shape=static_IR=70f236c00b8808d74b6678e1a0c422422b95032e5d303db351672ad447525120_Device=CPU_Config=(),0.000228965
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=59179c6f5b845d52cae7521a3c78ed818e2886e65d97b0c4afe106fab2702c0c_Device=CPU_Config=(),0.000228965
-conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=dynamic_IR=419fd77682b743e20ab28d52825c449c60ac0824f22d3d2ee5d0f4ad7c642631_Device=CPU_Config=(),0.000226991
-conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=36100d3e45f626a87a878b0d17d9ef5a99199cbc9a7d7fe66f705d3059d52f0b_Device=CPU_Config=(),0.000226991
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=72530c1d00803ed543897ef701fc23226bf46517e4f70dee7c545401b19bcc9c_Device=CPU_Config=(),0.000221871
-conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=i32_Shape=static_IR=480d223ae41adc7e7f8f8b2484967b2a348a8fd2b7a18906b97cc833575bf07a_Device=CPU_Config=(),0.000212946
-conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=09ad8bc515988b284b5014a173777ba0add1775beb3ff379736aa3eaf3046241_Device=CPU_Config=(),0.000212946
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_705_Device=CPU_Config=(),0.000211201
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=adf95a634bea6bed7cf0849d5478b0bc8c2f7349095e0060802fc68cb61f879e_Device=CPU_Config=(),0.000208769
-conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_Shape=static_IR=3a543267b75bd430bda785c118815d05666a5f9442eeba1a5fb17dce89aa8df3_Device=CPU_Config=(),0.000207682
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_904_Device=CPU_Config=(),0.000207339
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9b632a610884f12865e8ee66cda807eeb8980c068a993e2c29334fe71f8dd077_Device=CPU_Config=(),0.000206738
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=6e304064dcb3070f767e95e3597b3b9ae8995ea58cfaf938cf05651b642822fb_Device=CPU_Config=(),0.000206194
-conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=dynamic_IR=4cbdaa51c99064416c87014a410a5666ca1a1c450843f2bb5bfe345fbfe92055_Device=CPU_Config=(),0.000202819
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=35b63bcb1e04e51af2de99763ee5f1f860f0814af8c772c0df79fab9b98ea5ff_Device=CPU_Config=(),0.000198042
-conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=b0cff01ec5f73ad9b40b2007aa686a9975af0969ecebf0b06a9f305c12e9ddf9_Device=CPU_Config=(),0.000197298
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=f28cc3ca7c4553ca9ceeeee53a2223fd12fb266857fa8815bb208185e6f08124_Device=CPU_Config=(),0.000197298
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=84b63ab1d5305ac6aee357e766507a9ddb1c47ebc7614c221adfa29456f7c040_Device=CPU_Config=(),0.000196125
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4b684b1009e0b14d8fdd57ad69edfa15df355da015011d5dd0d2e9b8f8f3650e_Device=CPU_Config=(),0.00019581
-conformance_If/ReadIRTest.ImportExport/Op=If.8_Type=f32_Shape=static_IR=If-8_925_Device=CPU_Config=(),0.000194123
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1f791d2e5218c42fbea8d9f4cf588e00570f1fab8a95d594576f6c921ca26831_Device=CPU_Config=(),0.000192921
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_90_Device=CPU_Config=(),0.00019232
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i32_Shape=static_IR=723ff8a2e43baf5c59a29c1633d49898edaad3f33c4979ed4dba2cfa35438066_Device=CPU_Config=(),0.000191119
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=d20222ed247c6872adab05df0d282b527b905f7b705e4881019517dba18565e4_Device=CPU_Config=(),0.000188659
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=d57d585418a6958afc58679a3cee0a0fc8eb4377f6696b9cb293767cee8c0dcf_Device=CPU_Config=(),0.000187343
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=5a014f92c4553e6b7501f33b6794ff5adc19d16bee82e544e31a2947ca1ee274_Device=CPU_Config=(),0.00018351
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_922_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_767_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_755_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_704_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_922_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_767_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_755_Device=CPU_Config=(),0.000183481
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_704_Device=CPU_Config=(),0.000183481
-conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=4a26e507a9635e0c082219f6f071fbb49fb8974106c6716e850c0701b1883064_Device=CPU_Config=(),0.000183481
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=530687639a78bb0ba4e0d745aaf192d2eafcf4eac7f4d0802c581ca391cff83e_Device=CPU_Config=(),0.000183338
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_542_Device=CPU_Config=(),0.000183023
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=0218c0d5eef6211729b6aa8a20f0b2b74078df276a4a041c9696b3c45744379e_Device=CPU_Config=(),0.000181936
-conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=e94a9110eb4b47e0c518911d052b17e79bbd1f54dec565a951a6c262df626e83_Device=CPU_Config=(),0.000181936
-conformance_Reshape/ReadIRTest.Inference/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6d699e8b6d4ab6ca840918713cbe280ef6b986e12876e3500966f96d4ac74c62_Device=CPU_Config=(),0.000181936
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=e94a9110eb4b47e0c518911d052b17e79bbd1f54dec565a951a6c262df626e83_Device=CPU_Config=(),0.000181936
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=6d699e8b6d4ab6ca840918713cbe280ef6b986e12876e3500966f96d4ac74c62_Device=CPU_Config=(),0.000181936
-conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_Shape=dynamic_IR=e52360303d9834c1bfb89eaf5352d1e57bb4121d511dc2250dab94fdcebd5c0b_Device=CPU_Config=(),0.000181936
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=f8edd0961f6aec0341b2e0e6c3a9767aeaa5c5f1095df765323980c48b8396d9_Device=CPU_Config=(),0.000181936
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=0579c28b042af9f42fab14ceecf839a561e21d1010322d840fab653af686e052_Device=CPU_Config=(),0.000181936
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=cd365f99fda3004be7f2721dce07b7fc3782981258fc5139876c46d00a3c186b_Device=CPU_Config=(),0.000176358
-conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=a2af3aeb61b317bb3f050aefb7e8859206fd127310754e4d78f860e2c8347264_Device=CPU_Config=(),0.000175157
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=2c8fb040af57cf305364104a07b34dfe5fcfbd74fdf3b507a110922abf92c708_Device=CPU_Config=(),0.000174213
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=3d8b637c6494c98dbe8ff059d55e288a7b196712c23d54923d796b6e53ddb228_Device=CPU_Config=(),0.000173583
-conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=Clamp-1_385_Device=CPU_Config=(),0.000172553
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=1bf303d4cec837d3fd1dadb4d7d668a3363916c9b5524a4dc42f644afc7a5fdb_Device=CPU_Config=(),0.00017118
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=a7a0dfc3dfff3af78129e5fb23f4e0783a679d532864a7173d47ac05c9f6d03a_Device=CPU_Config=(),0.000170208
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b2a4034be357dc1e864604e0a1a1339f0ea8ac1f97c63c7cf6e2ed6ca74e6e45_Device=CPU_Config=(),0.00016975
-conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=f32_Shape=dynamic_IR=5265b728f97a97c1aea4c96b31e962ed24f57b78c24a6683076282b76fc22878_Device=CPU_Config=(),0.000168806
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=91c6158284310c5355406d04d46c09179f8c237ce6060fe3809a61a9e05dd865_Device=CPU_Config=(),0.000168806
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=38760fbb498f53bc58cee8ab38e787cac30d68b14a6d2bb3ce7d84f0b826f9b9_Device=CPU_Config=(),0.000168806
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=4e5e03c7aeeaa25146a7808601d5872e6491cc9fe02e66182b3ea24187ed392d_Device=CPU_Config=(),0.000168806
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=dynamic_IR=60738b9dff196a757e34d3ca26b51a6980c57b4fe9fc30a9666e5a7f5c33fe37_Device=CPU_Config=(),0.000168806
-conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=a419e9bbeeebaaeca469be69343018f44bd6a494843d0781effc622d8c3b9fc4_Device=CPU_Config=(),0.000168806
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=a419e9bbeeebaaeca469be69343018f44bd6a494843d0781effc622d8c3b9fc4_Device=CPU_Config=(),0.000168806
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=7d8befa2fd0a8c0e118a59e732eba3a0179054518efee4dfa64ddec67e6fc14e_Device=CPU_Config=(),0.000168806
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=52e9696e18db2b6894c4cf87d5eae0c50fca131cf140e840c06589e9c0f1e089_Device=CPU_Config=(),0.000168034
-conformance_LogSoftmax/ReadIRTest.ImportExport/Op=LogSoftmax.5_Type=f32_Shape=static_IR=fc33843c4222ba24142cf6c9274be0f35829ac2cbf111cda9ee48e5d7a3804fe_Device=CPU_Config=(),0.000167404
-conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=i32_Shape=static_IR=24c49353906243a7ce15993a9ee31dc174f71ee17dec38bc8fedd019d68f3401_Device=CPU_Config=(),0.000163685
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i32_Shape=static_IR=dcc54304d45d5394649dbbd7a57e689e4cc29d5c22bae3786a61236dbae75f35_Device=CPU_Config=(),0.000163685
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i32_Shape=static_IR=e3089e9cc3fde201e178f67a3e0c498c81b7441e52fe110dc05057e19d7f4e2b_Device=CPU_Config=(),0.000163685
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=9929ac7c06e32cacd2c1dc65614bcbc8304cd9dca25532d364782effd838c6b7_Device=CPU_Config=(),0.000163685
-conformance_Ceiling/ReadIRTest.ImportExport/Op=Ceiling.1_Type=f32_Shape=static_IR=1c16a437f93aaad29c0b0f3d1c18d5ac51713bff37faad1a6c1f183f7b6bb67c_Device=CPU_Config=(),0.000163685
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_551_Device=CPU_Config=(),0.000161654
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=b61812605a2c528cf203dd9de7efba6cc0b8b7358d994356200a70b110dd6d20_Device=CPU_Config=(),0.000158594
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=f434ceb3c2bf2f53bfb1afc5aef5329a0b96212116132482ac7249e791d4ccbe_Device=CPU_Config=(),0.000156391
-conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=dcd366c15188794221b8c93f9eac5f5c0ebb1f5aaf99e6286ea845ab2f902dd1_Device=CPU_Config=(),0.000155847
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=d7348f44f0bc1a8e9903f4a0edc136de768e75c8b34cba7a7cc4c4013346c665_Device=CPU_Config=(),0.000155132
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=dynamic_IR=a380dab4018caaf6c9c97dd05bd043131024a8a124aed741063dd61449fbd2c7_Device=CPU_Config=(),0.00015456
-conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=6391d4c4fed9adcc1a08e3134564859d582c2f0dfeda47ea75147c2dc0f686ad_Device=CPU_Config=(),0.000153788
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=6391d4c4fed9adcc1a08e3134564859d582c2f0dfeda47ea75147c2dc0f686ad_Device=CPU_Config=(),0.000153788
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=f875aa55e00c0b36cfb97f074f5efc77dfacced6f86eb726269c3d888bc4db71_Device=CPU_Config=(),0.000151757
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4dfd41288074d59a6af000a128d3b0979682820859f96aea9d72ff27a2016b2d_Device=CPU_Config=(),0.000151385
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=a48a048bc7a80c4310891ba16455c2ed9e2f859d2baf21802f21439427c5b291_Device=CPU_Config=(),0.000149325
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=dynamic_IR=45baf2f85ad76928d21607499598ddbe6596736c1d710e22d6e138316688d714_Device=CPU_Config=(),0.000149325
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=dynamic_IR=723818bbbd4cc22ae843abc9976f01e556124b329b11e0ca81b1bb834954b24b_Device=CPU_Config=(),0.000149325
-conformance_Slice/ReadIRTest.ImportExport/Op=Slice.8_Type=i64_Shape=dynamic_IR=629b1683acd381c63608645fb7ad6279c592518373dff9ffa823b65b6e64d117_Device=CPU_Config=(),0.000149325
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=dynamic_IR=854e2b2ef312ceca5b678cabafead3970536851ef1fe49a4ad5818dcc972b389_Device=CPU_Config=(),0.000149325
-conformance_Equal/ReadIRTest.ImportExport/Op=Equal.1_Type=boolean_Shape=static_IR=6b56dbe75ec5972be2948f1ce6e5e9339eab2de0efb4a60e9e7d3554d0d1ba64_Device=CPU_Config=(),0.000149068
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=dynamic_IR=1853d26c521ed847c0a00226712ffddda8c68fd8e1a35efb0b8d7de49ff30e92_Device=CPU_Config=(),0.000148982
-conformance_GroupConvolutionBackpropData/ReadIRTest.ImportExport/Op=GroupConvolutionBackpropData.1_Type=f32_Shape=static_IR=f5fa4d6112e9a88edcd2041cbed0f0f4140fc4edc648a74642fedb73bc237704_Device=CPU_Config=(),0.000148982
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=a812fd814397a432fa536787935f47d104f658789d87ed4797980fcea21c6d8f_Device=CPU_Config=(),0.000148581
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=6ec39fbc1356f7ef2adb9e45d1a28077357b58681a1a5c35e9f023ae1b4fc53e_Device=CPU_Config=(),0.000148467
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_721_Device=CPU_Config=(),0.000145806
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=03f2151a574ebdc260a55dc84d85bcd1f7661509bf3c3ac1e10fe9297bd72263_Device=CPU_Config=(),0.00014429
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=dynamic_IR=c9237bae31e47b657c974d49a1328c0d522acc006105885aa48cb6f08a32c637_Device=CPU_Config=(),0.000134907
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=1cc8a3b004f29e32af14295defbbcf6a9b195e99169a42d30d7a8da51a9c7392_Device=CPU_Config=(),0.000134307
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=17f4ea041c836582962a5add42394aef53ca535cdcbb7552a2402ffde3246ea8_Device=CPU_Config=(),0.000133935
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=c66b39d44133f9e1804ca1613331e5d7537b2db68b61710d9cc9c73fc6df8848_Device=CPU_Config=(),0.000133878
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=16307d02716975df59936f493a135cc523d02ed0e87b5bef39f73a79532485df_Device=CPU_Config=(),0.000133878
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_713_Device=CPU_Config=(),0.000132676
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=6e46e15b4cedec678f2ce4c25059d462f317798c7588c6fa8577895d3f777678_Device=CPU_Config=(),0.000132562
-conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=static_IR=a5c035319dc8e359642f7c8f233b2e0cea3e6c701c7e8544a519da87924ee79c_Device=CPU_Config=(),0.000131818
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_422_Device=CPU_Config=(),0.000131475
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=c3994924d501b198695e8b7e5e98fcbe3252f69585a511f6b3f7d844737afd93_Device=CPU_Config=(),0.000131446
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ea288ac5da2ab0f30b4212aaee4eacf151a1eb8ad7ed18f14115be8617dcc8eb_Device=CPU_Config=(),0.00013096
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i32_Shape=static_IR=a8f131ea0e1b5d7c704654c67c9752e6d1e11f26489d52e74682c14f8a5dacf6_Device=CPU_Config=(),0.000128271
-conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=9e43c8b338fa223b65d9ddf09ce047a0455ac3985575a953be180eebbd1be5bd_Device=CPU_Config=(),0.000128214
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a42d1c259763b5a0fe698d9ce07a79709d558599e5f9814db2e2b89cddff9fc7_Device=CPU_Config=(),0.000127613
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=659ef1bb1eda2fefc70d10b4fd68ed0eac042c2cdf1d53dfc8903d199f9cbba4_Device=CPU_Config=(),0.000127527
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=71dbb0ee89142ae893b27bc3947cc478eca94a8db535eed2ce8ec28ba0e0dd6f_Device=CPU_Config=(),0.000127527
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=3ab7d2071d9a85af091125d48056b5a2cc3e0019a122348ab175e29260ceab1e_Device=CPU_Config=(),0.000125725
-conformance_Proposal/ReadIRTest.ImportExport/Op=Proposal.4_Type=f32_Shape=static_IR=Proposal-4_790_Device=CPU_Config=(),0.000124952
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=bcf8916aca87d3ced6cb58209d686ce9d6caad3f48f9360feaab69c271832d9d_Device=CPU_Config=(),0.000124209
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=f975799db23fefa661731167f4a04e5ea0881bb456b36972d393ce1ddbf2b39b_Device=CPU_Config=(),0.000123494
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d0dcd3bdd6186a36197ea711d8829ab3e789cf43a9c578107d2eb0160f3d1c69_Device=CPU_Config=(),0.000122893
-conformance_MatMul/ReadIRTest.ImportExport/Op=MatMul.1_Type=f32_Shape=static_IR=260c754588aca529758574c050991c8376ab56bb43e22656c60b9bfb004ea0d1_Device=CPU_Config=(),0.000120747
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=059a5a92f33f1648d703e9a606988d1bec8ca2c6d91df6b7c61f6c49fa9e1d7f_Device=CPU_Config=(),0.000118945
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=c321ca28fbb97736d33efc10213ef0893ec5283865b2eb586c23a70d4d4d16ee_Device=CPU_Config=(),0.00011843
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=87db2b780622e3ba76ced852882b11efc00c9977d166df2107a211f3b37a3cb4_Device=CPU_Config=(),0.00011843
-conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=342120101ac5ad6b9a44924d31e8b26f1b0cfc49f408112bccd00b6d0950dcca_Device=CPU_Config=(),0.000116714
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=0325139df7d4b1aadbfc7a0b42f4d3fc5224dd5b854722097a9f938f29508ee8_Device=CPU_Config=(),0.000116714
-conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=ee75ed9210bb34af068356ef3a3eb68cfe0f9d2d1da5af790df3a3d38a472995_Device=CPU_Config=(),0.000116714
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=f54d93bd13c8962a740b0dfe0aa28de0bf37a36b97f3cac2b7f5d8ed3797aca9_Device=CPU_Config=(),0.000116113
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=static_IR=7fe0a5e83b2f96629312041fbfb18ba8e17408acd3ca49b25aee8d86185b1b3b_Device=CPU_Config=(),0.000116113
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=c65e0e4d58a65b295bfb604ce1f5171cefefdf8d3802680bc30634e4ba19a176_Device=CPU_Config=(),0.000116113
-conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=ae4e792701d41de755ea11a9ee59ae3954d1b93e8e14ae8420e332fa8d0b63c8_Device=CPU_Config=(),0.000115598
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=bc627f7c0be6f9805fd857239385693f3fcc7a786ac135993779928ac843427b_Device=CPU_Config=(),0.000115598
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_440_Device=CPU_Config=(),0.000115598
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=static_IR=9a12f233957e037d873d3de875f90ed4851e5342009ce4fe7587fb8150f4faf4_Device=CPU_Config=(),0.000115598
-conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=5f5db9ad61f83dfa79423ecdf256f0f60daa670063852121223fee424510a3ea_Device=CPU_Config=(),0.000115598
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=d919fe33bbc33b73734376de5445291671cc70255744d814da068bad28bb292f_Device=CPU_Config=(),0.000115598
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=0e62e566bf4e94d5206fa34aa8fecfc43f311e69927a6c500ac6c336d09ca30a_Device=CPU_Config=(),0.000115598
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=dfac84e1b66bec5d3baee0478209d26bf9a20fbce48fb0fee640ddf2f3b4756f_Device=CPU_Config=(),0.000115598
-conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=8c3cfcb112ea6fd9896444cd4ace1c150f68a265b2aaa0ff58d7f3ba5215778c_Device=CPU_Config=(),0.000114397
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=8a42d5d623d6b054889107a4f31bfeff76619f14b022ad3b79b9b71c951ef7b9_Device=CPU_Config=(),0.000114397
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=439457c2df590b3feec9bf8b733fce374ab7ba65c9c5023d84f8abc95f10c230_Device=CPU_Config=(),0.00011371
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=00a5dde2172a6e345c2a08fe9a6983596f97b0578f7b19d85841998ba279aacb_Device=CPU_Config=(),0.000113338
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=MaxPool-8_950_Device=CPU_Config=(),0.000113281
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c301804445f273eef62f41f02204711d9d6e571da28c76ab447d7d90983b0032_Device=CPU_Config=(),0.000113281
-conformance_Sigmoid/ReadIRTest.ImportExport/Op=Sigmoid.1_Type=f32_Shape=static_IR=03d3f700f96f4ac1ec43711231fba5be4f384db1858d079f922519b2083e9105_Device=CPU_Config=(),0.000112509
-conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=8906dce902238384668e8a6b7f3c8f185a6f45ba98a60aeb075382530d1dd1e9_Device=CPU_Config=(),0.000111393
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=8906dce902238384668e8a6b7f3c8f185a6f45ba98a60aeb075382530d1dd1e9_Device=CPU_Config=(),0.000111393
-conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=i64_Shape=dynamic_IR=bfc418259ffe1e22328a851dc11db92d7f774cee1f9c49e80f972eabdfef9ab5_Device=CPU_Config=(),0.000110964
-conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=3714d1cd3adf627869eb8a22a15911bd5bc8f5605382fcf356197fea1623e210_Device=CPU_Config=(),0.000109677
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=00b03e54257a6cb2ebb8ff6e0ef635e2f771aecaa3cb00256493c567962df045_Device=CPU_Config=(),0.000109333
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=ef80dd919af54694b22e60202f903ae7da4f2288301c8ea6014c9588fa13659c_Device=CPU_Config=(),0.000108876
-conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=8039b636a4ef3f6f893521dd950c2fe6ac23436028471f222ace55d3eeb4e60b_Device=CPU_Config=(),0.000106416
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=cc57d84669b66a060b8fb631cae164141b6dad76e8d47ed337abfb495db9a742_Device=CPU_Config=(),0.000105329
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=787b699a4f6595168dfdc841948cc4a42cea2b1b63ebf30447b51277796174d2_Device=CPU_Config=(),0.000102983
-conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=b3a051e64ab99173e611b36b1d7008aab97a19a7c265d9c771b430754a67d15a_Device=CPU_Config=(),0.000101581
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_377_Device=CPU_Config=(),0.000101352
-conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_Shape=dynamic_IR=6a9a38ca1bddbbc101beff8f1115c6d4927ad60c35b8355b4f5c23cbb29018f7_Device=CPU_Config=(),0.000101324
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4e96a8ffc8b3590a005ce6100805a339d48219df783da561b7a84e7db63d440d_Device=CPU_Config=(),0.000101266
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=601b2056aed7972deabb49e784edc58d76c059878d7beda5567e39b99180f34b_Device=CPU_Config=(),9.63747e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i32_Shape=static_IR=37b38ae41a2f207792960577812af0b976116507ef69a4f0656773010af6fb50_Device=CPU_Config=(),9.62889e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_840_Device=CPU_Config=(),9.56024e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_673_Device=CPU_Config=(),9.56024e-05
-conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=e82c6a9d0ae85d730d89c5e97915de13b2cf54a03a0c41cf2db6f10dee14a367_Device=CPU_Config=(),9.53735e-05
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a2ba0bffd25fb4db194b8ce32877bb44e1632c867fe229911a137c4e8aea62f2_Device=CPU_Config=(),9.32566e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=e80718ea398a62bb4159a648dd745a1d822506fab2e032617cb4ed8b7bb421ea_Device=CPU_Config=(),9.03102e-05
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=dynamic_IR=ab86f34fc9f2dc496de061a30cab5a769dc44893dde53e33cbd3cd80df86ff26_Device=CPU_Config=(),8.84508e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=654ee1fc8f9adaee5eaf1891b7a7c0d1a9554b5e0f1b78390e000f7786cb20c3_Device=CPU_Config=(),8.83077e-05
-conformance_Negative/ReadIRTest.ImportExport/Op=Negative.1_Type=f32_Shape=static_IR=5c4b572c47a554a0b388af78c1fc25eb90a2c3c12858b693f115eacf6aed6791_Device=CPU_Config=(),8.75354e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a5ba11df050b8a2c69c4d2c2667a1a1b680b0395135347bf72d130b75e60afd0_Device=CPU_Config=(),8.74496e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_980_Device=CPU_Config=(),8.74496e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=944508bde4733269b11515d6b736058f91c99a4e191fcca8affc542b33fc19ed_Device=CPU_Config=(),8.72493e-05
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=i64_Shape=dynamic_IR=9f357137acd56688ac778bd8ebb4ee40f15ddda280e478abdaf20e192ea8c5e9_Device=CPU_Config=(),8.70205e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_869_Device=CPU_Config=(),8.70205e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=894070e5e6a934f09dbf647d6ac0f8655768b50bb197dce03326c2d8eb7694a9_Device=CPU_Config=(),8.65055e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=591fe2116e088316c4e07134cc21cb463924fb70a528aa1948e7abcfe3f50c41_Device=CPU_Config=(),8.50466e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=58e7d89904f07a8f20d8564841d8510fd989a9da499c8d34eea4f37a64f13039_Device=CPU_Config=(),8.50466e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_491_Device=CPU_Config=(),8.48178e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=04671633de06488b0296e3cf1b73a2a5597230d67b1800e229331c36e4136870_Device=CPU_Config=(),8.35877e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_969_Device=CPU_Config=(),8.3273e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_979_Device=CPU_Config=(),8.19571e-05
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=dbc0e23a8b7d2bb56192026632d00c8e931f98c0f4c16fbbf15cfa0e90967af9_Device=CPU_Config=(),8.17569e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=e4dea2e1c3c8049c0f1133898c5920aac0526930e3102a28dced544027c1b246_Device=CPU_Config=(),8.17569e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=7d2e1566473cf91c6cc6470703a6090b564ad115592fabe76e878df4501ddcf6_Device=CPU_Config=(),8.17569e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=i32_Shape=static_IR=28b38fdc7d514d1cb11b546feb64682d2306b90c888e955e7744088eb4678126_Device=CPU_Config=(),8.17569e-05
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=static_IR=6a32b9be72d06514d385a446533a16820298848fe641cabbdfcdad7b49a2e744_Device=CPU_Config=(),8.17569e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_766_Device=CPU_Config=(),8.16425e-05
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=49b68de3dce617801a3a6ff5802fe56e7553fb883cc8c2e6b46a541b99926cf9_Device=CPU_Config=(),7.89249e-05
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=66ec8b40ad4b365f3404bfbed95363b751c285fd9f559de2b20060d134c096c9_Device=CPU_Config=(),7.61215e-05
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=5d589f436a268a8f1ce52fec9efc407b4367c419b071277e64d651d37ddeab60_Device=CPU_Config=(),7.55207e-05
-conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_Shape=static_IR=207bc8d50442d7eb86e0a0d5c9643e06e766c47091d7029bacf706e8b0a0de23_Device=CPU_Config=(),7.40618e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_612_Device=CPU_Config=(),7.29748e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_611_Device=CPU_Config=(),7.29748e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_608_Device=CPU_Config=(),7.29748e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_412_Device=CPU_Config=(),7.27745e-05
-conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=f48c18bfc0980bb6f45ac932c4f572e212dbd843d845f23b2b429eb259a45686_Device=CPU_Config=(),7.22024e-05
-conformance_Clamp/ReadIRTest.ImportExport/Op=Clamp.1_Type=f32_Shape=static_IR=Clamp-1_514_Device=CPU_Config=(),7.18019e-05
-conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=dynamic_IR=f0e68c6319c36993236e12b3eb3f6a4a771239f07d2927128113ef68c96656d2_Device=CPU_Config=(),7.16589e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=MaxPool-8_264_Device=CPU_Config=(),7.09723e-05
-conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_Shape=static_IR=Loop-5_769_Device=CPU_Config=(),7.0629e-05
-conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_Shape=static_IR=Loop-5_732_Device=CPU_Config=(),7.0629e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=31671f324507d264c28daccd39fd56de740e3c7ecdeed31fcbfc51573bf7cc62_Device=CPU_Config=(),7.05432e-05
-conformance_ConvolutionBackpropData/ReadIRTest.ImportExport/Op=ConvolutionBackpropData.1_Type=f32_Shape=static_IR=24b4ad9c39243a51c1deb064e5744fe9cfe264f9339b478bb9fbf27ea619c6cf_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=cd1ed2eccd473c7c736e236bf36b52d47c6d83cca0321a8e5acfeb63e2074d44_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=bb719a1f1649dd0b306250af0048fa40975de77303e4d632cbaccc80d287087e_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=acda58b57ee88605661372a4d2d9c70908e84e5fea407e2331fe120bb5867608_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9c5dd7d2ba5667872200158addb672ef73bcaa37640db89ac73ece995999a0ae_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=5123f7978940ee25fe5e1a34ba4a6ec5dd70395dbf13a9c5ad7b2d86c90b5df5_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1f4964078feaf71a560dfeb1b71c9f7c8a78fdbd0b7d77409d6f5035a7995f3e_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1e100e688548e6f34fda8af2be3833b53052d0af2bf338dac946f0cdfc3b0705_Device=CPU_Config=(),6.91701e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=02f542434c1f0c3ba50a8deb6bd9ab661951aef25e8f971a78a0f9a86d455ea8_Device=CPU_Config=(),6.91701e-05
-conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=i64_Shape=dynamic_IR=320cf046816dafcd6c7c4b657df754672556751b820f6c6cc9299b08fc84d397_Device=CPU_Config=(),6.73679e-05
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=609788552b7a59bf3cbea272486cfc8985da67b3724f24fdfde5c0ecd971102c_Device=CPU_Config=(),6.73679e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=static_IR=4ed8440de89ec0ad0a35222fcc165ad65109321364ed16c71d356d379e37fcd5_Device=CPU_Config=(),6.73679e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=i64_Shape=dynamic_IR=164e1c5560fc4c7f7cd8f540e8ed9ba8a01cec1542b66172f1345170c21657ee_Device=CPU_Config=(),6.73679e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=4662a5279f0a48bfab43c34ec2132f53f2e421105af00e7eb355497d22e1ef4b_Device=CPU_Config=(),6.73679e-05
-conformance_NonZero/ReadIRTest.ImportExport/Op=NonZero.3_Type=i64_Shape=dynamic_IR=9d7b1a713d5949d31c15df3f480c4663ee2d80a042c068935c2c72e7cbfd8ee4_Device=CPU_Config=(),6.73679e-05
-conformance_Greater/ReadIRTest.ImportExport/Op=Greater.1_Type=boolean_Shape=static_IR=6db037b38ec859e1f063f91f4446b794f9ea7ffa287e7683ba40a98c88e54532_Device=CPU_Config=(),6.73679e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=f5c45f7cdda216154d1a3b9ec22e1c9ef64d670ee2b8913b89b45ff50ca293e6_Device=CPU_Config=(),6.73679e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=3a46cd88dab5883ce7d993e1f276c3b3610992f15e9dac2915959b713f4766cd_Device=CPU_Config=(),6.73679e-05
-conformance_Broadcast/ReadIRTest.ImportExport/Op=Broadcast.3_Type=f32_Shape=static_IR=9b1899b82a479f535932c28b113fc5af2c45df80e3fb60e611db034d49feb0be_Device=CPU_Config=(),6.73679e-05
-conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=674226af6635d4be35c8b2351f0a76d96819e8efc44104731f28f7959d6e1594_Device=CPU_Config=(),6.72821e-05
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=674226af6635d4be35c8b2351f0a76d96819e8efc44104731f28f7959d6e1594_Device=CPU_Config=(),6.72821e-05
-conformance_TopK/ReadIRTest.ImportExport/Op=TopK.3_Type=f32_Shape=static_IR=TopK-3_780_Device=CPU_Config=(),6.58232e-05
-conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=5952466230ef3c0116bdb538c06a6d1d93c777c8d73d49d98701b28997d28450_Device=CPU_Config=(),6.48792e-05
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=322c35d43311e6d9eceab74864fd5dbec2aca5144b62c16321d9c0b77b0d0314_Device=CPU_Config=(),6.4021e-05
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=a8fa871ffec2171a15fd2d4b0f2295c4c7e08378caed66d7a399fa2b17153721_Device=CPU_Config=(),6.36777e-05
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=96fa427f01452126f02777dd5c55062bcffaff9779d8b936bc74a2320770be87_Device=CPU_Config=(),6.36777e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_891_Device=CPU_Config=(),6.36777e-05
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=dynamic_IR=b8c78be79292df1ec63982d1b7a69b0adaccbc7e069cea00c36a2e2cbe711f41_Device=CPU_Config=(),6.0531e-05
-conformance_Interpolate/ReadIRTest.Inference/Op=Interpolate.11_Type=f32_Shape=static_IR=d39c830b6c252804458126e23fe9503705107c21a1a49fda4c6b648276652bb9_Device=CPU_Config=(),5.91579e-05
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=d39c830b6c252804458126e23fe9503705107c21a1a49fda4c6b648276652bb9_Device=CPU_Config=(),5.91579e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d39c131d1dc9b0753deac96e70ede2a09310536078b5ec043a19994366efb571_Device=CPU_Config=(),5.90435e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=823927a1cd84922da020280e5bcd82a8c5241193f17875629fada01f67b52dde_Device=CPU_Config=(),5.85286e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=b2450cffac6de937e816a32481656eec5b8c968c40d5b840df3df622670ac37c_Device=CPU_Config=(),5.80136e-05
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=6b45e0a0e572fdd4d93f1c93dac18fd3758e0664c93cdaa3ab568e7a37ada195_Device=CPU_Config=(),5.66405e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=Concat-1_666_Device=CPU_Config=(),5.62973e-05
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i64_Shape=static_IR=d031592368b1488fac2025b6ec230ebaca971e3c78734a36817e52bec2c8ac1b_Device=CPU_Config=(),5.61256e-05
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=static_IR=557a55917c413eda7b1e28d76cca75a4e42e8052a45d1d58d5e2487cdcb75632_Device=CPU_Config=(),5.61256e-05
-conformance_GroupConvolution/ReadIRTest.ImportExport/Op=GroupConvolution.1_Type=f32_Shape=static_IR=b8056dd237267a291859585289b2b0f5b16c4d9750a34f6f72045e8100f56c96_Device=CPU_Config=(),5.50958e-05
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_541_Device=CPU_Config=(),5.14056e-05
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_223_Device=CPU_Config=(),5.14056e-05
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=bd3e18081ce008e8f6e942ff531536d0dbb4f44c8b8d1767fe301c0596cbb434_Device=CPU_Config=(),5.09765e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d82de8640009f3025b2722cbfaa270f06bc34f0fc54c85a4fd7474be252cc44d_Device=CPU_Config=(),5.09765e-05
-conformance_Subtract/ReadIRTest.ImportExport/Op=Subtract.1_Type=f32_Shape=static_IR=1e77537e44691d894e800acbbb410bbb11b55462e67b43ade22aa6975e5715d7_Device=CPU_Config=(),5.07476e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=88e7722362388c1ed19219da89ea252bdeb00f05531d2f024134270595c580a5_Device=CPU_Config=(),4.89168e-05
-conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=static_IR=e1249b057841a6a8ecdd605a4d73887dc09f31271815bb90c7c1e91b41c4dfe1_Device=CPU_Config=(),4.81445e-05
-conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=MaxPool-8_441_Device=CPU_Config=(),4.7887e-05
-conformance_MaxPool/ReadIRTest.Inference/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=22ae8d3d0f4ba99130074a080593d4bfce691ea1fecc6069063b40aca63cf7b1_Device=CPU_Config=(),0
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=dynamic_IR=MaxPool-8_441_Device=CPU_Config=(),4.7887e-05
-conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=26df036b689f6aaf1436fc55f432e39ed413b933c19cde378539947360acab0a_Device=CPU_Config=(),4.77154e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=26df036b689f6aaf1436fc55f432e39ed413b933c19cde378539947360acab0a_Device=CPU_Config=(),4.77154e-05
-conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=3d5957ca87af757d6050ea3e8f81cbdcabb0ad84688a7fe65d5ebc99cf68f66a_Device=CPU_Config=(),4.74293e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_954_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_944_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_676_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_433_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_342_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_954_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_944_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_676_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_433_Device=CPU_Config=(),4.69144e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_342_Device=CPU_Config=(),4.69144e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_214_Device=CPU_Config=(),4.6142e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=8cdce4d75682fd4e98132e71c5f680dec43464dba6884cb071d9e32f5cbb8341_Device=CPU_Config=(),4.6142e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_845_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_795_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_564_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_502_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_845_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_795_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_564_Device=CPU_Config=(),4.58273e-05
-conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_502_Device=CPU_Config=(),4.58273e-05
-conformance_FakeQuantize/ReadIRTest.ImportExport/Op=FakeQuantize.1_Type=f32_Shape=static_IR=80fab1a0645bdb57459e3d4abb0da3e23c0cb2c5e6c403cc985811685dbebb51_Device=CPU_Config=(),4.53124e-05
-conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=static_IR=191167d5b9c2fc58d278fe912cee18d02feb3eee41966c6ea78e48136855a211_Device=CPU_Config=(),4.48547e-05
-conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=static_IR=191167d5b9c2fc58d278fe912cee18d02feb3eee41966c6ea78e48136855a211_Device=CPU_Config=(),4.48547e-05
-conformance_ScatterElementsUpdate/ReadIRTest.Inference/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=b65ba26b5f26a196ca080ddb89c75ab3f76f2dd3e9b84be73a656cb28f839b56_Device=CPU_Config=(),4.48547e-05
-conformance_ScatterElementsUpdate/ReadIRTest.ImportExport/Op=ScatterElementsUpdate.12_Type=f32_Shape=static_IR=b65ba26b5f26a196ca080ddb89c75ab3f76f2dd3e9b84be73a656cb28f839b56_Device=CPU_Config=(),4.48547e-05
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_947_Device=CPU_Config=(),4.4111e-05
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_877_Device=CPU_Config=(),4.4111e-05
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_815_Device=CPU_Config=(),4.4111e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_993_Device=CPU_Config=(),4.39393e-05
-conformance_VariadicSplit/ReadIRTest.ImportExport/Op=VariadicSplit.1_Type=f32_Shape=static_IR=3b212b5b25e823c4ed564202524f55ef1dcb95ec75226679aabb68d7780d5b52_Device=CPU_Config=(),4.35388e-05
-conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=static_IR=5854c5cde01f584fa60a6e4bed4153e9e3ec358bf9a7cc1af15d1e52c198aacc_Device=CPU_Config=(),4.35388e-05
-conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=Softmax-8_879_Device=CPU_Config=(),4.35388e-05
-conformance_GatherND/ReadIRTest.ImportExport/Op=GatherND.8_Type=f32_Shape=static_IR=cf26cf5748bbd3a2f8c9393be832df1a214cf87e5ff4d3b1d5d8b5c1f90e5394_Device=CPU_Config=(),4.35388e-05
-conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=e851b62890c8f207a73f9b81b88cffec635f12e830225eed0c2929e46f2ffe73_Device=CPU_Config=(),4.34244e-05
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i64_Shape=dynamic_IR=a9538213f2f4d221fdaa07b7b193a2936b01f23cff866ee94716df79b6d5ddba_Device=CPU_Config=(),4.34244e-05
-conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=f32_Shape=static_IR=aa81c2f1e41cac01c5d0f09739f1e5ecf537ec67b1972688b4d179ab682d4cfd_Device=CPU_Config=(),4.34244e-05
-conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.3_Type=i64_Shape=dynamic_IR=89814392656dabf90f43fa5f7b06164c611e5ac4db815c7639215614a3abb387_Device=CPU_Config=(),4.34244e-05
-conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=8eab4ee3e0c015853a6e31a00b3903e83a3975d4c115d59aa91d04190b394e68_Device=CPU_Config=(),4.34244e-05
-conformance_ReduceMin/ReadIRTest.ImportExport/Op=ReduceMin.1_Type=i32_Shape=static_IR=564549da290bbddca482dbee4b678d7bb6eefbb295f63f41c695c84b6f7be2eb_Device=CPU_Config=(),4.34244e-05
-conformance_NonMaxSuppression/ReadIRTest.ImportExport/Op=NonMaxSuppression.9_Type=i64_Shape=dynamic_IR=e6b6f6c92406a0558158b2a1175f779c073ec4eedc0bfc5907583305ea9b34b5_Device=CPU_Config=(),4.34244e-05
-conformance_Gather/ReadIRTest.Inference/Op=Gather.8_Type=f32_Shape=dynamic_IR=d3a7f7f03964207ea8b253f361f11be37c2905f3f7c8ff731c29ef40d55b237f_Device=CPU_Config=(),4.34244e-05
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=d3a7f7f03964207ea8b253f361f11be37c2905f3f7c8ff731c29ef40d55b237f_Device=CPU_Config=(),4.34244e-05
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=a0dfba1f746d9d0ad8b53b22a62af48052b010a9fd1bd70339b1cf02a6be818c_Device=CPU_Config=(),4.34244e-05
-conformance_Gather/ReadIRTest.ImportExport/Op=Gather.8_Type=f32_Shape=dynamic_IR=68a6ff0135ae81327e5b3b0ab287f6cbaa647d678a14397858892023f6d7b043_Device=CPU_Config=(),4.34244e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=u64_Shape=dynamic_IR=59d61fe3ad99f1f9c01aa355306a4b207386f4689b13b6c6208d970aaf54281b_Device=CPU_Config=(),4.34244e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=017893ca7e42922082bbaf2a75a788529cc40e74bca9654be374547fd019b49d_Device=CPU_Config=(),4.34244e-05
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i64_Shape=dynamic_IR=d7d40ea6b1b971b170cbe1762bf30caf122b054658fffa34e069566c6be8d26b_Device=CPU_Config=(),4.34244e-05
-conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_231_Device=CPU_Config=(),4.24232e-05
-conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=f5ebeb377ad81fb33a4832b1693727d7a59b7d4378bfa4a701d8aad819812f64_Device=CPU_Config=(),4.24232e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d6a3851cea23fa42a953d697cc26c02f5a18f8a20b93d8e771ffa1ac70528a89_Device=CPU_Config=(),4.24232e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b759a61e9c6e0dcf58218fd832b063613c40de6671214ddda32c5739d7150ec7_Device=CPU_Config=(),4.24232e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=54223c1ee835541f5f857e40867f7f82d9f8835252f05f1933f47166d6479439_Device=CPU_Config=(),4.24232e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=40f3f19b0836cfadc8bbbda05dee0acbff7e5476b2e2b5989dbeb31729ecd1b0_Device=CPU_Config=(),4.24232e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=014a9c2e7e595ae947ccc439204fac2863924553578c7552cac09c546a42dde9_Device=CPU_Config=(),4.24232e-05
-conformance_Split/ReadIRTest.ImportExport/Op=Split.1_Type=f32_Shape=static_IR=eeff9a413124b377afabe49671874975ba13276353a0b3d722702962ebbe51e0_Device=CPU_Config=(),4.02491e-05
-conformance_Minimum/ReadIRTest.ImportExport/Op=Minimum.1_Type=f32_Shape=static_IR=3766e089cf04fcee6bd548c428c436105f86230a9f2991dd184f2505e275ea6d_Device=CPU_Config=(),3.96484e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=2d7f7b1c9321267857c30413110d6410c3e5c0402b0ec5ae0efe7ffd53052c14_Device=CPU_Config=(),3.91335e-05
-conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=cf2ecc84915c9424ae3e4b93f0be8ac370a5d4003d7d7673f9a0f7008068efd5_Device=CPU_Config=(),3.78748e-05
-conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=e0b77b88d0d21ed244dae0e43fd4b1764a20f142cfdf161a8e9e484c8e87f72a_Device=CPU_Config=(),3.78748e-05
-conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=31ee4ddebf2a2e33e19ef55935029cec0c3bb0ef2351fdc321c558819e29418e_Device=CPU_Config=(),3.75029e-05
-conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=fca6a730d61dd7c4cc645bf091a950289b509ff10f30aecfae394014e8bf9b28_Device=CPU_Config=(),3.75029e-05
-conformance_HSwish/ReadIRTest.ImportExport/Op=HSwish.4_Type=f32_Shape=static_IR=227ba60a7156653619608c942d8b570d7704b17e8d616f4b071f1402d7f12383_Device=CPU_Config=(),3.68163e-05
-conformance_GRUSequence/ReadIRTest.ImportExport/Op=GRUSequence.5_Type=f32_Shape=static_IR=b7937dd6044ae051c5341db0b1772a270da3c6a8643eb14f727b89e02587a2ed_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=e01b754d2bb3780bc2e1c5f23cb49ab847d9835c530b4720facd66dde8c3d96b_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=dec9f8261a4738e527e92223e7aa09e2f905766844904273c63b00244df0c555_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d44a8a6422066261a025b4be4192706b1579163cef12944c453797070e3a2ee3_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=baeb82c17d72e93406521482104b1c5b85fcbbdac22a501acab9bd04a54bc764_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7d6e53d9e576276b2f2949e5f94599d6bb56e516492e395ec86894ea2f8df688_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=59368a22dcaff273e44b809fba7f13b5966d978f25db9d39cfa3726b71bb2b04_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=513d7cbc0e61575ebdb8fcc7e0b92d0f6085e369c5d7aded8ddd0c37ee693b10_Device=CPU_Config=(),3.46709e-05
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=38d7c53748ad83554735c93023f872dd621fd423404072d5e14cbf6b0079425a_Device=CPU_Config=(),3.46709e-05
-conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=d0b855aeac2caf3ddabe3d38dda46634c742def705766af84bd7b288737e39d0_Device=CPU_Config=(),3.46709e-05
-conformance_Loop/ReadIRTest.ImportExport/Op=Loop.5_Type=i32_Shape=static_IR=Loop-5_653_Device=CPU_Config=(),3.3212e-05
-conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=static_IR=Interpolate-11_753_Device=CPU_Config=(),3.31261e-05
-conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=i64_Shape=static_IR=9a11a2f1bf2d0f652af69fcd5b858f1fdb50af9786d9b4a9e8358d42e52d7863_Device=CPU_Config=(),3.2697e-05
-conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_690_Device=CPU_Config=(),3.2697e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_191_Device=CPU_Config=(),3.2697e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_170_Device=CPU_Config=(),3.2697e-05 -conformance_Convert/ReadIRTest.ImportExport/Op=Convert.1_Type=f32_Shape=static_IR=d54bae455ec3075ebbf2f9ae9cdef66087759a7d46820da8ca200cf757355e81_Device=CPU_Config=(),3.23538e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=035d21b2057f60921ccefb2ac3b3f3c1c8bed9da8a461115696005190d2a5fa5_Device=CPU_Config=(),3.19819e-05 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_998_Device=CPU_Config=(),3.19247e-05 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_660_Device=CPU_Config=(),3.19247e-05 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_142_Device=CPU_Config=(),3.19247e-05 -conformance_Sqrt/ReadIRTest.ImportExport/Op=Sqrt.1_Type=f32_Shape=static_IR=84deded0000080d9336c2c282711105a7acb45b7beb670d423c52f339ee055c9_Device=CPU_Config=(),3.1753e-05 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=7118ee68001e03a010a8dfd307f7483d9500f01070aee67205457ba5328b9a12_Device=CPU_Config=(),3.1753e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=b8ef53e65f3dd18ba0b3a39c01d34dd85bf374c1d7d70733b655cf583ac336fd_Device=CPU_Config=(),3.1753e-05 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=static_IR=62b5862e146727b216449226279800cb77657ca23847e8daeca88d3deaba63b6_Device=CPU_Config=(),3.1753e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_632_Device=CPU_Config=(),3.1753e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=ff80757f15c30b488c3f763ebf4fc03cf05bf4fcd2018ccce49a5ccc247f4d8c_Device=CPU_Config=(),3.14956e-05 -conformance_IDFT/ReadIRTest.ImportExport/Op=IDFT.7_Type=f32_Shape=static_IR=28dce20800dd5a8409c0d50230baa40b875a5a0ddae9961376328d46efbc09d4_Device=CPU_Config=(),3.14956e-05 -conformance_LRN/ReadIRTest.ImportExport/Op=LRN.1_Type=f32_Shape=static_IR=75fe51bfdd72003afac1631ead7a731d8de15f5c586f39bedecbac920138ed06_Device=CPU_Config=(),3.07804e-05 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.4_Type=f32_Shape=static_IR=8dec756b3ed3541ac01747029daaca96225bc88b3d9e221b9f3bab958437ef78_Device=CPU_Config=(),3.04944e-05 -conformance_DetectionOutput/ReadIRTest.ImportExport/Op=DetectionOutput.8_Type=f32_Shape=static_IR=7f598191570ff810bf54a1cf08684687e1d70d4dc9233f29e74830dbe6e64d9d_Device=CPU_Config=(),3.04657e-05 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.1_Type=i64_Shape=static_IR=3c5d6e1848f0939e80d97d86ec65e95d71e86a8ea2e8d4bcd0b2f37b2c59e36c_Device=CPU_Config=(),2.9865e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=11974897f572809d1d6f004d8b15d393927752fa88e37afdaab87999bf598f74_Device=CPU_Config=(),2.91785e-05 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=b171d43d2869e339e27f9513677a72220b3956f64f264b75871f3541ded37a7e_Device=CPU_Config=(),2.91785e-05 
-conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=974270d9310d70ea02593f3ce98769467e1e3295ed999065008e0ce5c88dc63c_Device=CPU_Config=(),2.8921e-05 -conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=143af1d4c87d64c07e394e533782d66ad33330b576f39f7bac20441ac460f947_Device=CPU_Config=(),2.8921e-05 -conformance_TopK/ReadIRTest.Inference/Op=TopK.11_Type=f32_Shape=dynamic_IR=5f34dd2786a2968539b3fc0e6d51fbbee52d5d9b3a59f93807f49e9216e77b5c_Device=CPU_Config=(),2.88924e-05 -conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=dynamic_IR=5f34dd2786a2968539b3fc0e6d51fbbee52d5d9b3a59f93807f49e9216e77b5c_Device=CPU_Config=(),2.88924e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=4e084cb457b8ecf58c405605c47c7391972dc2e7cd693bb21418911beea59092_Device=CPU_Config=(),2.88924e-05 -conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=32b3ff73c08de6c5997cf36275ca4013f1d5e7652614be47ae5281e35a7c50c4_Device=CPU_Config=(),2.83203e-05 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=caa9c1f224cb27f267eac25a84df532b292251bd0e237a8de8c78d0f5085f10a_Device=CPU_Config=(),2.75479e-05 -conformance_ReduceMean/ReadIRTest.ImportExport/Op=ReduceMean.1_Type=f32_Shape=static_IR=505f545bb55297b6d0b0caf8c998ac57336ed44925611f68a58b5d0e4356d6fb_Device=CPU_Config=(),2.75479e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_835_Device=CPU_Config=(),2.75479e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_672_Device=CPU_Config=(),2.75479e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_984_Device=CPU_Config=(),2.75479e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_974_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_923_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_886_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_619_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_465_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_974_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_923_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_886_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_619_Device=CPU_Config=(),2.56313e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_465_Device=CPU_Config=(),2.56313e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=d9386e93f7c1daa16a5517997437eef8feb3961c67d48fa154aaa9718ca78838_Device=CPU_Config=(),2.54024e-05 
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=cf252ca8487e87ca565efda29b7dab1bc6f86522b019861abb0ac6323d23b84b_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b5fdb53b420e560f3c9291881ae2cd2132a8dfab8fbf44c733727e9d0929bc00_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=b3b3a7b1b91d898552d59510f8f1c994921f62deabce9106dba53ad755667666_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=a1aac4cc8ebaa4f4de4478f5d53d23d0b4588611db83ded9e044040da807c238_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=9b97bbbeff4b4872682d10966f3915819aa796aed9e8388d062366691893d64e_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=7c1c9fb877c7c3959c1610b3e3887550ed4e4873e98da735605120ada3c55770_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=659efb374a45b7850eaf40c570f1727c7797ed97673abc09a3dcb9d8555d1597_Device=CPU_Config=(),2.54024e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=1ef155e899cdb23357700e778492a5fae77c8e98df668316c8aaf3cb58ccc420_Device=CPU_Config=(),2.54024e-05 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=4d8525b599684f84bd4c900541b34cd2855974a2739f509ff965896bc03c7bdd_Device=CPU_Config=(),2.54024e-05 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=i64_Shape=static_IR=a746b77e38ec37a9fb67103e9577c73687e8694f981e6083c0f8b9f49d7a7614_Device=CPU_Config=(),2.50591e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_959_Device=CPU_Config=(),2.50591e-05 -conformance_StridedSlice/ReadIRTest.ImportExport/Op=StridedSlice.1_Type=i32_Shape=static_IR=f0e0c32ba5b1abfb0936d7876684d204c898bc8560e28bda6b58d9a7c02b03b6_Device=CPU_Config=(),2.48875e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_949_Device=CPU_Config=(),2.48875e-05 -conformance_Transpose/ReadIRTest.ImportExport/Op=Transpose.1_Type=f32_Shape=dynamic_IR=b7ff1fa55f284c0df98364b614dd89a99c4aabd2a75ea885cb0b8a4471b0bc61_Device=CPU_Config=(),2.38577e-05 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=932ae958cfcc2c49de70c15de97a38301ca0cd60070e8b31f52f1495e23a78a7_Device=CPU_Config=(),2.38577e-05 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=9a2736ae3d787054bcc509653a533a614a63f66af0bccb6cd38d1d0af5c54723_Device=CPU_Config=(),2.38577e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=dynamic_IR=cbffaa2c9b5d9fe202f3f5bfaa43689b38a22adc631604a2c600205907a50655_Device=CPU_Config=(),2.38577e-05 -conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=ba65774de91ef836922146f0bfd7b13f50d3c61abc191890db73d9e07a6b3bba_Device=CPU_Config=(),2.30853e-05 -conformance_Concat/ReadIRTest.ImportExport/Op=Concat.1_Type=f32_Shape=static_IR=c428f5bef879bf8ef11fb84d776e7f21a1e98980f2779184a23cec69464d101e_Device=CPU_Config=(),2.30853e-05 
-conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_Shape=static_IR=3d45d67a7e386c3532753e23e86b7599525a3e0a02e5a69c213e640b3644c731_Device=CPU_Config=(),2.28851e-05 -conformance_TopK/ReadIRTest.ImportExport/Op=TopK.11_Type=f32_Shape=static_IR=6b0b0b4ae8fa7eb4882cb36012eb7b4ef60630041f9138a2969abc947be3dd18_Device=CPU_Config=(),2.17408e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_640_Device=CPU_Config=(),2.11401e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=de3e4d9bce672d2f9d1b636bfbdaae03ee9a1d01125f692487a4e00a73561b45_Device=CPU_Config=(),2.11401e-05 -conformance_Pad/ReadIRTest.ImportExport/Op=Pad.12_Type=f32_Shape=static_IR=092beb3237c090ef1b8693e75c06dd08d99add4365ce4b8637ac565b5805e831_Device=CPU_Config=(),2.09398e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_526_Device=CPU_Config=(),2.09398e-05 -conformance_Gather/ReadIRTest.ImportExport/Op=Gather.1_Type=i64_Shape=static_IR=21f175001cc6de836e25a43d601d7b79ba82e25d7602c277616d7ab9c7e50d9b_Device=CPU_Config=(),2.09398e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=ab3e751461c96ad0b7c06dd5ae4df600180048b3a8045fd9c5dca6923c777115_Device=CPU_Config=(),1.94809e-05 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=16dc31428002b2d3f3ed685ce295dc5377f1a8fe3b87500325b2c6e81b450fc4_Device=CPU_Config=(),1.91662e-05 -conformance_ShapeOf/ReadIRTest.ImportExport/Op=ShapeOf.1_Type=i64_Shape=static_IR=768ef8dfca086085830f4c2b7918968f99267ef176768a4ca1434de3bd7f93e0_Device=CPU_Config=(),1.77359e-05 -conformance_PriorBoxClustered/ReadIRTest.Inference/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_875_Device=CPU_Config=(),1.74213e-05 -conformance_PriorBoxClustered/ReadIRTest.ImportExport/Op=PriorBoxClustered.1_Type=f32_Shape=static_IR=PriorBoxClustered-1_875_Device=CPU_Config=(),1.74213e-05 -conformance_Pad/ReadIRTest.ImportExport/Op=Pad.1_Type=f32_Shape=dynamic_IR=f7720759ec4302d50c8202343aa480abccc973ed5b54f65d388f0e706f998ef5_Device=CPU_Config=(),1.57907e-05 -conformance_Interpolate/ReadIRTest.ImportExport/Op=Interpolate.11_Type=f32_Shape=dynamic_IR=4b8c349017646c48fa04047f30f0ad700bd67f3edeba1d66fcd110154c5016f8_Device=CPU_Config=(),1.57907e-05 -conformance_AvgPool/ReadIRTest.ImportExport/Op=AvgPool.1_Type=f32_Shape=static_IR=AvgPool-1_1033_Device=CPU_Config=(),1.29587e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=i32_Shape=static_IR=57e8a1874c5b3f500c9d28adfc3c407d7a0c1bd061d46dc0d0ed2b27e263dc92_Device=CPU_Config=(),1.23579e-05 -conformance_Tile/ReadIRTest.ImportExport/Op=Tile.1_Type=f32_Shape=static_IR=339fb1130cd4308085ccdf47c50f16cba63456e42e1f563e6ef7da466256a0a0_Device=CPU_Config=(),1.23579e-05 -conformance_Squeeze/ReadIRTest.ImportExport/Op=Squeeze.1_Type=f32_Shape=static_IR=3f7ce2b6d7977f47f72a692c54d7b8ceba8612d64468116a9189bf23423a0507_Device=CPU_Config=(),1.23579e-05 -conformance_SpaceToDepth/ReadIRTest.ImportExport/Op=SpaceToDepth.1_Type=f32_Shape=static_IR=53c74fcce6c2e4608c874b7334a35ffe89bbbaf436ad7ee2527a2a4361e3ef62_Device=CPU_Config=(),1.23579e-05 -conformance_MaxPool/ReadIRTest.ImportExport/Op=MaxPool.8_Type=f32_Shape=static_IR=MaxPool-8_448_Device=CPU_Config=(),1.23579e-05 
-conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=80f8d42b9528ab42df0f933a1b2f513e1873f78c763ab8ea6c2857af20e945ad_Device=CPU_Config=(),1.23579e-05 -conformance_Convolution/ReadIRTest.ImportExport/Op=Convolution.1_Type=f32_Shape=static_IR=010e40cb83a0516ff7ac30246b841a178fada738ab81a7c6938bce5f842bd957_Device=CPU_Config=(),1.23579e-05 -conformance_CTCGreedyDecoderSeqLen/ReadIRTest.ImportExport/Op=CTCGreedyDecoderSeqLen.6_Type=i64_Shape=static_IR=a2df483ee8bb9b66998376e2180988db6dc30f0081803b56fa38e63006d12acd_Device=CPU_Config=(),1.23579e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=1edb2383f309489c62d50e904d86df71ccff3bfda927a90caff1cf77500a55e4_Device=CPU_Config=(),1.1843e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=169084543f933dbb1ef2e0ed10011a29532eb63d0dc7a7c68a3ed4c80b3fc734_Device=CPU_Config=(),1.1843e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=dynamic_IR=0152e03f7a03f2917dc4ae13ebc15763e8639b7211fc819d56b55f343ed099b6_Device=CPU_Config=(),1.1843e-05 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=8c67a77b3545adcfaad18010ac5479423027555b6ffaf8564ab7802906ec18ac_Device=CPU_Config=(),1.1843e-05 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=dynamic_IR=797e676efcac3575336b5997f0b5176257276e8f4a37ca82a88505227004b06b_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.Inference/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=ade571535fa87f3ef6a5bea4eb3583e27213c8afb6b650584e42322bf18be841_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=e7f43fac89040f0f616aee19205f4f8d3fba86c6bf1c32af15961529a07a3cb3_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=ade571535fa87f3ef6a5bea4eb3583e27213c8afb6b650584e42322bf18be841_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=71ec8716d557b4a2009449568c2412c9af5084c12ba009e00dba164c96496a66_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=2c3dbad159b2762a5223bdba5272b59850ee3703ed5f38c0762f5dd76767624a_Device=CPU_Config=(),1.1843e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=dynamic_IR=0ebabff73dd81b30539621df40f8faa148bc3382a048919e5a787c2d13e842fe_Device=CPU_Config=(),1.1843e-05 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=fdcab01e2bfd1d42e82e02abbc9485a70bbca7f6699c43dbd32e7cfcd32f10e6_Device=CPU_Config=(),1.1843e-05 -conformance_Power/ReadIRTest.ImportExport/Op=Power.1_Type=f32_Shape=dynamic_IR=5beacd2bb9191defe3872fe637eb2fcb831437af69e9164251f4546e5cb1156f_Device=CPU_Config=(),1.1843e-05 -conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=6f0d2c60afa3c010ecabbc0f54f50ab607b60c28ec71b00612c9fa1dea17224f_Device=CPU_Config=(),1.1843e-05 -conformance_Multiply/ReadIRTest.Inference/Op=Multiply.1_Type=f32_Shape=dynamic_IR=f875aa55e00c0b36cfb97f074f5efc77dfacced6f86eb726269c3d888bc4db71_Device=CPU_Config=(),0 -conformance_Multiply/ReadIRTest.ImportExport/Op=Multiply.1_Type=f32_Shape=dynamic_IR=6f0d2c60afa3c010ecabbc0f54f50ab607b60c28ec71b00612c9fa1dea17224f_Device=CPU_Config=(),1.1843e-05 
-conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=dynamic_IR=9fabcc0b789906e2b65e7195462e0c915ad56a65b049e4d68f762c28437efddc_Device=CPU_Config=(),1.1843e-05 -conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=dynamic_IR=51dcd5f3e64d8126b3816a76d8ec0bca37f4b662d2b90be115af2be4e2d72c79_Device=CPU_Config=(),1.1843e-05 -conformance_Divide/ReadIRTest.ImportExport/Op=Divide.1_Type=f32_Shape=dynamic_IR=b887e471773f821d63d2e9cee6d46a451a771ae23e5ab37a199db58e1a8865c8_Device=CPU_Config=(),1.1843e-05 -conformance_Add/ReadIRTest.Inference/Op=Add.1_Type=f32_Shape=dynamic_IR=cad5dd8f018be8f8d628cdd3dc1043c97ab0ee4ae39cd321785e3b624fb96f6d_Device=CPU_Config=(),1.1843e-05 -conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=f32_Shape=dynamic_IR=cad5dd8f018be8f8d628cdd3dc1043c97ab0ee4ae39cd321785e3b624fb96f6d_Device=CPU_Config=(),1.1843e-05 -conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=6f175bde26572d6d5844c630d03173dffd7c70efb78d45336b3aecc5399fa8ca_Device=CPU_Config=(),1.17286e-05 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.1_Type=f32_Shape=static_IR=0d4482e1f7ce2b8c899d93e76f0c8c8377dc35953ff1d6b0e69d9f00bb752183_Device=CPU_Config=(),1.14711e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=d124732486e130ab64410fc881d7eeca2417803af4affe42a16df10c990a6a99_Device=CPU_Config=(),1.01266e-05 -conformance_Unsqueeze/ReadIRTest.ImportExport/Op=Unsqueeze.1_Type=f32_Shape=static_IR=97375897a0f3c0095da86f90270be3d40a692e97dc0dcba3a8c833ecbb5248ac_Device=CPU_Config=(),1.01266e-05 -conformance_Softmax/ReadIRTest.ImportExport/Op=Softmax.8_Type=f32_Shape=static_IR=Softmax-8_893_Device=CPU_Config=(),1.01266e-05 -conformance_Reshape/ReadIRTest.ImportExport/Op=Reshape.1_Type=f32_Shape=static_IR=423b4baae3be39bf92c625a121c947c162273fc186a91c0ae9b102d573b8ea8b_Device=CPU_Config=(),1.01266e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=de31ce5d6727d121be352741e99f06c4c7f5b30c4a92e1ccd2742817f5c7c014_Device=CPU_Config=(),1.01266e-05 -conformance_ReduceSum/ReadIRTest.ImportExport/Op=ReduceSum.1_Type=f32_Shape=static_IR=b052b287437bea9d3b10bd946b0302626eef0bbfa5f8b134beae65f91e2e33fd_Device=CPU_Config=(),1.01266e-05 -conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=bb5b435512cb922a87fb89af942ea55a0f1ba9e158ac9a6a55f7daf98ac93883_Device=CPU_Config=(),1.01266e-05 -conformance_NormalizeL2/ReadIRTest.ImportExport/Op=NormalizeL2.1_Type=f32_Shape=static_IR=7a346953eb65f172bdf3614c4851f338b3dbb0517d5a6168d5033a78474b2e9b_Device=CPU_Config=(),1.01266e-05 -conformance_Maximum/ReadIRTest.ImportExport/Op=Maximum.1_Type=f32_Shape=static_IR=b46839718ab32e78bdebd09281e08d06506bdbe17a790032d5a8cd2df3c2719e_Device=CPU_Config=(),1.01266e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_994_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_844_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_842_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_761_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_350_Device=CPU_Config=(),1.00408e-05 
-conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_224_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_994_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_844_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_842_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_761_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_350_Device=CPU_Config=(),1.00408e-05 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_224_Device=CPU_Config=(),1.00408e-05 -conformance_LSTMSequence/ReadIRTest.ImportExport/Op=LSTMSequence.5_Type=f32_Shape=static_IR=1270640cd6b52779c1f6da011ec6ecedb141f03134110fcd8ec4a3ab8c27f9b4_Device=CPU_Config=(),9.78337e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_914_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_880_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_680_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_531_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_470_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_405_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_914_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_880_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_680_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_531_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_470_Device=CPU_Config=(),9.61173e-06 -conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_405_Device=CPU_Config=(),9.61173e-06 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_772_Device=CPU_Config=(),8.83936e-06 -conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_765_Device=CPU_Config=(),8.83936e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_977_Device=CPU_Config=(),8.52469e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_620_Device=CPU_Config=(),8.52469e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_595_Device=CPU_Config=(),8.52469e-06 -conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_365_Device=CPU_Config=(),8.52469e-06 
-conformance_PriorBox/ReadIRTest.Inference/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_175_Device=CPU_Config=(),8.52469e-06
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_977_Device=CPU_Config=(),8.52469e-06
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_620_Device=CPU_Config=(),8.52469e-06
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_595_Device=CPU_Config=(),8.52469e-06
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_365_Device=CPU_Config=(),8.52469e-06
-conformance_PriorBox/ReadIRTest.ImportExport/Op=PriorBox.1_Type=f32_Shape=static_IR=PriorBox-1_175_Device=CPU_Config=(),8.52469e-06
-conformance_PRelu/ReadIRTest.ImportExport/Op=PRelu.1_Type=f32_Shape=static_IR=20e7e74f55eb5fb78014cce7e0665d6925bbefd708dd9ccff12dbfbea2a330dd_Device=CPU_Config=(),5.69266e-06
-conformance_RegionYolo/ReadIRTest.ImportExport/Op=RegionYolo.1_Type=f32_Shape=static_IR=RegionYolo-1_750_Device=CPU_Config=(),5.06332e-06
-conformance_Add/ReadIRTest.ImportExport/Op=Add.1_Type=i32_Shape=static_IR=28f23780d4ca0d40671caf79d5cd9223ad8f6dc2fa5ade2521f3d99586eeeb7f_Device=CPU_Config=(),9.72615e-07
-conformance_Convolution/ReadIRTest.Inference/Op=Convolution.1_Type=f32_Shape=dynamic_IR=c301804445f273eef62f41f02204711d9d6e571da28c76ab447d7d90983b0032_Device=CPU_Config=(),0.000113281
-conformance/OpImplCheckTest.checkPluginImplementation/Function=Multinomial_opset13_Device=CPU_Config=(),1
-conformance/OpImplCheckTest.checkPluginImplementation/Function=LSTMSequence_opset1_Device=CPU_Config=(),1
\ No newline at end of file

From ab8dc10b20baf70b6a2411a849e3aa51bcdc58e3 Mon Sep 17 00:00:00 2001
From: Roman Kazantsev
Date: Fri, 12 Jan 2024 18:37:07 +0400
Subject: [PATCH 30/43] [GHA] Update MO deps (#22130)

* [GHA] Update MO deps

Signed-off-by: Kazantsev, Roman

* Update .github/components.yml

---------

Signed-off-by: Kazantsev, Roman
---
 .github/components.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/components.yml b/.github/components.yml
index 77311e4e332983..5431054cf11c6f 100644
--- a/.github/components.yml
+++ b/.github/components.yml
@@ -199,6 +199,9 @@ IE_Tests:
       - IR_FE
 
 MO:
+  revalidate:
+    - PyTorch_FE
+    - TF_FE
   build:
     - Python_API

From a30103869465de561fbb27f6bfe7752563b1bb43 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Fri, 12 Jan 2024 18:55:02 +0400
Subject: [PATCH 31/43] Updated build docs for Python API (#22134)

* Updated build docs for Python API

* Removed legacy variables

* Update docs/dev/build_windows.md

Co-authored-by: Sebastian Golebiewski

---------

Co-authored-by: Sebastian Golebiewski
---
 cmake/packaging/rpm.cmake          |  1 -
 docs/dev/build_linux.md            | 12 ++++--------
 docs/dev/build_raspbian.md         |  3 +--
 docs/dev/build_windows.md          | 10 +++-------
 src/bindings/python/CMakeLists.txt |  1 -
 5 files changed, 8 insertions(+), 19 deletions(-)

diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake
index e6bb1aca1c50e0..c36610ce8c749f 100644
--- a/cmake/packaging/rpm.cmake
+++ b/cmake/packaging/rpm.cmake
@@ -276,7 +276,6 @@ macro(ov_cpack_settings)
             ov_rpm_add_rpmlint_suppression("${python_component}"
                 # all directories
                 "non-standard-dir-perm /usr/lib64/${pyversion}/site-packages/openvino/*"
-                "non-standard-dir-perm /usr/lib64/${pyversion}/site-packages/ngraph/*"
                 )
         endif()

diff --git a/docs/dev/build_linux.md b/docs/dev/build_linux.md
index eb98cb2c744ac6..395a2dcb139ec8 100644
--- a/docs/dev/build_linux.md
+++ b/docs/dev/build_linux.md
@@ -51,7 +51,7 @@ The software was validated on:
 4. OpenVINO Runtime uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
    ```sh
    cmake -DCMAKE_BUILD_TYPE=Release ..
-   make --jobs=$(nproc --all)
+   cmake --build . --parallel
    ```
 
 The process may take some time to finish.
@@ -68,19 +68,15 @@ You can use the following additional build options:
 - OpenVINO offers several CMake options that can be used to build a custom OpenVINO runtime, which can speed up compilation. These options allow you to skip compilation of other plugins and frontends by disabling/enabling them. You can find a list of available options [here](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/cmake_options_for_custom_compilation.md)
 - To build the OpenVINO Runtime Python API:
-  1. Install all additional packages (e.g., cython and opencv) listed in the `/src/bindings/python/src/compatibility/openvino/requirements-dev.txt` file:
-     ```sh
-     pip install -r requirements-dev.txt
-     ```
-  2. Enable the `-DENABLE_PYTHON=ON` option in the CMake step above (Step 4). To specify an exact Python version, use the following options (requires cmake 3.16 and higher):
+  1. Enable the `-DENABLE_PYTHON=ON` option in the CMake step above (Step 4). To specify an exact Python version, use the following options (requires cmake 3.16 and higher):
      ```
     -DPython3_EXECUTABLE=/usr/bin/python3.8
     ```
-  3. To build a wheel package (.whl), enable the `-DENABLE_WHEEL=ON` option in the CMake step above (Step 4), and install requirements:
+  2. To build a wheel package (.whl), enable the `-DENABLE_WHEEL=ON` option in the CMake step above (Step 4), and install requirements:
     ```sh
     pip install -r /src/bindings/python/wheel/requirements-dev.txt
     ```
-  4. After the build process finishes, export the newly built Python libraries to the user environment variables:
+  3. After the build process finishes, export the newly built Python libraries to the user environment variables:
     ```
     export PYTHONPATH=/bin/intel64/Release/python:$PYTHONPATH
     export LD_LIBRARY_PATH=/bin/intel64/Release:$LD_LIBRARY_PATH
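> Note (illustrative, not part of this patch): taken together, the updated `build_linux.md` steps reduce to roughly the shell session below. `<openvino_repo>` and the Python interpreter path are placeholders; the document itself remains the authoritative reference for flags.

```sh
# Hypothetical end-to-end Linux build of the Python bindings per the updated docs.
cd <openvino_repo> && mkdir -p build && cd build

# Configure once; -DENABLE_PYTHON=ON builds the bindings,
# -DENABLE_WHEEL=ON would additionally produce a .whl package.
cmake -DCMAKE_BUILD_TYPE=Release \
      -DENABLE_PYTHON=ON \
      -DPython3_EXECUTABLE=/usr/bin/python3.8 \
      ..

# The generator-agnostic build invocation this patch switches the docs to.
cmake --build . --parallel

# Make the freshly built bindings importable from the current shell.
export PYTHONPATH=<openvino_repo>/bin/intel64/Release/python:$PYTHONPATH
export LD_LIBRARY_PATH=<openvino_repo>/bin/intel64/Release:$LD_LIBRARY_PATH
```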
diff --git a/docs/dev/build_raspbian.md b/docs/dev/build_raspbian.md
index 6b81e9d3c446fb..3fcf07f2cddde5 100644
--- a/docs/dev/build_raspbian.md
+++ b/docs/dev/build_raspbian.md
@@ -39,8 +39,7 @@ git clone --recurse-submodules --single-branch --branch=master https://github.co
 ## Additional Build Options
 - To build Python API, install `libpython3-dev:armhf` and `python3-pip`
-  packages using `apt-get`; then install `numpy` and `cython` python modules
-  via `pip3`, adding the following options:
+  packages using `apt-get`; add the following options:
   ```sh
   -DENABLE_PYTHON=ON \
   -DPython3_EXECUTABLE=/usr/bin/python3.8

diff --git a/docs/dev/build_windows.md b/docs/dev/build_windows.md
index bd326803513767..a8f906cadcd0a8 100644
--- a/docs/dev/build_windows.md
+++ b/docs/dev/build_windows.md
@@ -51,19 +51,15 @@ Supported configurations:
 ### Additional Build Options
 - To build the OpenVINO Runtime Python API:
-  1. First, install all additional packages (e.g., cython) listed in the file:
-     ```sh
-     pip install -r \src\bindings\python\src\compatibility\openvino\requirements-dev.txt
-     ```
-  2. Second, enable the `-DENABLE_PYTHON=ON` in the CMake (Step #3) option above. To specify an exact Python version, use the following options (requires cmake 3.16 and higher):
+  1. Enable the `-DENABLE_PYTHON=ON` in the CMake (Step #3) option above. To specify an exact Python version, use the following options (requires cmake 3.16 and higher):
     ```sh
     -DPython3_EXECUTABLE="C:\Program Files\Python11\python.exe"
     ```
-  3. To build a wheel package (.whl), enable the `-DENABLE_WHEEL=ON` option in the CMake step above (Step 4), and install requirements:
+  2. To build a wheel package (.whl), enable the `-DENABLE_WHEEL=ON` option in the CMake step above (Step 4), and install requirements:
     ```sh
     pip install -r \src\bindings\python\wheel\requirements-dev.txt
     ```
-  4. After the build process finishes, export the newly built Python libraries to the user environment variables:
+  3. After the build process finishes, export the newly built Python libraries to the user environment variables:
     ```
     set PYTHONPATH=/bin//Release/python;%PYTHONPATH%
     set OPENVINO_LIB_PATHS=/bin//Release;%OPENVINO_LIB_PATH%

diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt
index efd9c2ea664502..ec4661e2a73cc0 100644
--- a/src/bindings/python/CMakeLists.txt
+++ b/src/bindings/python/CMakeLists.txt
@@ -281,7 +281,6 @@ macro(ov_define_setup_py_dependencies)
     list(APPEND ov_setup_py_deps
         ${openvino_py_files}
-        ${compat_ngraph_py_files}
         "${CMAKE_CURRENT_SOURCE_DIR}/wheel/setup.py"
         "${OpenVINOPython_SOURCE_DIR}/requirements.txt"
         "${OpenVINOPython_SOURCE_DIR}/wheel/readme.txt"
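> Note (illustrative, not part of this patch): with the bindings built and `PYTHONPATH`/library paths exported as the updated documents describe, a one-line import check is a quick way to validate the environment. The device list printed is machine-dependent.

```python
# Minimal smoke test for locally built OpenVINO Python bindings (illustrative).
from openvino.runtime import Core

core = Core()
# Lists the device plugins this build can see, e.g. ['CPU'] on a typical dev machine.
print(core.available_devices)
```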
From c169a7554c646c9d9c990e86aff73be4bf07feb5 Mon Sep 17 00:00:00 2001
From: Sebastian Golebiewski
Date: Fri, 12 Jan 2024 16:47:51 +0100
Subject: [PATCH 32/43] [DOCS] Removal of docs for deprecated POT (#22104)

* Removal of docs for deprecated POT

* pot deprecated removal

* fix pot reference

* fix references

---------

Co-authored-by: msmykx <101244365+msmykx-intel@users.noreply.github.com>
---
 .../openvino_legacy_features.rst              |  49 +--
 .../post_training_optimization_tool.rst       | 102 -----
 .../pot_api_reference.rst                     | 404 ------
 .../pot_cli.rst                               | 115 -----
 .../configuration_file_description.rst        |  78 ----
 .../pot_cli/simplified_mode.rst               |  79 ----
 .../pot_examples.rst                          |  20 -
 .../pot_examples/pot_api_examples.rst         |  78 ----
 .../pot_example_3d_segmentation.rst           |  43 --
 .../pot_example_classification.rst            |  42 --
 .../pot_example_face_detection.rst            |  50 ---
 .../pot_example_object_detection.rst          |  41 --
 .../pot_example_segmentation.rst              |  46 --
 .../pot_api_examples/pot_example_speech.rst   |  49 ---
 .../pot_examples/pot_cli_example.rst          | 225 ----
 .../pot_faq.rst                               | 118 -----
 .../protecting_model.rst                      |  90 ----
 .../quantization_best_practices.rst           | 120 ------
 .../saturation_issue.rst                      |  55 ---
 .../quantizing_models.rst                     | 196 ---
 .../default_quantization_algorithm.rst        | 201 ---
 .../quantizing_models_with_accuracy.rst       | 205 ---
 .../accuracy_aware_algorithm.rst              | 134 ------
 .../openvino_samples/hello_classification.rst |   2 +-
 .../Device_Plugins/CPU.rst                    | 173 ++++----
 .../Device_Plugins/GNA.rst                    | 120 +++---
 .../images/default_quantization_flow.svg      |   3 -
 .../_static/images/workflow_simple.svg        |   3 -
 28 files changed, 164 insertions(+), 2677 deletions(-)
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.rst
 delete mode 100644 docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.rst
 delete mode 100644 docs/sphinx_setup/_static/images/default_quantization_flow.svg
 delete mode 100644 docs/sphinx_setup/_static/images/workflow_simple.svg

diff --git a/docs/articles_en/documentation/openvino_legacy_features.rst b/docs/articles_en/documentation/openvino_legacy_features.rst
index 772f6721c4eb89..dfedd4ef01dd7b 100644
--- a/docs/articles_en/documentation/openvino_legacy_features.rst
+++ b/docs/articles_en/documentation/openvino_legacy_features.rst
@@ -14,16 +14,14 @@ Legacy Features and Components
    OpenVINO API 2.0 transition
    Open Model ZOO
    Apache MXNet, Caffe, and Kaldi
-   Post-training Optimization Tool
-
 
-Since OpenVINO has grown very rapidly in recent years, some of its features 
-and components have been replaced by other solutions. Some of them are still 
-supported to assure OpenVINO users are given enough time to adjust their projects, 
-before the features are fully discontinued. 
+Since OpenVINO has grown very rapidly in recent years, some of its features
+and components have been replaced by other solutions. Some of them are still
+supported to assure OpenVINO users are given enough time to adjust their projects,
+before the features are fully discontinued.
 
-This section will give you an overview of these major changes and tell you how 
+This section will give you an overview of these major changes and tell you how
 you can proceed to get the best experience and results with the current OpenVINO
 offering.
@@ -32,10 +30,10 @@ offering.
 | *New solution:* OpenVINO Runtime includes all supported components
 | *Old solution:* discontinuation planned for OpenVINO 2025.0
 |
-| OpenVINO Development Tools used to be the OpenVINO package with tools for 
-  advanced operations on models, such as Model conversion API, Benchmark Tool, 
-  Accuracy Checker, Annotation Converter, Post-Training Optimization Tool, 
-  and Open Model Zoo tools. Most of these tools have been either removed, 
+| OpenVINO Development Tools used to be the OpenVINO package with tools for
+  advanced operations on models, such as Model conversion API, Benchmark Tool,
+  Accuracy Checker, Annotation Converter, Post-Training Optimization Tool,
+  and Open Model Zoo tools. Most of these tools have been either removed,
   replaced by other solutions, or moved to the OpenVINO Runtime package.
 | :doc:`See how to install Development Tools `
@@ -44,16 +42,16 @@ offering.
 | *New solution:* Direct model support and OpenVINO Converter (OVC)
 | *Old solution:* Legacy Conversion API discontinuation planned for OpenVINO 2025.0
 |
-| The role of Model Optimizer and later the Conversion API was largely reduced 
+| The role of Model Optimizer and later the Conversion API was largely reduced
   when all major model frameworks became supported directly. For converting model
-  files explicitly, it has been replaced with a more light-weight and efficient 
+  files explicitly, it has been replaced with a more light-weight and efficient
   solution, the OpenVINO Converter (launched with OpenVINO 2023.1).
 | :doc:`See how to use OVC `
 | :doc:`See how to transition from the legacy solution `
 
 | **OpenVINO Deployment Manager**
 | *New solution:* the tool is no longer needed
 | *Old solution:* discontinuation planned for OpenVINO 2024.0
 |
 | It is recommended to explore alternative deployment solutions available in OpenVINO.
 | :doc:`See how to deploy locally `
@@ -88,11 +86,10 @@ offering.
 | **Post-training Optimization Tool (POT)**
 | *New solution:* NNCF extended in OpenVINO 2023.0
 | *Old solution:* POT discontinuation planned for 2024.0
-| 
+|
 | Neural Network Compression Framework (NNCF) now offers the same functionality as POT,
-  apart from its original feature set. It is currently the default tool for performing 
+  apart from its original feature set. It is currently the default tool for performing
   both, post-training and quantization optimizations, while POT is considered deprecated.
-| :doc:`See the deprecated POT documentation `
 | :doc:`See how to use NNCF for model optimization `
 | `Check the NNCF GitHub project, including documentation `__
@@ -101,7 +98,7 @@ offering.
 | *New solution:* API 2.0 launched in OpenVINO 2022.1
 | *Old solution:* discontinuation planned for OpenVINO 2024.0
 |
-| API 1.0 (Inference Engine and nGraph) is now deprecated. It can still be 
+| API 1.0 (Inference Engine and nGraph) is now deprecated. It can still be
   used but is not recommended. Its discontinuation is planned for 2024.
 | :doc:`See how to transition to API 2.0 `
@@ -110,21 +107,21 @@ offering.
 | *New solution:* the tool is no longer needed
 | *Old solution:* deprecated in OpenVINO 2023.0
 |
-| Compile tool is now deprecated. If you need to compile a model for inference on 
+| Compile tool is now deprecated. If you need to compile a model for inference on
   a specific device, use the following script:
 
 .. tab-set::
-   
+
    .. tab-item:: Python
       :sync: py
-      
+
       .. doxygensnippet:: docs/snippets/export_compiled_model.py
          :language: python
          :fragment: [export_compiled_model]
-      
+
    .. tab-item:: C++
       :sync: cpp
-      
+
       .. doxygensnippet:: docs/snippets/export_compiled_model.cpp
          :language: cpp
         :fragment: [export_compiled_model]
@@ -137,7 +134,7 @@ offering.
 | *New solution:* DevCloud version
 | *Old solution:* local distribution discontinued in OpenVINO 2022.3
 |
-| The stand-alone version of DL Workbench, a GUI tool for previewing and benchmarking 
+| The stand-alone version of DL Workbench, a GUI tool for previewing and benchmarking
   deep learning models, has been discontinued. You can use its cloud version:
 | `Intel® Developer Cloud for the Edge `__.
@@ -145,7 +142,7 @@ offering.
 | *New solution:* Direct model support and OpenVINO Converter (OVC)
 | *Old solution:* discontinued in OpenVINO 2023.0
 |
-| OpenVINO™ Integration with TensorFlow is longer supported, as OpenVINO now features a 
-  native TensorFlow support, significantly enhancing user experience with no need for 
-  explicit model conversion. 
+| OpenVINO™ Integration with TensorFlow is no longer supported, as OpenVINO now features
+  native TensorFlow support, significantly enhancing user experience with no need for
+  explicit model conversion.
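> Note (illustrative, not part of this patch): the hunks above repeatedly redirect readers from POT to NNCF. For orientation, the NNCF flow that replaces the removed POT pages looks roughly like the sketch below; the IR path and the random calibration data are placeholders, and exact keyword names should be checked against the NNCF documentation.

```python
# Rough sketch of NNCF post-training quantization, the replacement that the
# documentation above points to. Paths and data are hypothetical placeholders.
import numpy as np
import nncf
import openvino as ov

core = ov.Core()
model = core.read_model("model.xml")  # an FP32 OpenVINO IR (placeholder path)

# A representative, unannotated calibration set; random arrays stand in for real samples.
data_source = [np.random.rand(1, 3, 224, 224).astype(np.float32) for _ in range(300)]

# The second argument adapts one dataset item into the model's input format;
# the items here are already model-ready, so the identity function suffices.
calibration_dataset = nncf.Dataset(data_source, lambda item: item)

# Default 8-bit post-training quantization (NNCF's counterpart of POT's
# DefaultQuantization algorithm).
quantized_model = nncf.quantize(model, calibration_dataset)
ov.save_model(quantized_model, "model_int8.xml")
```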
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.rst
deleted file mode 100644
index 840a53b5c237b6..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-.. {#pot_introduction}
-
-[Deprecated] Post-training Quantization with POT
-================================================
-
-
-.. toctree::
-   :maxdepth: 1
-   :hidden:
-
-   Quantizing Model 
-   Quantizing Model with Accuracy Control 
-   Quantization Best Practices 
-   API Reference 
-   Command-line Interface 
-   Examples 
-   Post-training Optimization Tool FAQ 
-   (Experimental) Protecting Model 
-
-
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-For the needs of post-training optimization, OpenVINO provides a **Post-training Optimization Tool (POT)**
-which supports the **uniform integer quantization** method. This method allows moving from floating-point precision
-to integer precision (for example, 8-bit) for weights and activations during inference time. It helps to reduce
-the model size, memory footprint and latency, as well as improve the computational efficiency, using integer arithmetic.
-During the quantization process, the model undergoes the transformation process when additional operations, that contain
-quantization information, are inserted into the model. The actual transition to integer arithmetic happens at model inference.
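> Note (illustrative, not part of the deleted page): the uniform integer quantization described in the paragraph above reduces to a scale/round/clamp mapping. A small worked example using generic affine int8 quantization (not POT's exact internals):

```python
# Standard uniform (affine) int8 quantization sketch -- illustrative math only.
import numpy as np

x = np.array([-1.7, -0.2, 0.0, 0.9, 2.3], dtype=np.float32)  # float activations
qmin, qmax = -128, 127                                        # int8 range

scale = (x.max() - x.min()) / (qmax - qmin)                   # ~0.0157 here
zero_point = int(round(qmin - x.min() / scale))               # maps x.min() near qmin

q = np.clip(np.round(x / scale) + zero_point, qmin, qmax).astype(np.int8)
x_hat = (q.astype(np.float32) - zero_point) * scale           # dequantized approximation
print(q, x_hat)  # x_hat matches x up to a rounding error of about scale/2
```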
-
-The post-training quantization algorithm takes samples from the representative dataset, inputs them into the network,
-and calibrates the network based on the resulting weights and activation values. Once calibration is complete,
-values in the network are converted to 8-bit integer format.
-
-While post-training quantization makes your model run faster and take less memory, it may cause a slight reduction
-in accuracy. If you performed post-training quantization on your model and find that it isn’t accurate enough,
-try using :doc:`Quantization-aware Training ` to increase its accuracy.
-
-
-| **Post-Training Quantization Quick Start Examples:**
-| Try out these interactive Jupyter Notebook examples to learn the POT API and see post-training quantization in action:
-
-* `Quantization of Image Classification Models with POT `__.
-* `Object Detection Quantization with POT `__.
-
-Quantizing Models with POT
-#######################################
-
-The figure below shows the post-training quantization workflow with POT. In a typical workflow, a pre-trained
-model is converted to OpenVINO IR format using Model Optimizer. Then, the model is quantized with a representative dataset using POT.
-
-.. image:: _static/images/workflow_simple.svg
-   :alt: OVMS Benchmark Setup Diagram
-
-
-Post-training Quantization Methods
-+++++++++++++++++++++++++++++++++++++++
-
-Depending on your needs and requirements, POT provides two quantization methods that can be used:
-Default Quantization and Accuracy-aware Quantization.
-
-
-Default Quantization
----------------------------------------
-
-Default Quantization uses an unannotated dataset to perform quantization. It uses representative
-dataset items to estimate the range of activation values in a network and then quantizes the network.
-This method is recommended to start with, because it results in a fast and accurate model in most cases.
-To quantize your model with Default Quantization, see the :doc:`Quantizing Models ` page.
-
-Accuracy-aware Quantization
----------------------------------------
-
-Accuracy-aware Quantization is an advanced method that maintains model accuracy within a predefined
-range by leaving some network layers unquantized. It uses a trade-off between speed and accuracy to meet
-user-specified requirements. This method requires an annotated dataset and may require more time for quantization.
-To quantize your model with Accuracy-aware Quantization, see the :doc:`Quantizing Models with Accuracy Control ` page.
-
-Quantization Best Practices and FAQs
-+++++++++++++++++++++++++++++++++++++++
-
-If you quantized your model and it isn’t accurate enough, visit the :doc:`Quantization Best Practices `
-page for tips on improving quantized performance. Sometimes, older Intel CPU generations can encounter a saturation issue when
-running quantized models that can cause reduced accuracy: learn more on the :doc:`Saturation Issue Workaround ` page.
-
-Have more questions about post-training quantization or encountering errors using POT? Visit the
-:doc:`POT FAQ ` page for answers to frequently asked questions and solutions to common errors.
-
-
-
-Additional Resources
-#######################################
-
-* `Tutorial: Migrate quantization from POT API to NNCF API `__
-* :doc:`Post-training Quantization Examples `
-* :doc:`Quantization Best Practices `
-* :doc:`Post-training Optimization Tool FAQ `
-* :doc:`Performance Benchmarks `
-
-
-

diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.rst
deleted file mode 100644
index bbbf724b0431f3..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_api_reference.rst
+++ /dev/null
@@ -1,404 +0,0 @@
-.. {#pot_compression_api_README}
-
-[Deprecated] API Reference
-=================================
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-
-Post-training Optimization Tool API provides a full set of interfaces and helpers that allow users to implement a custom optimization pipeline for various types of DL models including cascaded or compound models. Below is a full specification of this API:
-
-DataLoader
-++++++++++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.DataLoader(config)
-
-
-The base class for all DataLoaders.
-
-``DataLoader`` loads data from a dataset and applies pre-processing to them providing access to the pre-processed data
-by index.
-
-All subclasses should override the ``__len__()`` function, which should return the size of the dataset, and ``__getitem__()``,
-which supports integer indexing in the range of 0 to ``len(self)``. ``__getitem__()`` method can return data in one of the possible formats:
-
-.. code-block:: sh
-
-   (data, annotation)
-
-
-or
-
-.. code-block:: sh
-
-   (data, annotation, metadata)
-
-
-``data`` is the input that is passed to the model at inference so that it should be properly preprocessed. ``data`` can be either ``numpy.array`` object or dictionary where the key is the name of the model input and the value is ``numpy.array`` which corresponds to this input. The format of ``annotation`` should correspond to the expectations of the ``Metric`` class. ``metadata`` is an optional field that can be used to store additional information required for post-processing.
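> Note (illustrative, not part of the deleted page): a minimal `DataLoader` satisfying the contract just described might look like the following; the random in-memory "dataset" is a stand-in for real preprocessed samples.

```python
# Hypothetical minimal DataLoader for the (deprecated) POT API specified above.
import numpy as np
from openvino.tools.pot import DataLoader

class RandomCalibrationLoader(DataLoader):
    """Serves (data, annotation) pairs; annotation is None, which is enough
    for algorithms that do not need labels, such as DefaultQuantization."""

    def __init__(self, config, num_samples=300, shape=(1, 3, 224, 224)):
        super().__init__(config)
        self._samples = [np.random.rand(*shape).astype(np.float32)
                         for _ in range(num_samples)]

    def __len__(self):
        return len(self._samples)

    def __getitem__(self, index):
        # Could also return {'input_name': array} for multi-input models,
        # or (data, annotation, metadata) when post-processing needs metadata.
        return self._samples[index], None
```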
code-block:: sh
-
-      {metric_name: {attribute_name: value}}
-
-
-  Required attributes:
-
-  - ``direction`` - (``higher-better`` or ``higher-worse``) a string parameter defining whether the metric value should be increased in accuracy-aware algorithms.
-  - ``type`` - a string representation of the metric type. For example, 'accuracy' or 'mean_iou'.
-
-Engine
-++++++++++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.Engine(config, data_loader=None, metric=None)
-
-Base class for all Engines.
-
-The engine provides model inference, statistics collection for activations, and calculation of accuracy metrics for a dataset.
-
-*Parameters*
-
-- ``config`` - engine-specific config.
-- ``data_loader`` - ``DataLoader`` instance to iterate over the dataset.
-- ``metric`` - ``Metric`` instance to calculate the accuracy metric of the model.
-
-All subclasses should override the following methods:
-
-- ``set_model(model)`` - sets/resets a model.
-
-  *Parameters*
-
-  - ``model`` - ``CompressedModel`` instance for inference.
-
-- ``predict(stats_layout=None, sampler=None, metric_per_sample=False, print_progress=False)`` - performs model inference on the specified subset of data.
-
-  *Parameters*
-
-  - ``stats_layout`` - dictionary of statistic collection functions. An optional parameter.
-
-    .. code-block:: sh
-
-       {
-           'node_name': {
-               'stat_name': fn
-           }
-       }
-
-  - ``sampler`` - ``Sampler`` instance that provides a way to iterate over the dataset. (See details below.)
-  - ``metric_per_sample`` - if a ``Metric`` is specified and this parameter is set to True, the metric value is
-    calculated for each data sample; otherwise, it is calculated for the whole dataset.
-  - ``print_progress`` - print inference progress.
-
-  *Returns*
-
-  - a tuple of dictionaries of per-sample and overall metric values if ``metric_per_sample`` is True
-
-    .. code-block:: sh
-
-       (
-          {
-             'sample_id': sample_index,
-             'metric_name': metric_name,
-             'result': metric_value
-          },
-          {
-             'metric_name': metric_value
-          }
-       )
-
-    Otherwise, a dictionary of overall metrics.
-
-    .. code-block:: sh
-
-       { 'metric_name': metric_value }
-
-  - a dictionary of collected statistics
-
-    .. code-block:: sh
-
-       {
-           'node_name': {
-               'stat_name': [statistics]
-           }
-       }
-
-
-Pipeline
-++++++++++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.Pipeline(engine)
-
-
-The Pipeline class represents the optimization pipeline.
-
-*Parameters*
-
-- ``engine`` - instance of the ``Engine`` class for model inference.
-
-The pipeline can be applied to the DL model by calling the ``run(model)`` method, where ``model`` is the ``CompressedModel`` instance.
-
-Create a pipeline
---------------------
-
-The POT Python* API provides the utility function to create and configure the pipeline:
-
-.. code-block:: sh
-
-   openvino.tools.pot.create_pipeline(algo_config, engine)
-
-
-*Parameters*
-
-- ``algo_config`` - a list defining optimization algorithms and their parameters included in the optimization pipeline.
-  The order in which they are applied to the model in the optimization pipeline is determined by the order in the list.
-
-  Example of the algorithm configuration of the pipeline:
-
-  .. code-block:: sh
-
-     algo_config = [
-         {
-             'name': 'DefaultQuantization',
-             'params': {
-                 'preset': 'performance',
-                 'stat_subset_size': 500
-             }
-         },
-         ...
-     ]
-
-
-- ``engine`` - instance of the ``Engine`` class for model inference.
-
-*Returns*
-
-- instance of the ``Pipeline`` class.
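-
-As a minimal, illustrative sketch of the interfaces above, a custom top-1 accuracy ``Metric`` could be implemented as follows (the class name and the exact post-processing are assumptions for illustration, not part of the POT package):
-
-.. code-block:: py
-
-   import numpy as np
-   from openvino.tools.pot import Metric
-
-   class TopOneAccuracy(Metric):
-       """Counts how often the top prediction matches the annotation."""
-       def __init__(self):
-           super().__init__()
-           self._name = 'accuracy'
-           self._matches = []
-
-       @property
-       def value(self):
-           # metric value for the last processed sample
-           return {self._name: [self._matches[-1]]}
-
-       @property
-       def avg_value(self):
-           # metric value averaged over all collected results
-           return {self._name: np.mean(self._matches) if self._matches else 0.0}
-
-       def update(self, output, target):
-           # assumes a classification output and an integer label annotation
-           prediction = np.argmax(output[0].flatten())
-           self._matches.append(float(prediction == target[0]))
-
-       def reset(self):
-           self._matches = []
-
-       def get_attributes(self):
-           return {self._name: {'direction': 'higher-better', 'type': 'accuracy'}}
-
-Such a metric can then be passed to an ``Engine`` together with a ``DataLoader`` and used by ``create_pipeline`` for accuracy-aware algorithms.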
-
-Helpers and Internal Model Representation
-#########################################
-
-To simplify the implementation of optimization pipelines, POT provides a set of ready-to-use helpers. This section also
-describes an internal representation of the DL model and how to work with it.
-
-IEEngine
-++++++++++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.IEEngine(config, data_loader=None, metric=None)
-
-IEEngine is a helper which implements the Engine class based on the :doc:`OpenVINO™ Inference Engine Python API `.
-This class supports inference in synchronous and asynchronous modes and can be reused as-is in a custom pipeline or
-with some modifications, e.g. in the case of custom post-processing of inference results.
-
-The following methods can be overridden in subclasses:
-
-- ``postprocess_output(outputs, metadata)`` - Processes model output data using the image metadata obtained during data loading.
-
-  *Parameters*
-
-  - ``outputs`` - dictionary of output data per output name.
-  - ``metadata`` - information about the data used for inference.
-
-  *Return*
-
-  - list of the output data in the order expected by the accuracy metric, if one is used
-
-``IEEngine`` supports data returned by ``DataLoader`` in the format:
-
-.. code-block:: sh
-
-   (data, annotation)
-
-
-or
-
-.. code-block:: sh
-
-   (data, annotation, metadata)
-
-
-Metric values returned by a ``Metric`` instance are expected to be in the format:
-
-- for ``value()``:
-
-  .. code-block:: sh
-
-     {metric_name: [metric_values_per_image]}
-
-- for ``avg_value()``:
-
-  .. code-block:: sh
-
-     {metric_name: metric_value}
-
-
-In order to implement a custom ``Engine`` class, you may need to get familiar with the following interfaces:
-
-CompressedModel
-++++++++++++++++++++
-
-The Python POT API provides the ``CompressedModel`` class as a single interface for working with single and cascaded DL models.
-It is used to load, save, and access the model; in the case of a cascaded model, it provides access to each model of the cascade.
-
-.. code-block:: sh
-
-   class openvino.tools.pot.graph.nx_model.CompressedModel(**kwargs)
-
-The CompressedModel class provides a representation of the DL model. Both a single model and a cascaded model can be
-represented as an instance of this class. A cascaded model is stored as a list of models.
-
-*Properties*
-
-- ``models`` - list of models of the cascaded model.
-- ``is_cascade`` - returns True if the loaded model is a cascaded model.
-
-Read model from OpenVINO IR
-++++++++++++++++++++++++++++++
-
-The Python POT API provides the utility function to load the model from the OpenVINO™ Intermediate Representation (IR):
-
-.. code-block:: sh
-
-   openvino.tools.pot.load_model(model_config)
-
-*Parameters*
-
-- ``model_config`` - dictionary describing a model that includes the following attributes:
-
-  - ``model_name`` - model name.
-  - ``model`` - path to the network topology (.xml).
-  - ``weights`` - path to the model weights (.bin).
-
-  Example of ``model_config`` for a single model:
-
-  .. code-block:: sh
-
-     model_config = {
-         'model_name': 'mobilenet_v2',
-         'model': '/mobilenet_v2.xml',
-         'weights': '/mobilenet_v2.bin'
-     }
-
-  Example of ``model_config`` for a cascaded model:
-
-  .. code-block:: sh
-
-     model_config = {
-         'model_name': 'mtcnn',
-         'cascade': [
-             {
-                 'name': 'pnet',
-                 'model': '/pnet.xml',
-                 'weights': '/pnet.bin'
-             },
-             {
-                 'name': 'rnet',
-                 'model': '/rnet.xml',
-                 'weights': '/rnet.bin'
-             },
-             {
-                 'name': 'onet',
-                 'model': '/onet.xml',
-                 'weights': '/onet.bin'
-             }
-         ]
-     }
-
-
-*Returns*
-
-- ``CompressedModel`` instance
-
-Save a model to IR
-----------------------
-
-The Python POT API provides the utility function to save a model in the OpenVINO™ Intermediate Representation (IR):
-
-.. code-block:: sh
-
-   openvino.tools.pot.save_model(model, save_path, model_name=None, for_stat_collection=False)
-
-
-*Parameters*
-
-- ``model`` - ``CompressedModel`` instance.
-- ``save_path`` - path to save the model.
-- ``model_name`` - name under which the model will be saved.
-- ``for_stat_collection`` - whether the model is saved to be used for statistics collection or for inference (affects only cascaded models). If set to False, removes model prefixes from node names.
-
-*Returns*
-
-- list of dictionaries with paths:
-
-  .. code-block:: sh
-
-     [
-        {
-           'name': model name,
-           'model': path to .xml,
-           'weights': path to .bin
-        },
-        ...
-     ]
-
-
-Sampler
-++++++++++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.samplers.Sampler(data_loader=None, batch_size=1, subset_indices=None)
-
-Base class for all Samplers.
-
-A Sampler provides a way to iterate over the dataset.
-
-All subclasses should override the ``__iter__()`` method, providing a way to iterate over the dataset, and the ``__len__()`` method,
-which returns the length of the returned iterator.
-
-*Parameters*
-
-- ``data_loader`` - instance of the ``DataLoader`` class to load data.
-- ``batch_size`` - number of items in a batch; the default is 1.
-- ``subset_indices`` - indices of samples to load. If ``subset_indices`` is set to None, the sampler will take elements from the whole dataset.
-
-BatchSampler
-++++++++++++
-
-.. code-block:: sh
-
-   class openvino.tools.pot.samplers.batch_sampler.BatchSampler(data_loader, batch_size=1, subset_indices=None)
-
-The sampler provides an iterable over a dataset subset if ``subset_indices`` is specified,
-or over the whole dataset, with a given ``batch_size``. It returns a list of data items.
-
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.rst
deleted file mode 100644
index d893a683b2d841..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli.rst
+++ /dev/null
@@ -1,115 +0,0 @@
-.. {#pot_compression_cli_README}
-
-[Deprecated] Use Post-Training Optimization Tool Command-Line Interface (Model Zoo flow)
-====================================================================================================
-
-
-.. toctree::
-   :maxdepth: 1
-   :hidden:
-
-   Simplified Mode 
-   Configuration File Description 
-
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-Introduction
-####################
-
-POT command-line interface (CLI) is aimed at optimizing models that are similar to the models from the OpenVINO `Model Zoo `__, or models for which there is a valid :doc:`AccuracyChecker Tool ` configuration file. Examples of AccuracyChecker configuration files can be found on `GitHub `__.
Each model folder contains a YAML configuration file that can be used with POT as is. - -.. note:: - - There is also a :doc:`Simplified mode ` aimed at the optimization of models from the Computer Vision domain and has a simple dataset preprocessing like image resize and crop. In this case, you can also use POT CLI for optimization. However, the accuracy results are not guaranteed in this case. Moreover, you are also limited in the optimization methods choice since the accuracy measurement is not available. - - -Run POT CLI -#################### - -There are two ways how to run POT via the command line: - -- **Basic usage for DefaultQuantization**. In this case, you can run POT with basic settings just specifying all the options via the command line. ``-q default`` stands for :doc:`DefaultQuantization ` algorithm: - - .. code-block:: sh - - pot -q default -m -w --ac-config - -- **Basic usage for AccuracyAwareQuantization**. You can also run :doc:`AccuracyAwareQuantization ` method with basic options. ``--max-drop 0.01`` option defines maximum accuracy deviation to 1 absolute percent from the original model: - - .. code-block:: sh - - pot -q accuracy_aware -m -w --ac-config --max-drop 0.01 - - -- **Advanced usage**. In this case, you should prepare a configuration file for the POT where you can specify advanced options for the optimization methods available. See :doc:`POT configuration file description ` for more details. - - To launch the command-line tool with the configuration file run: - - .. code-block:: sh - - pot -c - - -For all available usage options, use the ``-h``, ``--help`` arguments or refer to the Command-Line Arguments section below. - -By default, the results are dumped into the separate output subfolder inside the ``./results`` folder that is created -in the same directory where the tool is run from. Use the ``-e`` option to evaluate the accuracy directly from the tool. - -See also the :doc:`End-to-end example ` about how to run a particular example of 8-bit -quantization with the POT. - -Command-Line Arguments -++++++++++++++++++++++ - -The following command-line options are available to run the tool: - -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Argument | Description | -+=====================================================+=======================================================================================================================================================================================================+ -| ``-h``, ``--help`` | Optional. Show help message and exit. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-q``, ``--quantize`` | Quantize model to 8 bits with specified quantization method: ``default`` or ``accuracy_aware``. 
| -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--preset`` | Use ``performance`` for fully symmetric quantization or ``mixed`` preset for symmetric quantization of weight and asymmetric quantization of activations. Applicable only when ``-q`` option is used. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-m``, ``--model`` | Path to the optimizing model file (.xml). Applicable only when ``-q`` option is used. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-w``, ``--weights`` | Path to the weights file of the optimizing model (.bin). Applicable only when ``-q`` option is used. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-n``, ``--name`` | Optional. Model name. Applicable only when ``-q`` option is used. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--engine {accuracy_checker, simplified}`` | Engine type used to specify CLI mode. Default: ``accuracy_checker``. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--data-source DATA_DIR`` | Optional. Valid and required for Simplified mode only. Specifies the path to calibration data. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--ac-config`` | Path to the Accuracy Checker configuration file. Applicable only when ``-q`` option is used. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--max-drop`` | Optional. Maximum accuracy drop. Valid only for accuracy-aware quantization. Applicable only when ``-q`` option is used and the ``accuracy_aware`` method is selected. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-c CONFIG``, ``--config CONFIG`` | Path to a config file with task- or model-specific parameters. 
| -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-e``, ``--evaluate`` | Optional. Evaluate the model on the whole dataset after optimization. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--output-dir OUTPUT_DIR`` | Optional. A directory where results are saved. Default: ``./results``. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-sm``, ``--save-model`` | Optional. Save the original full-precision model. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``-d``, ``--direct-dump`` | Optional. Save results to the "optimized" subfolder within the specified output directory with no additional subpaths added at the end. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--log-level {CRITICAL,ERROR,WARNING,INFO,DEBUG}`` | Optional. Log level to print. Default: INFO. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--progress-bar`` | Optional. Disable CL logging and enable the progress bar. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--stream-output`` | Optional. Switch model quantization progress display to a multiline mode. Use with third-party components. | -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``--keep-uncompressed-weights`` | Optional. Keep Convolution, Deconvolution and FullyConnected weights uncompressed. Use with third-party components. 
| -+-----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - -See Also -#################### - -* :doc:`Optimization with Simplified mode ` -* :doc:`Post-Training Optimization Best Practices ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.rst deleted file mode 100644 index d4f41bc14e1397..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/configuration_file_description.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. {#pot_configs_README} - -[Deprecated] Configuration File Description -============================================== - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -The tool is designed to work with the configuration file where all the parameters required for the optimization are specified. These parameters are organized as a dictionary and stored in -a JSON file. JSON file allows using comments that are supported by the ``jstyleson`` Python package. -Logically all parameters are divided into three groups: - -- **Model parameters** that are related to the model definition (e.g. model name, model path, etc.) -- **Engine parameters** that define parameters of the engine which is responsible for the model inference and data preparation used for optimization and evaluation (e.g. preprocessing parameters, dataset path, etc.) -- **Compression parameters** that are related to the optimization algorithm (e.g. algorithm name and specific parameters) - -Model Parameters -#################### - -.. code-block:: json - - "model": { - "model_name": "model_name", - "model": "", - "weights": "" - } - - -This section contains only three parameters: - -- ``"model_name"`` - string parameter that defines a model name, e.g. ``"MobileNetV2"`` -- ``"model"`` - string parameter that defines the path to an input model topology (.xml) -- ``"weights"`` - string parameter that defines the path to an input model weights (.bin) - -Engine Parameters -#################### - -.. code-block:: json - - "engine": { - "type": "accuracy_checker", - "config": "./configs/examples/accuracy_checker/mobilenet_v2.yaml" - } - - -The main parameter is ``"type"`` which can take two possible options: ``"accuracy_checher"`` (default) or ``"simplified"``. It specifies the engine used for model inference and validation (if supported): - -- **Simplified mode** engines. These engines can be used only with the ``DefaultQuantization`` algorithm to get a fully quantized model. They do not use the Accuracy Checker tool and annotation. In this case, the following parameters are applicable: - - - ``"data_source"`` specifies the path to the directory​ where the calibration data is stored. - - ``"layout"`` - (Optional) Layout of input data. Supported values: [``"NCHW"``, ``"NHWC"``, ``"CHW"``, ``"CWH"``]​. - -- **Accuracy Checker** engine. It relies on the :doc:`Deep Learning Accuracy Validation Framework ` (Accuracy Checker) when inferencing DL models and working with datasets. 
-
-If you have annotations, you can benefit from this mode by measuring accuracy. When this mode is selected, you can use the accuracy-aware family of algorithms.
-There are two options to define engine parameters in this mode:
-
-- Refer to an existing Accuracy Checker configuration file, which is represented by a YAML file. It can be the file used for full-precision model validation. In this case, you should define only the ``"config"`` parameter containing the path to the AccuracyChecker configuration file.
-- Define all the :doc:`required Accuracy Checker parameters ` directly in the JSON file. In this case, POT just passes the corresponding dictionary of parameters to the Accuracy Checker when instantiating it. For more details, refer to the corresponding Accuracy Checker information and the examples of configuration files provided with the tool: 8-bit quantization of `SSD-MobileNet model `__
-
-Compression Parameters
-######################
-
-For more details on the parameters of a particular optimization algorithm, see the descriptions of the :doc:`Default Quantization ` and :doc:`Accuracy-aware Quantization ` methods.
-
-Examples of the Configuration File
-##################################
-
-For a quick start, many examples of configuration files are provided `here `__.
-There, you can find ready-to-use configurations for models from various domains: Computer Vision (Image Classification, Object Detection, Segmentation), Natural Language Processing, and Recommendation Systems. Configuration files are provided for the models that require non-default configuration settings in order to get accurate results.
-
-For details on how to run the Post-Training Optimization Tool with a sample configuration file, see the :doc:`example `.
-
-Additional Resources
-####################
-
-* :doc:`Optimization with Simplified mode `
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.rst
deleted file mode 100644
index de5fccd0af7b54..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_cli/simplified_mode.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-.. {#pot_docs_simplified_mode}
-
-[Deprecated] Optimization with Simplified Mode
-====================================================
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-
-Introduction
-####################
-
-Simplified mode is designed to make data preparation for the model optimization process easier. The mode is represented by an implementation of the Engine interface from the POT API. It allows reading data from an arbitrary folder specified by the user. For more details about the POT API, refer to the corresponding :doc:`description `. Currently, Simplified mode is available only for image data in PNG or JPEG formats, stored in a single folder. It supports Computer Vision models with a single input or two inputs where the second is "image_info" (Faster R-CNN, Mask R-CNN, etc.).
-
-.. note::
-
-   This mode cannot be used with accuracy-aware methods. There is no way to control accuracy after optimization. Nevertheless, this mode can be helpful for estimating the performance benefits of model optimizations.
-
-Usage
-####################
-
-To use the Simplified mode, prepare the data and place it in a separate folder. No other files should be present in this folder.
-
-If only a model is available and there is no data, it is possible to generate a synthetic dataset using the Dataset Management Framework (Datumaro), available on `GitHub `__. Currently, data generation is available only for Computer Vision models, and it can take time in some cases.
-
-Install Datumaro:
-
-.. code-block:: sh
-
-   pip install datumaro
-
-
-Create a synthetic dataset with elements of the specified type and shape, and save it to the provided directory.
-
-Usage:
-
-.. code-block:: sh
-
-   datum generate [-h] -o OUTPUT_DIR -k COUNT --shape SHAPE [SHAPE ...]
-                  [-t {image}] [--overwrite] [--model-dir MODEL_PATH]
-
-
-Example of generating 300 images with height 224 and width 256 and saving them in the ``./dataset`` directory:
-
-.. code-block:: sh
-
-   datum generate -o ./dataset -k 300 --shape 224 256
-
-
-After that, ``OUTPUT_DIR`` can be provided to the ``--data-source`` CLI option or to the ``data_source`` config parameter.
-
-There are two options to run POT in the Simplified mode:
-
-* Using command-line options only. Here is an example of 8-bit quantization:
-
-  ``pot -q default -m -w --engine simplified --data-source ``
-
-* To provide more options, use the corresponding ``"engine"`` section in the POT configuration file as follows:
-
-  .. code-block:: json
-
-     "engine": {
-         "type": "simplified",
-         "layout": "NCHW",               // Layout of input data. Supported ["NCHW",
-                                         // "NHWC", "CHW", "CWH"] layout
-         "data_source": "PATH_TO_SOURCE" // You can specify a path to the directory with images
-                                         // Also you can specify template for file names to filter images to load.
-                                         // Templates are unix style (this option is valid only in Simplified mode)
-     }
-
-
-A template of the configuration file for 8-bit quantization using Simplified mode can be found `at the following link `__.
-
-For more details about POT usage via CLI, refer to this :doc:`CLI document `.
-
-Additional Resources
-####################
-
-* :doc:`Configuration File Description `
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.rst
deleted file mode 100644
index 8d0cb6f0444c46..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-.. {#pot_examples_description}
-
-[Deprecated] Examples
-========================
-
-
-.. toctree::
-   :maxdepth: 1
-   :hidden:
-
-   API Examples 
-   Command-line Example 
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-This section provides a set of examples that demonstrate how to apply the post-training optimization methods to optimize various models from different domains.
-It contains optimization recipes for specific models which do not necessarily cover your exact case, but which should be sufficient to reuse when optimizing custom models:
-
-- :doc:`API Examples `
-- :doc:`Command-line Example `
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.rst
deleted file mode 100644
index f4eea83bed5b07..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-.. {#pot_example_README}
-
-[Deprecated] Post-training Optimization Tool API Examples
-===============================================================
-
-
-.. toctree::
-   :maxdepth: 1
-   :hidden:
-
-   Quantizing Image Classification Model 
-   Quantizing Object Detection Model with Accuracy Control 
-   Quantizing Cascaded Model 
-   Quantizing Semantic Segmentation Model 
-   Quantizing 3D Segmentation Model 
-   Quantizing for GNA Device 
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-
-The Post-training Optimization Tool contains multiple examples that demonstrate how to use its :doc:`API `
-to optimize DL models. All available examples can be found on `GitHub `__.
-
-The following examples demonstrate the implementation of the ``Engine``, ``Metric``, and ``DataLoader`` interfaces for various use cases:
-
-1. :doc:`Quantizing Image Classification model `
-
-   - Uses a single ``MobilenetV2`` model from TensorFlow
-   - Implements ``DataLoader`` to load .JPEG images and annotations of the ImageNet database
-   - Implements the ``Metric`` interface to calculate the Accuracy at top-1 metric
-   - Uses the DefaultQuantization algorithm to quantize the model
-
-2. :doc:`Quantizing Object Detection Model with Accuracy Control `
-
-   - Uses a single ``MobileNetV1 FPN`` model from TensorFlow
-   - Implements ``DataLoader`` to load images of the COCO database
-   - Implements the ``Metric`` interface to calculate the ``mAP@[.5:.95]`` metric
-   - Uses the ``AccuracyAwareQuantization`` algorithm to quantize the model
-
-3. :doc:`Quantizing Semantic Segmentation Model `
-
-   - Uses a single ``DeepLabV3`` model from TensorFlow
-   - Implements ``DataLoader`` to load .JPEG images and annotations of the Pascal VOC 2012 database
-   - Implements the ``Metric`` interface to calculate the Mean Intersection Over Union metric
-   - Uses the DefaultQuantization algorithm to quantize the model
-
-4. :doc:`Quantizing 3D Segmentation Model `
-
-   - Uses a single ``Brain Tumor Segmentation`` model from PyTorch
-   - Implements ``DataLoader`` to load images in NIfTI format from the Medical Segmentation Decathlon BRATS 2017 database
-   - Implements the ``Metric`` interface to calculate the Dice Index metric
-   - Demonstrates how to use image metadata obtained during data loading to post-process the raw model output
-   - Uses the DefaultQuantization algorithm to quantize the model
-
-5. :doc:`Quantizing Cascaded model `
-
-   - Uses a cascaded (composite) ``MTCNN`` model from Caffe that consists of three separate models in an OpenVINO™ Intermediate Representation (IR)
-   - Implements ``DataLoader`` to load .jpg images of the WIDER FACE database
-   - Implements the ``Metric`` interface to calculate the Recall metric
-   - Implements an ``Engine`` class inherited from ``IEEngine`` to create a complex staged pipeline that sequentially executes each of the three stages of the MTCNN model, represented by multiple models in IR. It uses engine helpers to set a model in the OpenVINO Inference Engine and to process the raw model output for correct statistics collection
-   - Uses the DefaultQuantization algorithm to quantize the model
-
-6. :doc:`Quantizing for GNA Device `
-
-   - Uses models from Kaldi
-   - Implements ``DataLoader`` to load data in .ark format
-   - Uses the DefaultQuantization algorithm to quantize the model
-
-After the execution of each example above, the quantized model is placed into the ``optimized`` folder. The accuracy validation of the quantized model is performed right after the quantization.
-
-See the tutorials
-####################
-
-* `Quantization of Image Classification model `__
-* `Quantization of Object Detection model from Model Zoo `__
-* `Quantization of Segmentation model for medical data `__
-* `Quantization of BERT for Text Classification `__
-
diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.rst
deleted file mode 100644
index a80701ad9c54aa..00000000000000
--- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_3d_segmentation.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-.. {#pot_example_3d_segmentation_README}
-
-[Deprecated] Quantizing 3D Segmentation Model
-================================================================
-
-.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead.
-
-
-This example demonstrates the use of the :doc:`Post-training Optimization Tool API ` for the task of quantizing a 3D segmentation model.
-The `Brain Tumor Segmentation `__ model from PyTorch is used for this purpose. A custom ``DataLoader`` is created to load images in NIfTI format from the `Medical Segmentation Decathlon BRATS 2017 `__ dataset for a 3D semantic segmentation task, and an implementation of the Dice Index metric is used for the model evaluation. In addition, this example demonstrates how one can use image metadata obtained during image reading and preprocessing to post-process the raw model output. The code of the example is available on `GitHub `__.
-
-How to Prepare the Data
-#######################
-
-To run this example, you will need to download the Brain Tumors 2017 part of the Medical Segmentation Decathlon image database http://medicaldecathlon.com/.
-3D MRI data in NIfTI format can be found in the ``imagesTr`` folder, and segmentation masks are in ``labelsTr``.
-
-How to Run the Example
-######################
-
-1. Launch :doc:`Model Downloader ` tool to download ``brain-tumor-segmentation-0002`` model from the Open Model Zoo repository.
-
-   ..
code-block:: sh - - omz_downloader --name brain-tumor-segmentation-0002 - - -2. Launch :doc:`Model Converter ` tool to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - omz_converter --name brain-tumor-segmentation-0002 - - -3. Launch the example script from the example directory: - - .. code-block:: sh - - python3 ./3d_segmentation_example.py -m -d --mask-dir - - - Optional: you can specify .bin file of IR directly using the ``-w``, ``--weights`` options. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.rst deleted file mode 100644 index 6bf8d2ec310447..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_classification.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. {#pot_example_classification_README} - -[Deprecated] Quantizing Image Classification Model -======================================================== - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This example demonstrates the use of the :doc:`Post-training Optimization Tool API ` for the task of quantizing a classification model. -The `MobilenetV2 `__ model from TensorFlow is used for this purpose. -A custom ``DataLoader`` is created to load the `ImageNet `__ classification dataset and the implementation of Accuracy at top-1 metric is used for the model evaluation. The code of the example is available on `GitHub `__. - -How to Prepare the Data -####################### - -To run this example, you need to `download `__ the validation part of the ImageNet image database and place it in a separate folder, -which will be later referred to as ````. Annotations to images should be stored in a separate .txt file (````) in the format ``image_name label``. - - -How to Run the Example -###################### - -1. Launch :doc:`Model Downloader ` tool to download ``mobilenet-v2-1.0-224`` model from the Open Model Zoo repository. - - .. code-block:: sh - - omz_downloader --name mobilenet-v2-1.0-224 - -2. Launch :doc:`Model Converter ` tool to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - omz_converter --name mobilenet-v2-1.0-224 --mo /mo.py - -3. Launch the example script from the example directory: - - .. code-block:: sh - - python3 ./classification_sample.py -m -a -d - - Optional: you can specify .bin file of IR directly using the ``-w``, ``--weights`` options. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.rst deleted file mode 100644 index 2f3964bbcd44e9..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_face_detection.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. 
{#pot_example_face_detection_README} - -[Deprecated] Quantizing Cascaded Face detection Model -============================================================ - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This example demonstrates the use of the :doc:`Post-training Optimization Tool API ` for the task of quantizing a face detection model. -The `MTCNN `__ model from Caffe is used for this purpose. -A custom ``DataLoader`` is created to load the `WIDER FACE `__ dataset for a face detection task -and the implementation of Recall metric is used for the model evaluation. In addition, this example demonstrates how one can implement -an engine to infer a cascaded (composite) model that is represented by multiple submodels in an OpenVINO™ Intermediate Representation (IR) -and has a complex staged inference pipeline. The code of the example is available on `GitHub `__. - -How to Prepare the Data -####################### - -To run this example, you need to download the validation part of the Wider Face dataset http://shuoyang1213.me/WIDERFACE/. -Images with faces divided into categories are placed in the ``WIDER_val/images`` folder. -Annotations in .txt format containing the coordinates of the face bounding boxes of the -validation part of the dataset can be downloaded separately and are located in the ``wider_face_split/wider_face_val_bbx_gt.txt`` file. - -How to Run the Example -###################### - -1. Launch :doc:`Model Downloader ` tool to download ``mtcnn`` model from the Open Model Zoo repository. - - .. code-block:: sh - - omz_downloader --name mtcnn* - - -2. Launch :doc:`Model Converter ` tool to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - omz_converter --name mtcnn* --mo /mo.py - - -3. Launch the example script from the example directory: - - .. code-block:: sh - - python3 ./face_detection_example.py -pm - -rm -om -d -a - - - Optional: you can specify .bin files of corresponding IRs directly using the ``-pw/--pnet-weights``, ``-rw/--rnet-weights`` and ``-ow/--onet-weights`` options. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.rst deleted file mode 100644 index 860d60058d1ba5..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_object_detection.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. {#pot_example_object_detection_README} - -[Deprecated] Quantizing Object Detection Model with Accuracy Control -================================================================================ - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This example demonstrates the use of the :doc:`Post-training Optimization Toolkit API ` to quantize an object detection model in the :doc:`accuracy-aware mode `. The `MobileNetV1 FPN `__ model from TensorFlow for object detection task is used for this purpose. 
A custom ``DataLoader`` is created to load the `COCO `__ dataset for object detection task and the implementation of mAP COCO is used for the model evaluation. The code of the example is available on `GitHub `__. - -How to prepare the data -####################### - -To run this example, you will need to download the validation part of the `COCO `__. The images should be placed in a separate folder, which will be later referred to as ```` and the annotation file ``instances_val2017.json`` later referred to as ````. - -How to Run the example -###################### - -1. Launch :doc:`Model Downloader ` tool to download ``ssd_mobilenet_v1_fpn_coco`` model from the Open Model Zoo repository. - - .. code-block:: sh - - omz_downloader --name ssd_mobilenet_v1_fpn_coco - - -2. Launch :doc:`Model Converter ` tool to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - omz_converter --name ssd_mobilenet_v1_fpn_coco --mo /mo.py - - -3. Launch the example script from the example directory: - - .. code-block:: sh - - python ./object_detection_example.py -m -d --annotation-path - - -* Optional: you can specify .bin file of IR directly using the ``-w``, ``--weights`` options. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.rst deleted file mode 100644 index 5c6cf59696203f..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_segmentation.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. {#pot_example_segmentation_README} - -[Deprecated] Quantizing Semantic Segmentation Model -============================================================= - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This example demonstrates the use of the :doc:`Post-training Optimization Tool API ` for the task of quantizing a segmentation model. -The `DeepLabV3 ` model from TensorFlow is used for this purpose. -A custom `DataLoader` is created to load the `Pascal VOC 2012 `__ dataset for semantic segmentation task -and the implementation of Mean Intersection Over Union metric is used for the model evaluation. The code of the example is available on `GitHub `__. - -How to Prepare the Data -####################### - -To run this example, you will need to download the validation part of the Pascal VOC 2012 image database http://host.robots.ox.ac.uk/pascal/VOC/voc2012/#data. -Images are placed in the ``JPEGImages`` folder, ImageSet file with the list of image names for the segmentation task can be found at ``ImageSets/Segmentation/val.txt`` -and segmentation masks are kept in the ``SegmentationClass`` directory. - -How to Run the Example -###################### - -1. Launch :doc:`Model Downloader ` tool to download ``deeplabv3`` model from the Open Model Zoo repository. - - .. code-block:: sh - - omz_downloader --name deeplabv3 - - -2. Launch :doc:`Model Converter ` tool to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - omz_converter --name deeplabv3 --mo /mo.py - - -3. Launch the example script from the example directory: - - .. 
code-block:: sh - - python3 ./segmentation_example.py -m -d --imageset-file --mask-dir - - - Optional: you can specify .bin file of IR directly using the ``-w``, ``--weights`` options. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst deleted file mode 100644 index 9a1ddc1dee9ade..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_api_examples/pot_example_speech.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. {#pot_example_speech_README} - -[Deprecated] Quantizing for GNA Device -========================================= - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This example demonstrates the use of the :doc:`Post-training Optimization Tool API ` for the task of quantizing a speech model for :doc:`GNA ` device. Quantization for GNA is different from CPU quantization due to device specifics: GNA supports quantized inputs in INT16 and INT32 (for activations) precision and quantized weights in INT8 and INT16 precision. - -This example contains pre-selected quantization options based on the DefaultQuantization algorithm and created for models from `Kaldi `__ framework, and its data format. -A custom ``ArkDataLoader`` is created to load the dataset from files with .ark extension for speech analysis task. - -How to Prepare the Data -####################### - -To run this example, you will need to use the .ark files for each model input from your ````. -For generating data from original formats to .ark, please, follow the `Kaldi data preparation tutorial `__. - -How to Run the Example -###################### - -1. Launch :doc:`model conversion API ` with the necessary options (for details follow the :doc:`instructions for Kaldi ` to generate Intermediate Representation (IR) files for the model: - - .. code-block:: sh - - mo --input_model [MODEL_CONVERSION_API_PARAMETERS] - - -2. Launch the example script: - - .. code-block:: sh - - python3 /api/examples/speech/gna_example.py -m -w -d --input_names [LIST_OF_MODEL_INPUTS] --files_for_input [LIST_OF_INPUT_FILES] - - - Required parameters: - - - ``-i``, ``--input_names`` option. Defines the list of model inputs; - - ``-f``, ``--files_for_input`` option. Defines the list of filenames (.ark) mapped with input names. You should define names without extension, for example: FILENAME_1, FILENAME_2 maps with INPUT_1, INPUT_2. - - Optional parameters: - - - ``-p``, ``--preset`` option. Defines preset for quantization: ``performance`` for INT8 weights, ``accuracy`` for INT16 weights; - - ``-s``, ``--subset_size`` option. Defines subset size for calibration; - - ``-o``, ``--output`` option. Defines output folder for the quantized model. - -3. Validate your INT8 model using ``./speech_example`` from the Inference Engine examples. Follow the :doc:`speech example description link ` for details. 
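-
-For reference, a condensed sketch of a loader serving multi-input .ark data, in the spirit of the ``ArkDataLoader`` used by this example, is shown below. The ``read_ark_utterances`` helper is a placeholder (a real implementation could use, for example, the ``kaldi_io`` package), and error handling is omitted:
-
-.. code-block:: py
-
-   import numpy as np
-   from openvino.tools.pot import DataLoader
-
-   def read_ark_utterances(path, count=100, dim=40):
-       """Placeholder reader: returns random utterances instead of parsing
-       the Kaldi .ark file at ``path`` (swap in a real parser, e.g. kaldi_io)."""
-       return [np.random.rand(1, dim).astype(np.float32) for _ in range(count)]
-
-   class ArkLoader(DataLoader):
-       """Maps each model input name to utterances read from its .ark file."""
-       def __init__(self, input_to_file):
-           super().__init__({})
-           self._data = {name: read_ark_utterances(path)
-                         for name, path in input_to_file.items()}
-           self._count = min(len(utts) for utts in self._data.values())
-
-       def __len__(self):
-           return self._count
-
-       def __getitem__(self, index):
-           if index >= self._count:
-               raise IndexError
-           # a dict {input_name: array} is a valid ``data`` payload;
-           # annotations are not required for DefaultQuantization
-           return {name: utts[index] for name, utts in self._data.items()}, None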
- diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst deleted file mode 100644 index 0fc5f881f7cafe..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_examples/pot_cli_example.rst +++ /dev/null @@ -1,225 +0,0 @@ -.. {#pot_configs_examples_README} - -[Deprecated] End-to-end Command-line Interface Example -========================================================= - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -This tutorial describes an example of running post-training quantization for the **MobileNet v2 model from PyTorch** framework, -particularly by the DefaultQuantization algorithm. -The example covers the following steps: - -- Environment setup -- Model preparation and converting it to the OpenVINO™ Intermediate Representation (IR) format -- Performance benchmarking of the original full-precision model -- Dataset preparation -- Accuracy validation of the full-precision model in the IR format -- Model quantization by the DefaultQuantization algorithm and accuracy validation of the quantized model -- Performance benchmarking of the quantized model - -All the steps are based on the tools and samples of configuration files distributed with the Intel® Distribution of OpenVINO™ toolkit. - -The example has been verified in Ubuntu 18.04 Operating System with Python 3.6 installed. - -In case of issues while running the example, refer to :doc:`POT Frequently Asked Questions ` for help. - -Model Preparation -#################### - -1. Navigate to ````. - -2. Download the MobileNet v2 PyTorch model using :doc:`Model Downloader ` tool from the Open Model Zoo repository: - - .. code-block:: sh - - omz_downloader --name mobilenet-v2-pytorch - - - After that, the original full-precision model is located in ``/public/mobilenet-v2-pytorch/``. - -3. Convert the model to the OpenVINO™ Intermediate Representation (IR) format using :doc:`Model Converter ` tool: - - .. code-block:: sh - - omz_converter --name mobilenet-v2-pytorch - - - After that, the full-precision model in the IR format is located in ``/public/mobilenet-v2-pytorch/FP32/``. - -For more information about Model Conversion API, refer to its :doc:`documentation `. - -Performance Benchmarking of Full-Precision Models -################################################# - -Check the performance of the full-precision model in the IR format using :doc:`Deep Learning Benchmark ` tool: - -.. code-block:: sh - - benchmark_app -m /public/mobilenet-v2-pytorch/FP32/mobilenet-v2-pytorch.xml - -Note that the results might be different depending on the characteristics of your machine. On a machine with Intel® Core™ i9-10920X CPU @ 3.50GHz it is like: - -.. code-block:: sh - - Latency: 4.14 ms - Throughput: 1436.55 FPS - - -Dataset Preparation -#################### - -To perform the accuracy validation as well as quantization of a model, the dataset should be prepared. This example uses a real dataset called ImageNet. - -To download images: - -1. Go to the `ImageNet `__ homepage. -2. If you do not have an account, click the ``Signup`` button in the right upper corner, provide your data, and wait for a confirmation email. -3. 
Log in after receiving the confirmation email or if you already have an account. Go to the ``Download`` tab. -4. Select ``Download Original Images``. -5. You will be redirected to the ``Terms of Access`` page. If you agree to the Terms, continue by clicking ``Agree and Sign``. -6. Click one of the links in the ``Download as one tar file`` section. -7. Unpack the downloaded archive into ``/ImageNet/``. - -Note that the registration process might be quite long. - -Note that the ImageNet size is 50 000 images and takes around 6.5 GB of disk space. - -To download the annotation file: - -1. Download `archive `__. -2. Unpack ``val.txt`` from the archive into ``/ImageNet/``. - -After that, the ``/ImageNet/`` dataset folder should have a lot of image files like ``ILSVRC2012_val_00000001.JPEG`` and the ``val.txt`` annotation file. - -Accuracy Validation of Full-Precision Model in IR Format -######################################################## - -1. Create a new file in ```` and name it ``mobilenet_v2_pytorch.yaml``. This is the Accuracy Checker configuration file. - -2. Put the following text into ``mobilenet_v2_pytorch.yaml`` : - - .. code-block:: sh - - models: - - name: mobilenet-v2-pytorch - - launchers: - - framework: dlsdk - device: CPU - adapter: classification - - datasets: - - name: classification_dataset - data_source: ./ImageNet - annotation_conversion: - converter: imagenet - annotation_file: ./ImageNet/val.txt - reader: pillow_imread - - preprocessing: - - type: resize - size: 256 - aspect_ratio_scale: greater - use_pillow: True - - type: crop - size: 224 - use_pillow: True - - type: bgr_to_rgb - - metrics: - - name: accuracy@top1 - type: accuracy - top_k: 1 - - - name: accuracy@top5 - type: accuracy - top_k: 5 - - - where ``data_source: ./ImageNet`` is the dataset and ``annotation_file: ./ImageNet/val.txt`` - is the annotation file prepared in the previous step. For more information about - the Accuracy Checker configuration file refer to :doc:`Accuracy Checker Tool documentation `. - -3. Evaluate the accuracy of the full-precision model in the IR format by executing the following command in ```` : - - .. code-block:: sh - - accuracy_check -c mobilenet_v2_pytorch.yaml -m ./public/mobilenet-v2-pytorch/FP32/ - - - The actual result should be like **71.81%** of the accuracy top-1 metric on VNNI-based CPU. - Note that the results might be different on CPUs with different instruction sets. - - -Model Quantization -#################### - -1. Create a new file in ```` and name it ``mobilenet_v2_pytorch_int8.json``. This is the POT configuration file. - -2. Put the following text into ``mobilenet_v2_pytorch_int8.json`` : - - .. code-block:: sh - - { - "model": { - "model_name": "mobilenet-v2-pytorch", - "model": "./public/mobilenet-v2-pytorch/FP32/mobilenet-v2-pytorch.xml", - "weights": "./public/mobilenet-v2-pytorch/FP32/mobilenet-v2-pytorch.bin" - }, - "engine": { - "config": "./mobilenet_v2_pytorch.yaml" - }, - "compression": { - "algorithms": [ - { - "name": "DefaultQuantization", - "params": { - "preset": "mixed", - "stat_subset_size": 300 - } - } - ] - } - } - - - where ``"model": "./public/mobilenet-v2-pytorch/FP32/mobilenet-v2-pytorch.xml"`` and - ``"weights": "./public/mobilenet-v2-pytorch/FP32/mobilenet-v2-pytorch.bin"`` specify - the full-precision model in the IR format, ``"config": "./mobilenet_v2_pytorch.yaml"`` - is the Accuracy Checker configuration file, and ``"name": "DefaultQuantization"`` is the algorithm name. - -3. 
Perform model quantization by executing the following command in ````: - - .. code-block:: sh - - pot -c mobilenet_v2_pytorch_int8.json -e - - - The quantized model is placed into a subfolder, named with the current date and time, - under the ``./results/mobilenetv2_DefaultQuantization/`` directory. - The accuracy validation of the quantized model is performed right after the quantization. - The result should be about **71.556%** for the top-1 accuracy metric on a VNNI-based CPU. - Note that the results might be different on CPUs with different instruction sets. - - -Performance Benchmarking of Quantized Model -########################################### - -Check the performance of the quantized model using the :doc:`Deep Learning Benchmark ` tool: - -.. code-block:: sh - - benchmark_app -m - - -where ```` is the path to the quantized model. -Note that the results might be different depending on the characteristics of your -machine. On a machine with an Intel® Core™ i9-10920X CPU @ 3.50GHz, the results are similar to: - -.. code-block:: sh - - Latency: 1.54 ms - Throughput: 3814.18 FPS - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.rst deleted file mode 100644 index 5a495d77c63334..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/pot_faq.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. {#pot_docs_FrequentlyAskedQuestions} - -[Deprecated] Post-training Optimization Tool FAQ -=========================================================== - - -.. danger:: - - Post-training Optimization Tool has been deprecated since OpenVINO 2023.0. - :doc:`Neural Network Compression Framework (NNCF) ` is recommended for post-training quantization instead. - - -If your question is not covered below, use the `OpenVINO™ Community Forum page `__, where you can participate freely. - - -.. dropdown:: Is the Post-training Optimization Tool (POT) open-sourced? - - Yes, POT is developed on GitHub as a part of `openvinotoolkit/openvino `__ under the Apache-2.0 License. - -.. dropdown:: Can I quantize my model without a dataset? - - In general, you should have a dataset. The dataset should be annotated if you want to validate the accuracy. - If your dataset is not annotated, you can use :doc:`Default Quantization ` - to quantize the model, or the command-line interface with :doc:`Simplified mode `. - -.. dropdown:: Can a model in any framework be quantized by the POT? - - The POT accepts models in the OpenVINO™ Intermediate Representation (IR) format only. For that, you need to convert your model to the IR format using - :doc:`model conversion API `. - - -.. dropdown:: I'd like to quantize a model and I've converted it to IR but I don't have the Accuracy Checker config. What can I do? - - 1. Try quantization using the Python API of the Post-training Optimization Tool. For more details, see :doc:`Default Quantization `. - 2. If you consider command-line usage only, refer to the :doc:`Accuracy Checker documentation ` to create the Accuracy Checker configuration file, - and try to find the configuration file for your model among the ones available in the Accuracy Checker examples. - 3. An alternative way is to quantize the model in the :doc:`Simplified mode `, but you will not be able to measure the accuracy. - -.. dropdown:: What is the tradeoff when you go to low precision? - - The tradeoff is between the accuracy drop and performance. 
When a model is in low precision, it usually performs faster - than the same model in full precision, but the accuracy might be worse. You can find some benchmarking results in - :doc:`INT8 vs FP32 Comparison on Select Networks and Platforms `. - The other benefit of having a model in low precision is its smaller size. - -.. dropdown:: I tried all recommendations from "Post-Training Optimization Best Practices" but either have a high accuracy drop or bad performance after quantization. What else can I do? - - First of all, you should validate the POT compression pipeline you are running, which can be done with the following steps: - - 1. Make sure the accuracy of the original uncompressed model has the value you expect. Run your POT pipeline with an empty compression config and evaluate the resulting model metric. - Compare this uncompressed model accuracy metric value with your reference. - 2. Run your compression pipeline with a single compression algorithm (:doc:`Default Quantization ` or :doc:`Accuracy-aware Quantization `) - without any parameter values specified in the config (except for ``preset`` and ``stat_subset_size``). Make sure you get the desired accuracy drop/performance gain in this case. - - Finally, if you have done the steps above and the problem persists, you could try to compress your model using the - `Neural Network Compression Framework (NNCF) `__. Note that NNCF usage requires a - PyTorch or TensorFlow 2 based training pipeline for your model to perform Quantization-aware Training. - See :doc:`Model Optimization Guide ` for more details. - -.. dropdown:: I get “RuntimeError: Cannot get memory” and “RuntimeError: Output data was not allocated” when I quantize my model with the POT. - - These issues happen due to an insufficient amount of available memory for statistics collection during the quantization process of a huge model, or - due to a very high resolution of input images in the quantization dataset. If you cannot increase your RAM size, one of the following options can help: - - - Set the ``inplace_statistics`` parameter to ``True``. In that case, the POT will change the statistics collection method and use less memory. - Note that such a change might increase the time required for quantization. - - Set the ``eval_requests_number`` and ``stat_requests_number`` parameters to 1. In that case, the POT will limit the number of infer requests to 1 and use less memory. - Note that such a change might increase the time required for quantization. - - Set the ``use_fast_bias`` parameter to ``false``. In that case, the POT will switch from the FastBiasCorrection algorithm to the full BiasCorrection algorithm, - which is usually more accurate and takes more time but requires less memory. See :doc:`Post-Training Optimization Best Practices ` for more details. - - Reshape your model to a lower resolution and resize the images in the dataset accordingly. Note that such a change might impact the accuracy. - -.. dropdown:: I have successfully quantized my model with a low accuracy drop and improved performance but the output video generated from the low precision model is much worse than from the full precision model. What could be the root cause? - - It can happen due to the following reasons: - - - A wrong or unrepresentative dataset was used during the quantization and accuracy validation. - Please make sure that your data and labels are correct and that they sufficiently reflect the use case. 
- - If the command-line interface was used for quantization, a wrong Accuracy Checker configuration file could lead to this problem. - Refer to the :doc:`Accuracy Checker documentation ` for more information. - - If :doc:`Default Quantization ` was used for quantization, you can also try the - :doc:`Accuracy-aware Quantization ` method, which allows controlling the maximum accuracy deviation. - -.. dropdown:: The quantization process of my model takes a lot of time. Can it be decreased somehow? - - Quantization time depends on multiple factors, such as the size of the model and the dataset. It also depends on the algorithm: - the :doc:`Default Quantization ` algorithm takes less time than the :doc:`Accuracy-aware Quantization ` algorithm. - The following configuration parameters also impact the quantization time - (see details in :doc:`Post-Training Optimization Best Practices `): - - - ``use_fast_bias``: when set to ``false``, it increases the quantization time - - ``stat_subset_size``: the higher the value of this parameter, the more time will be required for the quantization - - ``tune_hyperparams``: if set to ``true`` when the AccuracyAwareQuantization algorithm is used, it increases the quantization time - - ``stat_requests_number``: the lower the number, the more time might be required for the quantization - - ``eval_requests_number``: the lower the number, the more time might be required for the quantization - - Note that higher values of ``stat_requests_number`` and ``eval_requests_number`` increase memory consumption by POT. - -.. dropdown:: When I execute POT CLI, I get "File "/workspace/venv/lib/python3.7/site-packages/nevergrad/optimization/base.py", line 35... SyntaxError: invalid syntax". What is wrong? - - This error is reported when the Python version in your environment is older than 3.7. Upgrade your Python version. - -.. dropdown:: What does the message "ModuleNotFoundError: No module named 'some\_module\_name'" mean? - - It means that some required Python module is not installed in your environment. To install it, run ``pip install some_module_name``. - -.. dropdown:: Is there a way to collect an intermediate IR when the AccuracyAware mechanism fails? - - You can add ``"dump_intermediate_model": true`` to the POT configuration file, and an intermediate IR will be dumped to the ``accuracy_aware_intermediate`` folder. - -.. dropdown:: What do the messages "Output name: result_operation_name not found" or "Output node with result_operation_name is not found in graph" mean? - - These errors are caused by missing output node names in a graph when using the POT tool for model quantization. - They might appear for some models only for IRs converted from ONNX models using the new frontend (which is the default - conversion path starting from the 2022.1 release). To avoid such errors, use the legacy MO frontend to convert a model - to IR by passing the ``--use_legacy_frontend`` option. Then, use the produced IR for quantization. - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.rst deleted file mode 100644 index 8ace77fcf70b92..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/protecting_model.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. 
{#pot_ranger_README} - -[Deprecated] Experimental: Protecting Deep Learning Model through Range Supervision ("RangeSupervision") -================================================================================================================ - -.. danger:: - - Post-training Optimization Tool has been deprecated since OpenVINO 2023.0. - :doc:`Neural Network Compression Framework (NNCF) ` is recommended for post-training quantization instead. - -Introduction -#################### - -Deep neural networks find applications in many scenarios where the prediction is a critical component for safety-relevant decisions. Such workloads can benefit from additional protection against underlying errors. For example, memory bit flips (**"soft errors"** originating, e.g., from external radiation or internal electrical disturbances within the circuitry) in the platform hosting the network inference can corrupt the learned network parameters and lead to incorrect predictions. Typically, errors resulting in very large parameter values have a more drastic impact on the network behavior. **The range supervision algorithm ("RangeSupervision") described here establishes and inserts additional protection layers after already present activation layers**. Those layers truncate values that are found to be out of an expected activation range in order to mitigate the traces of potential platform errors. They do so during inference by applying a *clamp* operation to any activation *x* in the input to the RangeSupervision layer, - -.. math:: - - x = clamp(x; T_{low}, T_{up}) = min(max(x, T_{low}), T_{up}) - - -where :math:`T_{low}` and :math:`T_{up}` are the lower and upper bounds for the particular protection layer, respectively. -The process flow follows the diagram :ref:`Fig 1 `. Starting from the Intermediate Representation (IR) of an OpenVINO model, the POT RangeSupervision algorithm is called to **add protection layers into the model graph**. This step requires **appropriate threshold values that are automatically extracted from a specified test dataset**. The result is an IR representation of the model with additional "RangeSupervision" layers after each supported activation layer. The original and the modified model can be called in the same way through the OpenVINO inference engine to evaluate the impact on accuracy, performance, and dependability in the presence of potential soft errors (for example, using the *benchmark_app* and *accuracy_checker* functions). **The algorithm is designed to provide efficient protection at negligible performance overhead or accuracy impact in the absence of faults.** Bound extraction is a one-time effort, and the protected IR model returned by the RangeSupervision algorithm can be used independently from there on. No changes in the learned parameters of the network are needed. - -.. _schematic-supervision: - -.. image:: _static/images/range_supervision/scheme3.svg - :alt: Schematic - - -*Fig 1: Schematic of RangeSupervision process flow.* - -Supported activation layers -+++++++++++++++++++++++++++ - -The following activation layers are currently supported for range supervision: - -- ``ReLU`` -- ``Swish`` -- ``PReLU`` -- ``Elu`` -- ``Gelu`` -- ``Sigmoid`` -- ``Tanh`` - -Any activation layer of one of the above types contained in the model will be protected with an appropriate subsequent RangeSupervision layer. - -Usage -#################### - -RangeSupervision protection can be used in the same way as the :doc:`DefaultQuantization ` method. 
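-Before configuring the algorithm, it may help to see what the inserted protection layers compute. The clamp operation from the Introduction can be sketched in NumPy as follows; this is a minimal illustrative sketch, not POT code: the function name and the example bounds are assumptions, and in practice :math:`T_{low}` and :math:`T_{up}` are extracted per layer from the specified dataset:
-
-.. code-block:: py
-
-   import numpy as np
-
-   def range_supervision(x: np.ndarray, t_low: float, t_up: float) -> np.ndarray:
-       """Clamp activations to the protection bounds [t_low, t_up]."""
-       return np.minimum(np.maximum(x, t_low), t_up)
-
-   # A bit flip may inflate one activation to a huge value; the protection
-   # layer truncates it back into the expected range.
-   activations = np.array([0.3, 1.2, 65504.0, -0.5])
-   print(range_supervision(activations, t_low=0.0, t_up=6.0))  # [0.3 1.2 6.  0. ]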
- -Algorithm configuration -+++++++++++++++++++++++ - -The algorithm has a minimal configuration. Below is an example of such a configuration: - -.. code-block:: json - - { - "name": "RangeSupervision", - "params": { - "stat_subset_size": 300, - "stat_batch_size": 1 - } - } - - -The protected model will be saved in the IR format in a new folder ``./results/_RangeSupervision/...``. - -Mandatory parameters: - -- ``"stat_subset_size"``: This parameter defines *how many images* of the specified dataset in "engine: config" are used to extract the bounds (images are chosen randomly if a subset is used). This value is set to **300** by default. The more images are selected for the bound generation, the more accurate the estimation of an out-of-bound event will be, at the cost of increased extraction time. - -Example of RangeSupervision results -################################### - -The following example shows a traffic camera image and predicted objects using a YOLOv3 model pre-trained on the COCO dataset. A single weight fault was injected in a randomly chosen convolution layer of YOLO, flipping the most significant bit of the selected network parameter. If range supervision is applied, the original network performance is recovered despite the presence of the fault. - -.. image:: _static/images/range_supervision/img_combined_2.png - - -*Fig 2: Example of fault mitigation via range supervision.* - -Additional Resources -#################### - -- Z. Chen, G. Li, and K. Pattabiraman, "A Low-cost Fault Corrector for Deep Neural Networks through Range Restriction", 2020. https://arxiv.org/abs/2003.13874 -- F. Geissler, Q. Syed, S. Roychowdhury, A. Asgari, Y. Peng, A. Dhamasia, R. Graefe, K. Pattabiraman, and M. Paulitsch, "Towards a Safety Case for Hardware Fault Tolerance in Convolutional Neural Networks Using Activation Range Supervision", 2021. https://arxiv.org/abs/2108.07019 - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.rst deleted file mode 100644 index f5f4e3d08ec255..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. {#pot_docs_BestPractices} - -[Deprecated] Post-Training Quantization Best Practices -======================================================== - - -.. toctree:: - :maxdepth: 1 - :hidden: - - Saturation Issue - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - -The :doc:`Default Quantization ` of the Post-training Optimization Tool (POT) is -the fastest and easiest way to get a quantized model. In most cases, it requires only an unannotated representative dataset. Therefore, it is recommended to use it as a starting point when it comes to model optimization. However, it can lead to significant accuracy deviation in some cases. The purpose of this article is to provide tips to address this issue. - -.. note:: - - POT uses inference on the CPU during model optimization. This means that the ability to infer the original floating-point model is essential for model optimization. 
In the case of 8-bit quantization, it is recommended to run POT on the same CPU architecture as the target when optimizing for CPU, or on a VNNI-based CPU when quantizing for a non-CPU device, such as GPU, NPU, or GNA. This should help avoid the impact of the :doc:`saturation issue ` that occurs on AVX and SSE-based CPU devices. - - -Improving accuracy after the Default Quantization -################################################# - -Parameters of the Default Quantization algorithm with basic settings are presented below: - -.. code-block:: py - :force: - - { - "name": "DefaultQuantization", # Optimization algorithm name - "params": { - "preset": "performance", # Preset [performance, mixed] which controls - # the quantization scheme. For the CPU: - # performance - symmetric quantization of weights and activations. - # mixed - symmetric weights and asymmetric activations. - # accuracy - the same as "mixed" for CPU, GPU, and GNA devices; asymmetric weights and activations for NPU device. - "stat_subset_size": 300 # Size of the subset to calculate activations statistics that can be used - # for quantization parameters calculation. - } - } - - -In case of substantial accuracy degradation after applying this method, there are two alternatives: - -1. Hyperparameters tuning. -2. AccuracyAwareQuantization algorithm. - -Tuning Hyperparameters of the Default Quantization -++++++++++++++++++++++++++++++++++++++++++++++++++ - -The Default Quantization algorithm provides multiple hyperparameters which can be used to improve accuracy results for the fully-quantized model. -Below is a list of best practices that can be applied to improve accuracy without a substantial performance reduction with respect to default settings: - -1. The first recommended option is to change the ``preset`` from ``performance`` to ``mixed``. This enables asymmetric quantization of activations and can be helpful for models with non-ReLU activation functions, for example, YOLO, EfficientNet, etc. -2. The next option is ``use_fast_bias``. Setting this option to ``false`` enables a different bias correction method, which is, in general, more accurate and is applied after model quantization as part of the Default Quantization algorithm. - - .. note:: Changing this option can substantially increase quantization time in the POT tool. - -3. Some model architectures require a special approach when being quantized. For example, Transformer-based models need to keep some operations in the original precision to preserve accuracy. That is why POT provides a ``model_type`` option to specify the model architecture. Currently, only the ``"transformer"`` type is available. Use it to quantize Transformer-based models, e.g. BERT. -4. Another important option is ``range_estimator``. It defines how to calculate the minimum and maximum of the quantization range for weights and activations. For example, the following ``range_estimator`` for activations can improve the accuracy for Faster R-CNN-based networks: - - .. code-block:: py - :force: - - { - "name": "DefaultQuantization", - "params": { - "preset": "performance", - "stat_subset_size": 300, - "activations": { # defines activation - "range_estimator": { # defines how to estimate statistics - "max": { # right border of the quantizing floating-point range - "aggregator": "max", # use max(x) to aggregate statistics over calibration dataset - "type": "abs_max" # use abs(max(x)) to get per-sample statistics - } - } - } - } - } - - -5. The next option is ``stat_subset_size``. 
It controls the size of the calibration dataset used by POT to collect statistics for quantization parameter initialization. It is assumed that this dataset should contain a sufficient number of representative samples. Thus, varying this parameter may affect accuracy (higher is better). However, we empirically found that 300 samples are sufficient to get representative statistics in most cases. -6. The last option is ``ignored_scope``. It allows excluding some layers from the quantization process, i.e. their inputs will not be quantized. It may be helpful for some patterns for which it is known in advance that they degrade accuracy when executed in low precision. For example, the ``DetectionOutput`` layer of the SSD model expressed as a subgraph should not be quantized to preserve the accuracy of Object Detection models. One of the sources for the ignored scope can be the Accuracy-aware algorithm, which can revert layers to the original precision (see details below). - -Find all the possible options and their description in the configuration `specification file `__ in the POT directory. - -Accuracy-aware Quantization -########################### - -When the steps above do not lead to an accurate quantized model, you may use the :doc:`Accuracy-aware Quantization ` algorithm, which leads to mixed-precision models. A fragment of Accuracy-aware Quantization configuration with default settings is shown below: - -.. code-block:: py - :force: - - { - "name": "AccuracyAwareQuantization", - "params": { - "preset": "performance", - "stat_subset_size": 300, - "maximal_drop": 0.01 # Maximum accuracy drop which has to be achieved after the quantization - } - } - - -Since Accuracy-aware Quantization calls Default Quantization as its first step, all the parameters of the latter are also valid and can be applied in the accuracy-aware scenario. - -.. note:: - - In general, the potential increase in speed with the Accuracy-aware Quantization algorithm is not as high as with the Default Quantization, where the model gets fully quantized. - - -Reducing the performance gap of Accuracy-aware Quantization -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -To improve model performance after Accuracy-aware Quantization, try the ``"tune_hyperparams"`` setting and set it to ``True``. It will enable searching for optimal quantization parameters before reverting layers to the "backup" precision. Note that this may increase the overall quantization time. - -If you do not achieve the desired accuracy and performance after applying the Accuracy-aware Quantization algorithm, or if you need an accurate fully-quantized model, we recommend using Quantization-aware Training from :doc:`NNCF `. - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.rst deleted file mode 100644 index 369adc5b981869..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantization_best_practices/saturation_issue.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. {#pot_saturation_issue} - -[Deprecated] Saturation (overflow) Issue Workaround -======================================================= - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. 
:doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -Introduction -#################### - -8-bit instructions of older Intel CPU generations (based on SSE, AVX-2, and AVX-512 instruction sets) are prone to so-called saturation (overflow) of the intermediate buffer when calculating the dot product, which is an essential part of Convolutional or MatMul operations. This saturation can lead to a drop in accuracy when running inference of 8-bit quantized models on the mentioned architectures. Additionally, it is impossible to predict if the issue occurs in a given setup, since most computations are executed in parallel during DL model inference, which makes this process non-deterministic. This is a common problem for models with non-ReLU activation functions and a low level of redundancy (for example, optimized or efficient models). It can prevent deploying the model on legacy hardware or creating cross-platform applications. The problem does not occur on GPUs or on CPUs with Intel Deep Learning Boost (VNNI) technology and later generations. - -Saturation Problem Detection -############################ - -The only way to detect the saturation issue is to run inference on a CPU that is prone to it and then on one that is not (for example, a VNNI-based CPU). A significant difference in accuracy (more than 1%) is the main indicator of the saturation issue impact. - -Saturation Issue Workaround -########################### - -While quantized activations use the full range of 8-bit data types, there is a workaround that uses only 7 bits to represent weights (of Convolutional or Fully-Connected layers). Using this algorithm for the first layer can help mitigate the saturation issue for many models. However, this can lead to lower accuracy due to the reduced representation of weights. - -The POT tool provides three options to deal with the saturation issue. The options can be enabled in the POT configuration file using the ``saturation_fix`` parameter: - -* "first_layer" option -- (default) fixes the saturation issue for the first layer. -* "all" option -- applies the fix to all layers in the model. -* "no" option -- does not apply the saturation fix at all. - -Below is an example of the section in the POT configuration file with the ``saturation_fix`` option: - -.. code-block:: json - - "algorithms": [ - { - "name": "DefaultQuantization", - "params": { - "preset": "performance", - "stat_subset_size": 300, - "saturation_fix": "all" // Apply the saturation fix to all the layers - } - } - ] - - -If you observe the saturation issue, try the "all" option during model quantization. If the accuracy problem still occurs, try using `Quantization-aware training from NNCF `__ and fine-tuning the model. - -Use the "no" option when you do not target legacy CPU hardware. It might also lead to slightly better accuracy. - -Additional Resources -#################### - -* `Lower Numerical Precision Deep Learning Inference and Training blogpost `__ -* :doc:`Configuration file description ` - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.rst deleted file mode 100644 index 48bfb24fe9ce66..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models.rst +++ /dev/null @@ -1,196 +0,0 @@ -.. 
{#pot_default_quantization_usage} - -[Deprecated] Quantizing Models -====================================== - - - -.. toctree:: - :maxdepth: 1 - :hidden: - - DefaultQuantization Method - - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - -This guide describes how to apply model quantization with the Default Quantization method without accuracy control, using an unannotated dataset. -To use this method, create a Python script using the API of the Post-Training Optimization Tool (POT) and implement the data preparation logic and quantization pipeline. -If you are not familiar with Python, try the :doc:`command-line interface ` of POT, which is designed to quantize models from -the OpenVINO `Model Zoo `__. The figure below shows the common workflow of the quantization script implemented with the POT API. - -.. image:: _static/images/default_quantization_flow.svg - -The script should include three basic steps: - -1. Prepare data and dataset interface. -2. Select quantization parameters. -3. Define and run the quantization process. - -Prepare data and dataset interface -################################## - -In most cases, only the ``openvino.tools.pot.DataLoader`` interface needs to be implemented. It provides access to the data from a dataset by index and can apply model-specific pre-processing. Any implementation should override the following methods: - -* The ``__len__()`` method returns the size of the dataset. -* The ``__getitem__()`` method provides access to the data by index in the range of 0 to ``len(self)``. It can also encapsulate the logic of model-specific pre-processing. This method should return data in the ``(data, annotation)`` format, in which: - - * The ``data`` is the input that is passed to the model at inference, so it should be properly preprocessed. It can be either the ``numpy.array`` object or a dictionary, where the key is the name of the model input and the value is the ``numpy.array`` that corresponds to this input. - * The ``annotation`` is not used by the Default Quantization method. Therefore, this object can be ``None`` in this case. - -Framework data loading classes can be wrapped by the ``openvino.tools.pot.DataLoader`` interface, which is usually straightforward. -For example, ``torch.utils.data.Dataset`` has a similar interface to ``openvino.tools.pot.DataLoader``, -so its TorchVision implementations can be easily wrapped by the POT API. - -.. note:: - - Model-specific preprocessing (for example, mean/scale normalization) can be embedded into the model at the conversion step, using Model Optimizer. This should be considered during the implementation of the DataLoader interface to avoid "double" normalization, which can lead to a loss of accuracy after optimization. - - -The example code below defines the ``DataLoader`` for three popular use cases: images, text, and audio. - - -.. tab-set:: - - .. tab-item:: Images - :sync: images - - .. doxygensnippet:: tools/pot/docs/code/data_loaders.py - :language: python - :fragment: image_loader - - .. tab-item:: Text - :sync: text - - .. doxygensnippet:: tools/pot/docs/code/data_loaders.py - :language: python - :fragment: text_loader - - .. tab-item:: Audio - :sync: audio - - .. 
doxygensnippet:: tools/pot/docs/code/data_loaders.py - :language: python - :fragment: audio_loader - - -Select quantization parameters -############################## - -The Default Quantization algorithm has mandatory and optional parameters, which are defined as a dictionary: - -.. code-block:: py - :force: - - { - "name": "DefaultQuantization", - "params": { - "target_device": "ANY", - "stat_subset_size": 300, - "stat_batch_size": 1 - }, - } - - -* ``"target_device"`` - the following options are available: - - * ``"ANY"`` (or ``"CPU"``) - default option to quantize models for CPU, GPU, or NPU - * ``"CPU_SPR"`` - to quantize models for CPU SPR (4th Generation Intel® Xeon® Scalable processor family) - * ``"GNA"``, ``"GNA3"``, ``"GNA3.5"`` - to quantize models for GNA devices respectively. - -* ``"stat_subset_size"`` - size of the data subset used to calculate activations statistics for quantization. The whole dataset is used if no parameter is specified. It is recommended to use at least 300 samples. -* ``"stat_batch_size"`` - size of the batch used to calculate activations statistics for quantization. The default value is 1. - -For the full specification, see the :doc:`Default Quantization method `. - -Run quantization -#################### - -The POT API provides methods to load and save model objects from OpenVINO Intermediate Representation: ``load_model`` and ``save_model``. It also has a concept of the ``Pipeline`` that sequentially applies specified optimization methods to the model. The ``create_pipeline`` method is used to instantiate a ``Pipeline`` object. -The example code below shows a basic quantization workflow: - - -.. code-block:: py - :force: - - from openvino.tools.pot import IEEngine - from openvino.tools.pot import load_model, save_model - from openvino.tools.pot import compress_model_weights - from openvino.tools.pot import create_pipeline - - # Model config specifies the name of the model and paths to the .xml and .bin files of the model. - model_config = { - "model_name": "model", - "model": path_to_xml, - "weights": path_to_bin, - } - - # Engine config. - engine_config = {"device": "CPU"} - - algorithms = [ - { - "name": "DefaultQuantization", - "params": { - "target_device": "ANY", - "stat_subset_size": 300, - "stat_batch_size": 1 - }, - } - ] - - # Step 1: Implement and create a user data loader. - data_loader = ImageLoader("") - - # Step 2: Load a model. - model = load_model(model_config=model_config) - - # Step 3: Initialize the engine for metric calculation and statistics collection. - engine = IEEngine(config=engine_config, data_loader=data_loader) - - # Step 4: Create a pipeline of compression algorithms and run it. - pipeline = create_pipeline(algorithms, engine) - compressed_model = pipeline.run(model=model) - - # Step 5 (Optional): Compress model weights to quantized precision - # to reduce the size of the final .bin file. - compress_model_weights(compressed_model) - - # Step 6: Save the compressed model to the desired path. - # Set save_path to the directory where the model should be saved. - compressed_model_paths = save_model( - model=compressed_model, - save_path="optimized_model", - model_name="optimized_model", - ) - - -The output of the script is the quantized model that can be used for inference in the same way as the original full-precision model. 
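-A minimal sketch of the ``ImageLoader`` used in Step 1 above might look as follows. This is an illustrative assumption rather than POT code: the flat image-directory layout, the OpenCV-based reading, and the 224x224 NCHW input shape are placeholders to adapt to your model. Since Default Quantization does not need annotations, ``None`` is returned for them:
-
-.. code-block:: py
-   :force:
-
-   import os
-
-   import cv2
-   import numpy as np
-   from openvino.tools.pot import DataLoader
-
-   class ImageLoader(DataLoader):
-       """Loads images from a flat directory and preprocesses them."""
-
-       def __init__(self, dataset_path):
-           self._files = sorted(
-               os.path.join(dataset_path, f) for f in os.listdir(dataset_path)
-           )
-
-       def __len__(self):
-           return len(self._files)
-
-       def __getitem__(self, index):
-           # Read the image and apply model-specific pre-processing.
-           image = cv2.imread(self._files[index])
-           image = cv2.resize(image, (224, 224))   # assumed model input size
-           image = image.transpose(2, 0, 1)[None]  # HWC -> NCHW, batch of 1
-           # Annotation is not used by Default Quantization, so return None.
-           return image.astype(np.float32), None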
- -If high degradation of accuracy occurs after applying the Default Quantization method, -it is recommended to follow the tips from the :doc:`Quantization Best Practices ` -article or use the :doc:`Accuracy-aware Quantization ` method. - -Quantizing cascaded models -########################## - -When the optimized model is a cascaded one (i.e., it consists of several submodels, for example, MT-CNN), it will be necessary to implement a complex inference pipeline that can properly handle different submodels and the data flow between them. The POT API provides the ``Engine`` interface for this purpose, which allows customization of the inference logic. However, it is recommended to inherit from the ``IEEngine`` helper class, which already contains all the logic required to do the inference based on the OpenVINO Python API. For more details, see the following :doc:`example `. - -Examples -#################### - -* Tutorials: - - * `Quantization of Image Classification model `__ - * `Quantization of Object Detection model from Model Zoo `__ - * `Quantization of Segmentation model for medical data `__ - * `Quantization of BERT for Text Classification `__ - -* Samples: - - * :doc:`Quantization of 3D segmentation model ` - * :doc:`Quantization of Face Detection model ` - * :doc:`Quantization of speech model for GNA device ` - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.rst deleted file mode 100644 index 78c5f34df09f02..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models/default_quantization_algorithm.rst +++ /dev/null @@ -1,201 +0,0 @@ -.. {#pot_compression_algorithms_quantization_default_README} - -[Deprecated] DefaultQuantization Parameters -======================================================== - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -The DefaultQuantization algorithm is designed to perform fast and accurate quantization. It does not offer direct control over the accuracy metric itself but provides many options that can be used to improve it. - -Parameters -#################### - -The Default Quantization algorithm has mandatory and optional parameters. For more details on how to use these parameters, refer to the :doc:`Best Practices ` document. Below is an example of the definition of the Default Quantization method and its parameters: - -.. code-block:: py - :force: - - { - "name": "DefaultQuantization", # the name of optimization algorithm - "params": { - ... - } - } - - -Mandatory parameters -++++++++++++++++++++ - -- ``"preset"`` - a preset that controls the quantization mode (symmetric and asymmetric). It can take two values: - - - ``"performance"`` (default) - stands for symmetric quantization of weights and activations. This is the most efficient across all the HW. - - ``"mixed"`` - symmetric quantization of weights and asymmetric quantization of activations. This mode can be useful for the quantization of NNs that have both negative and positive input values in quantizing operations, for example, non-ReLU based CNNs. - -- ``"stat_subset_size"`` - size of a subset used to calculate activations statistics for quantization. 
The whole dataset is used if no parameter is specified. It is recommended to use at least 300 samples. -- ``"stat_batch_size"`` - size of a batch used to calculate activations statistics for quantization. It has a value of 1 if no parameter is specified. - -Optional parameters -+++++++++++++++++++ - -All other options should be considered as an advanced mode and require deep knowledge of the quantization process. Below -is an overall description of all possible parameters: - -- ``"model_type"`` - required for accurate optimization of some model architectures. Currently, only the ``"transformer"`` type is supported for Transformer-based models (BERT, etc.). The default value is ``None``. -- ``"inplace_statistics"`` - used to change the statistics collection method from in-place (in-graph operations) to external collectors, which require more memory but can reduce optimization time. The default value is ``True``. -- ``"ignored"`` - NN subgraphs which should be excluded from the optimization process - - - ``"scope"`` - list of particular nodes to exclude - - ``"operations"`` - list of operation types to exclude (expressed in OpenVINO IR notation). This list consists of the following tuples: - - - ``"type"`` - a type of ignored operation. - - ``"attributes"`` - if attributes are defined, they will be considered during inference. They are defined by a dictionary of ``"": ""`` pairs. - -- ``"weights"`` - this section describes the quantization scheme for weights and the way to estimate the quantization range for that. Note that changing the quantization scheme may make it impossible to infer such a model on the existing HW. - - - ``"bits"`` - bit-width, the default value is "8". - - ``"mode"`` - a quantization mode (symmetric or asymmetric). - - ``"level_low"`` - the minimum level in the integer range to quantize. The default is "0" for an unsigned range, and "-2^(bits-1)" for a signed one. - - ``"level_high"`` - the maximum level in the integer range to quantize. The default is "2^bits-1" for an unsigned range, and "2^(bits-1)-1" for a signed one. - - ``"granularity"`` - quantization scale granularity. It can take the following values: - - - ``"pertensor"`` (default) - per-tensor quantization with one scale factor and zero-point. - - ``"perchannel"`` - per-channel quantization with per-channel scale factors and zero-points. - - - ``"range_estimator"`` - this section describes the parameters of the range estimator that is used in the MinMaxQuantization method to get the quantization ranges and filter outliers based on the collected statistics. Below are the parameters that can be modified to get better accuracy results: - - - ``"max"`` - parameters to estimate the top border of the quantizing floating-point range: - - - ``"type"`` - a type of the estimator: - - - ``"max"`` (default) - estimates the maximum of the quantized value set. - - ``"quantile"`` - estimates the quantile of the quantized value set. - - - ``"outlier_prob"`` - outlier probability used in the "quantile" estimator. - - - ``"min"`` - parameters to estimate the bottom border of the quantizing floating-point range: - - - ``"type"`` - a type of the estimator: - - - ``"min"`` (default) - estimates the minimum of the quantized value set. - - ``"quantile"`` - estimates the quantile of the quantized value set. - - - ``"outlier_prob"`` - outlier probability used in the "quantile" estimator. - -- ``"activations"`` - this section describes the quantization scheme for activations and the way to estimate the quantization range for that. 
As before, changing the quantization scheme may make it impossible to infer such a model on the existing HW: - - - ``"bits"`` - bit-width, the default value is "8". - - ``"mode"`` - a quantization mode (symmetric or asymmetric). - - ``"level_low"`` - the minimum level in the integer range to quantize. The default is "0" for an unsigned range, and "-2^(bits-1)" for a signed one. - - ``"level_high"`` - the maximum level in the integer range to quantize. The default is "2^bits-1" for an unsigned range, and "2^(bits-1)-1" for a signed one. - - ``"granularity"`` - quantization scale granularity. It can take the following values: - - - ``"pertensor"`` (default) - per-tensor quantization with one scale factor and zero-point. - - ``"perchannel"`` - per-channel quantization with per-channel scale factors and zero-points. - - - ``"range_estimator"`` - this section describes the parameters of the range estimator that is used in the MinMaxQuantization method to get the quantization ranges and filter outliers based on the collected statistics. These are the parameters that can be modified to get better accuracy results: - - - ``"preset"`` - preset that defines the same estimator for both the top and bottom borders of the quantizing floating-point range. The possible value is ``"quantile"``. - - ``"max"`` - parameters to estimate the top border of the quantizing floating-point range: - - - ``"aggregator"`` - a type of function used to aggregate statistics obtained with the estimator over the calibration dataset to get a value of the top border: - - - ``"mean"`` (default) - aggregates the mean value. - - ``"max"`` - aggregates the max value. - - ``"min"`` - aggregates the min value. - - ``"median"`` - aggregates the median value. - - ``"mean_no_outliers"`` - aggregates the mean value after removal of extreme quantiles. - - ``"median_no_outliers"`` - aggregates the median value after removal of extreme quantiles. - - ``"hl_estimator"`` - Hodges-Lehmann filter-based aggregator. - - - ``"type"`` - a type of the estimator: - - - ``"max"`` (default) - estimates the maximum of the quantized value set. - - ``"quantile"`` - estimates the quantile of the quantized value set. - - - ``"outlier_prob"`` - outlier probability used in the "quantile" estimator. - - - ``"min"`` - parameters to estimate the bottom border of the quantizing floating-point range: - - - ``"type"`` - a type of the estimator: - - - ``"min"`` (default) - estimates the minimum of the quantized value set. - - ``"quantile"`` - estimates the quantile of the quantized value set. - - - ``"outlier_prob"`` - outlier probability used in the "quantile" estimator. - -- ``"use_layerwise_tuning"`` - enables layer-wise fine-tuning of model parameters (biases, Convolution/MatMul weights, and FakeQuantize scales) by minimizing the mean squared error between original and quantized layer outputs. Enabling this option may increase compressed model accuracy, but will result in increased execution time and memory consumption. 
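-As an illustration of how several of the optional parameters above fit together, a hypothetical configuration that excludes one node and one operation type from quantization and uses a quantile-based range estimator for activations could look like this (the node name in ``"scope"`` is an elided placeholder, and the values shown are examples rather than recommendations):
-
-.. code-block:: py
-   :force:
-
-   {
-       "name": "DefaultQuantization",
-       "params": {
-           "preset": "performance",
-           "stat_subset_size": 300,
-           "ignored": {
-               "scope": [""],  # placeholder node name
-               "operations": [
-                   {"type": "Multiply"}
-               ]
-           },
-           "activations": {
-               "range_estimator": {
-                   "preset": "quantile"  # quantile estimator for both borders
-               }
-           }
-       }
-   }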
- -Additional Resources -#################### - -Tutorials: - -* `Quantization of Image Classification model `__ -* `Quantization of Object Detection model from Model Zoo `__ -* `Quantization of Segmentation model for medical data `__ -* `Quantization of BERT for Text Classification `__ - -Examples: - -* :doc:`Quantization of 3D segmentation model ` -* :doc:`Quantization of Face Detection model ` -* :doc:`Quantization of speech model for GNA device ` - -Command-line example: - -* :doc:`Quantization of Image Classification model ` - -A template and full specification for the DefaultQuantization algorithm for the POT command-line interface: - -* `Template `__ -* `Full specification `__ - - -.. dropdown:: Template - - .. code-block:: javascript - - /* This configuration file is the fastest way to get started with the default - quantization algorithm. It contains only mandatory options with commonly used - values. All other options can be considered as an advanced mode and require - deep knowledge of the quantization process. An overall description of all possible - parameters can be found in the default_quantization_spec.json */ - - { - /* Model parameters */ - - "model": { - "model_name": "model_name", // Model name - "model": "", // Path to model (.xml format) - "weights": "" // Path to weights (.bin format) - }, - - /* Parameters of the engine used for model inference */ - - "engine": { - "config": "" // Path to Accuracy Checker config - }, - - /* Optimization hyperparameters */ - - "compression": { - "target_device": "ANY", // Target device, the specifics of which will be taken - // into account during optimization - "algorithms": [ - { - "name": "DefaultQuantization", // Optimization algorithm name - "params": { - "preset": "performance", // Preset [performance, mixed, accuracy] which controls the quantization - // mode (symmetric, mixed (weights symmetric and activations asymmetric), - // and fully asymmetric, respectively) - - "stat_subset_size": 300 // Size of the subset to calculate activations statistics that can be used - // for quantization parameters calculation - } - } - ] - } - } - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.rst deleted file mode 100644 index 0cac04e0e61d77..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy.rst +++ /dev/null @@ -1,205 +0,0 @@ -.. {#pot_accuracyaware_usage} - -[Deprecated] Quantizing Models with Accuracy Control -================================================================ - - -.. toctree:: - :maxdepth: 1 - :hidden: - - AccuracyAwareQuantization Method - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - -The Accuracy-aware Quantization algorithm allows performing quantization while maintaining accuracy within a pre-defined range. Note that it should be used only if :doc:`Default Quantization ` introduces a significant accuracy degradation. The reason for it not being the primary choice is its potential for performance degradation, due to some layers getting reverted to the original precision. - -To proceed with this article, make sure you have read how to use :doc:`Default Quantization `. - -.. 
note:: - - The Accuracy-aware Quantization algorithm's behavior is different for the GNA ``target_device``. In this case, it searches for the best configuration and selects between INT8 and INT16 precisions for the weights of each layer. The algorithm works for the ``performance`` preset only. It is not useful for the ``accuracy`` preset, since the whole model is already in INT16 precision. - -A script for Accuracy-aware Quantization includes four steps: - -1. Prepare data and dataset interface. -2. Define accuracy metric. -3. Select quantization parameters. -4. Define and run the quantization process. - -Prepare data and dataset interface -################################## - -This step is the same as in :doc:`Default Quantization `. The only difference is that ``__getitem__()`` should return ``(data, annotation)`` or ``(data, annotation, metadata)``. The ``annotation`` is required and its format should correspond to the expectations of the ``Metric`` class. The ``metadata`` is an optional field that can be used to store additional information required for post-processing. - -Define accuracy metric -###################### - -To control accuracy during optimization, the ``openvino.tools.pot.Metric`` interface should be implemented. Each implementation should override the following properties and methods: - -**Properties** - -- ``value`` - returns the accuracy metric value for the last model output in the format of ``Dict[str, numpy.array]``. -- ``avg_value`` - returns the average accuracy metric over the collected model results in the format of ``Dict[str, numpy.array]``. -- ``higher_better`` - returns ``True`` if a higher value of the metric corresponds to better performance, otherwise ``False``. The default implementation returns ``True``. - -**Methods** - -- ``update(output, annotation)`` - calculates and updates the accuracy metric value, using the last model output and annotation, which should be passed to this method. It should also contain the model-specific post-processing in case the model returns the raw output. -- ``reset()`` - resets the collected accuracy metric. -- ``get_attributes()`` - returns a dictionary of metric attributes: - - .. code-block:: console - - {metric_name: {attribute_name: value}} - - Required attributes: - - - ``direction`` - (``higher-better`` or ``higher-worse``) a string parameter defining whether the metric value should be increased in accuracy-aware algorithms. - - ``type`` - a string representation of a metric type. For example, "accuracy" or "mean_iou". - - -Below is an example of the top-1 accuracy metric implementation with the POT API: - -.. code-block:: py - :force: - - import numpy as np - - from openvino.tools.pot import Metric - - class Accuracy(Metric): - - # Required methods - def __init__(self, top_k=1): - super().__init__() - self._top_k = top_k - self._name = 'accuracy@top{}'.format(self._top_k) - self._matches = [] # container of the results - - @property - def value(self): - """ Returns the accuracy metric value for the last model output. """ - return {self._name: self._matches[-1]} - - @property - def avg_value(self): - """ Returns the average accuracy metric value over all model outputs. """ - return {self._name: np.ravel(self._matches).mean()} - - def update(self, output, target): - """ Updates prediction matches. - :param output: model output - :param target: annotations - """ - if len(output) > 1: - raise Exception('The accuracy metric cannot be calculated ' - 'for a model with multiple outputs') - if isinstance(target, dict): - target = list(target.values()) - predictions = np.argsort(output[0], axis=1)[:, -self._top_k:] - match = [float(t in predictions[i]) for i, t in enumerate(target)] - - self._matches.append(match) - - def reset(self): - """ Resets collected matches """ - self._matches = [] - - def get_attributes(self): - """ - Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}. - Required attributes: 'direction': 'higher-better' or 'higher-worse' - 'type': metric type - """ - return {self._name: {'direction': 'higher-better', - 'type': 'accuracy'}} - - -An instance of the ``Metric`` implementation should be passed to the ``IEEngine`` object responsible for model inference. - -.. code-block:: python - - metric = Accuracy() - engine = IEEngine(config=engine_config, data_loader=data_loader, metric=metric) - -Select quantization parameters -############################## - -Accuracy-aware Quantization uses the Default Quantization algorithm at the initialization step, so all of its parameters are also valid and can be specified. The only parameter required exclusively by Accuracy-aware Quantization is: - -- ``"maximal_drop"`` - the maximum accuracy drop which has to be achieved after the quantization. The default value is ``0.01`` (1%). - -Run quantization -################ - -The example code below shows a basic quantization workflow with accuracy control. ``UserDataLoader()`` is a placeholder for the implementation of ``DataLoader``. - -.. code-block:: python - - from openvino.tools.pot import IEEngine - from openvino.tools.pot import load_model, save_model - from openvino.tools.pot import compress_model_weights - from openvino.tools.pot import create_pipeline - - # Model config specifies the model name and paths to the model .xml and .bin files - model_config = { - "model_name": "model", - "model": path_to_xml, - "weights": path_to_bin, - } - - # Engine config - engine_config = {"device": "CPU"} - - algorithms = [ - { - "name": "AccuracyAwareQuantization", - "params": { - "target_device": "ANY", - "stat_subset_size": 300, - "maximal_drop": 0.02 - }, - } - ] - - # Step 1: Implement and create a user data loader. - data_loader = UserDataLoader() - - # Step 2: Implement and create a user metric. - metric = Accuracy() - - # Step 3: Load the model. - model = load_model(model_config=model_config) - - # Step 4: Initialize the engine for metric calculation and statistics collection. - engine = IEEngine(config=engine_config, data_loader=data_loader, metric=metric) - - # Step 5: Create a pipeline of compression algorithms and run it. - pipeline = create_pipeline(algorithms, engine) - compressed_model = pipeline.run(model=model) - - # Step 6 (Optional): Compress model weights to quantized precision - # to reduce the size of the final .bin file. - compress_model_weights(compressed_model) - - # Step 7: Save the compressed model to the desired path. - # Set save_path to the directory where the model should be saved. - compressed_model_paths = save_model( - model=compressed_model, - save_path="optimized_model", - model_name="optimized_model", - ) - - # Step 8 (Optional): Evaluate the compressed model. Print the results. 
- metric_results = pipeline.evaluate(compressed_model) - - -Note that the ``Pipeline`` object now also provides an ``evaluate`` method, which can compute accuracy on demand. - -If Accuracy-aware Quantization does not achieve the desired accuracy-performance trade-off, it is recommended to try Quantization-aware Training from :doc:`NNCF `. - -Examples -######## - -* `Quantization of Object Detection model with control of accuracy `__ - - diff --git a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.rst b/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.rst deleted file mode 100644 index 42ba337e79b896..00000000000000 --- a/docs/articles_en/documentation/openvino_legacy_features/post_training_optimization_tool/quantizing_models_with_accuracy/accuracy_aware_algorithm.rst +++ /dev/null @@ -1,134 +0,0 @@ -.. {#accuracy_aware_README} - -[Deprecated] AccuracyAwareQuantization Parameters -======================================================== - -.. danger:: Post-training Optimization Tool is deprecated since OpenVINO 2023.0. :doc:`Neural Network Compression Framework (NNCF) ` is recommended for the post-training quantization instead. - - -Introduction -#################### - -The Accuracy-aware Quantization algorithm is aimed at accurate quantization and allows the model's -accuracy to stay within the pre-defined range. This may cause a degradation in performance -in comparison to the :ref:`Default Quantization ` -algorithm, because some layers can be reverted to the original precision. - -Parameters -#################### - -Since the :doc:`Default Quantization ` -algorithm is used as an initialization, all its parameters are also valid and can be specified. -Here is an example of the definition of the Accuracy-aware Quantization method and its parameters: - -.. code-block:: javascript - - { - "name": "AccuracyAwareQuantization", // the name of the optimization algorithm - "params": { - ... - } - } - - -Below are the descriptions of AccuracyAwareQuantization-specific parameters: - -- ``"ranking_subset_size"`` - size of a subset that is used to rank layers by their - contribution to the accuracy drop. The default value is ``300``; the more samples it - has, the better the ranking is likely to be. -- ``"max_iter_num"`` - the maximum number of iterations of the algorithm. In other - words, the maximum number of layers that may be reverted to floating-point - precision. By default, it is limited by the overall number of quantized layers. -- ``"maximal_drop"`` - the maximum accuracy drop which has to be achieved after the - quantization. The default value is ``0.01`` (1%). -- ``"drop_type"`` - a drop type of the accuracy metric: - - - ``"absolute"`` - the (default) absolute drop with respect to the results of the full-precision model. - - ``"relative"`` - relative to the results of the full-precision model. - -- ``"use_prev_if_drop_increase"`` - whether to use the network snapshot from the previous iteration when the accuracy drop - increases. The default value is ``True``. -- ``"base_algorithm"`` - name of the algorithm that is used to quantize a model at the - beginning. The default value is "DefaultQuantization". -- ``"convert_to_mixed_preset"`` - set to convert the model to "mixed" mode if the accuracy - criteria for the model quantized with the "performance" preset are not satisfied. 
- This option can help to reduce number of layers that are reverted to floating-point - precision.Keep in mind that this is an **experimental** feature. -- ``"metrics"`` - an optional list of metrics that are taken into account during optimization. - It consists of tuples with the following parameters: - - - ``"name"`` - name of the metric to optimize. - - ``"baseline_value"`` - (optional parameter) a baseline metric value of the original - model. The validations onThe validation will be initiated entirely in the beginning if nothing is specified. - -- ``"metric_subset_ratio"`` - a part of the validation set that is used to compare - original full-precision and fully quantized models when creating a ranking subset - in case of predefined metric values of the original model. The default value is ``0.5``. -- ``"tune_hyperparams"`` - enables tuning of quantization parameters as a preliminary - step before reverting layers to the floating-point precision. It can bring - an additional boost in performance and accuracy, at the cost of increased overall - quantization time. The default value is ``False``. - -Additional Resources -#################### - -Example: - -* `Quantization of Object Detection model with the control of accuracy `__ - -A template and full specification for AccuracyAwareQuantization algorithm for POT command-line interface: - -* `Template `__ -* `Full specification `__ - - -.. dropdown:: Template - - .. code-block:: javascript - - /* This configuration file is the fastest way to get started with the accuracy-aware - quantization algorithm. It contains only mandatory options with commonly used - values. All other options can be considered as an advanced mode and require - deep knowledge of the quantization process. An overall description of all possible - parameters can be found in the accuracy_aware_quantization_spec.json */ - - { - /* Model parameters */ - - "model": { - "model_name": "model_name", // Model name - "model": "", // Path to model (.xml format) - "weights": "" // Path to weights (.bin format) - }, - - /* Parameters of the engine used for model inference */ - - "engine": { - "config": "" // Path to Accuracy Checker config - }, - - /* Optimization hyperparameters */ - - "compression": { - "target_device": "ANY", // Target device, the specificity of which will be taken - // into account during optimization - "algorithms": [ - { - "name": "AccuracyAwareQuantization", // Optimization algorithm name - "params": { - "preset": "performance", // Preset [performance, mixed, accuracy] which control the quantization - // mode (symmetric, mixed (weights symmetric and activations asymmetric) - // and fully asymmetric respectively) - - "stat_subset_size": 300, // Size of subset to calculate activations statistics that can be used - // for quantization parameters calculation - - "maximal_drop": 0.01, // Maximum accuracy drop which has to be achieved after the quantization - "tune_hyperparams": false // Whether to search the best quantization parameters for model - } - } - ] - } - } - - diff --git a/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst index f081eb016ba67d..52fe5fa1b5aa99 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/hello_classification.rst @@ -261,7 +261,7 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples ` - :doc:`Convert a Model 
` -- :doc:`C API Reference ` +- :doc:`OpenVINO Runtime C API ` - `Hello Classification Python Sample on Github `__ - `Hello Classification C++ Sample on Github `__ - `Hello Classification C Sample on Github `__ diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst index 4ad10fbc386c56..83ed716bbcb452 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst @@ -6,8 +6,8 @@ CPU Device .. meta:: - :description: The CPU plugin in the Intel® Distribution of OpenVINO™ toolkit - is developed to achieve high performance inference of neural + :description: The CPU plugin in the Intel® Distribution of OpenVINO™ toolkit + is developed to achieve high performance inference of neural networks on Intel® x86-64 and Arm® CPUs. @@ -19,7 +19,7 @@ For an in-depth description of CPU plugin, see: .. note:: The scope of the CPU plugin features and optimizations on Arm® may differ from Intel® x86-64. If the limitation is not mentioned explicitly, the feature is supported for all CPU architectures. - + Device Name ########################################################### @@ -75,13 +75,8 @@ The ``u1/u8/i8`` data types are used for quantized operations only, i.e., those For more details on how to get a quantized model see the :doc:`low-precision optimization guide `. -.. note:: - - Platforms that do not support Intel® AVX512-VNNI have a known "saturation issue" that may lead to reduced computational accuracy for ``u8/i8`` precision calculations. - To get more information on how to detect such issues and possible workarounds, see the :doc:`saturation (overflow) issue section `. +.. note:: -.. note:: - Arm® platforms execute quantized models in simulation mode: the whole model (including quantization operations) is executed in floating-point precision. @@ -93,11 +88,11 @@ CPU plugin supports the following floating-point data types as inference precisi - ``f32`` (Intel® x86-64, Arm®) - ``bf16`` (Intel® x86-64) -The default floating-point precision of a CPU primitive is ``f32``. To support the ``f16`` OpenVINO IR the plugin internally converts +The default floating-point precision of a CPU primitive is ``f32``. To support the ``f16`` OpenVINO IR the plugin internally converts all the ``f16`` values to ``f32`` and all the calculations are performed using the native precision of ``f32``. On platforms that natively support ``bfloat16`` calculations (have the ``AVX512_BF16`` or ``AMX`` extension), the ``bf16`` type is automatically used instead of ``f32`` to achieve better performance (see the `Execution Mode Hint <#execution-mode-hint>`__). -Thus, no special steps are required to run a ``bf16`` model. For more details about the ``bfloat16`` format, see +Thus, no special steps are required to run a ``bf16`` model. For more details about the ``bfloat16`` format, see the `BFLOAT16 – Hardware Numerics Definition white paper `__. Using the ``bf16`` precision provides the following performance benefits: @@ -105,7 +100,7 @@ Using the ``bf16`` precision provides the following performance benefits: - ``bfloat16`` data type allows using Intel® Advanced Matrix Extension (AMX), which provides dramatically faster computations on corresponding hardware in comparison with AVX512 or AVX2 instructions in many DL operation implementations. 
- Reduced memory consumption since ``bfloat16`` data is half the size of 32-bit float.

-To check if the CPU device can support the ``bfloat16`` data type, use the :doc:`query device properties interface `
+To check if the CPU device can support the ``bfloat16`` data type, use the :doc:`query device properties interface `
to query ``ov::device::capabilities`` property, which should contain ``BF16`` in the list of CPU capabilities:

@@ -129,7 +124,7 @@ to query ``ov::device::capabilities`` property, which should contain ``BF16`` in

 Inference Precision Hint
 -----------------------------------------------------------

-If the model has been converted to ``bf16``, the ``ov::hint::inference_precision`` is set to ``ov::element::bf16`` and can be checked via
+If the model has been converted to ``bf16``, the ``ov::hint::inference_precision`` is set to ``ov::element::bf16`` and can be checked via
the ``ov::CompiledModel::get_property`` call. The code below demonstrates how to get the element type:

.. tab-set::

@@ -147,7 +142,7 @@ the ``ov::CompiledModel::get_property`` call. The code below demonstrates how to

      .. doxygensnippet:: docs/snippets/cpu/Bfloat16Inference1.cpp
         :language: cpp
         :fragment: [part1]
-
+
To infer the model in ``f32`` precision instead of ``bf16`` on targets with native ``bf16`` support, set the ``ov::hint::inference_precision`` to ``ov::element::f32``.

@@ -168,18 +163,18 @@ To infer the model in ``f32`` precision instead of ``bf16`` on targets with nati
         :fragment: [part2]

-The ``Bfloat16`` software simulation mode is available on CPUs with Intel® AVX-512 instruction set that do not support the
+The ``Bfloat16`` software simulation mode is available on CPUs with the Intel® AVX-512 instruction set that do not support the
native ``avx512_bf16`` instruction. This mode is used for development purposes and it does not guarantee good performance.
To enable the simulation, the ``ov::hint::inference_precision`` has to be explicitly set to ``ov::element::bf16``.

-.. note::
-
+.. note::
+
  If ``ov::hint::inference_precision`` is set to ``ov::element::bf16`` on a CPU without native bfloat16 support or bfloat16 simulation mode, an exception is thrown.

-.. note::
-
-   Due to the reduced mantissa size of the ``bfloat16`` data type, the resulting ``bf16`` inference accuracy may differ from the ``f32`` inference,
-   especially for models that were not trained using the ``bfloat16`` data type. If the ``bf16`` inference accuracy is not acceptable,
+.. note::
+
+   Due to the reduced mantissa size of the ``bfloat16`` data type, the resulting ``bf16`` inference accuracy may differ from the ``f32`` inference,
+   especially for models that were not trained using the ``bfloat16`` data type. If the ``bf16`` inference accuracy is not acceptable,
  it is recommended to switch to the ``f32`` precision. Also, the performance/accuracy balance can be managed using the ``ov::hint::execution_mode`` hint,
 see the `Execution Mode Hint <#execution-mode-hint>`__.

@@ -224,35 +219,35 @@ For more details, see the :doc:`Multi-device execution `

-If either ``ov::num_streams(n_streams)`` with ``n_streams > 1`` or ``ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)``
-property is set for CPU plugin, then multiple streams are created for the model. In case of CPU plugin, each stream has its own
-host thread, which means that incoming infer requests can be processed simultaneously.
Each stream is pinned to its own group of +If either ``ov::num_streams(n_streams)`` with ``n_streams > 1`` or ``ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)`` +property is set for CPU plugin, then multiple streams are created for the model. In case of CPU plugin, each stream has its own +host thread, which means that incoming infer requests can be processed simultaneously. Each stream is pinned to its own group of physical cores with respect to NUMA nodes physical memory usage to minimize overhead on data transfer between NUMA nodes. For more details, see the :doc:`optimization guide `. -.. note:: +.. note:: - When it comes to latency, be aware that running only one stream on multi-socket platform may introduce additional overheads - on data transfer between NUMA nodes. In that case it is better to use the ``ov::hint::PerformanceMode::LATENCY`` performance hint. + When it comes to latency, be aware that running only one stream on multi-socket platform may introduce additional overheads + on data transfer between NUMA nodes. In that case it is better to use the ``ov::hint::PerformanceMode::LATENCY`` performance hint. For more details see the :doc:`performance hints ` overview. -.. note:: +.. note:: Multi-stream execution is not supported on Arm® platforms. Latency and throughput hints have identical behavior and use only one stream for inference. - + Dynamic Shapes +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ CPU provides full functional support for models with dynamic shapes in terms of the opset coverage. -.. note:: +.. note:: The CPU plugin does not support tensors with dynamically changing rank. In case of an attempt to infer a model with such tensors, an exception will be thrown. -Some runtime optimizations work better if the model shapes are known in advance. Therefore, if the input data shape is -not changed between inference calls, it is recommended to use a model with static shapes or reshape the existing model +Some runtime optimizations work better if the model shapes are known in advance. Therefore, if the input data shape is +not changed between inference calls, it is recommended to use a model with static shapes or reshape the existing model with the static input shape to get the best performance. @@ -302,12 +297,12 @@ For more details, see :doc:`preprocessing API guide ` overview. @@ -317,7 +312,7 @@ Extensibility CPU plugin supports fallback on ``ov::Op`` reference implementation if the plugin does not have its own implementation for such operation. That means that :doc:`OpenVINO™ Extensibility Mechanism ` can be used for the plugin extension as well. -Enabling fallback on a custom operation implementation is possible by overriding the ``ov::Op::evaluate`` method in the derived operation +Enabling fallback on a custom operation implementation is possible by overriding the ``ov::Op::evaluate`` method in the derived operation class (see :doc:`custom OpenVINO™ operations ` for details). Stateful Models @@ -377,13 +372,13 @@ Optimization guide Multi-Threading Optimization +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -CPU inference will infer an input or multiple inputs in parallel on multiple logical processors. +CPU inference will infer an input or multiple inputs in parallel on multiple logical processors. User can use the following properties to limit available CPU resource for model inference. 
If the platform or operating system can support this behavior, OpenVINO Runtime will perform multi-threading
scheduling based on limited available CPU resources.

-- ``ov::inference_num_threads`` limits number of logical processors used for CPU inference.
+- ``ov::inference_num_threads`` limits the number of logical processors used for CPU inference.
  If the number set by the user is greater than the number of logical processors on the platform, the multi-threading scheduler only uses the platform number for CPU inference.
-- ``ov::hint::scheduling_core_type`` limits the type of CPU cores for CPU inference when user runs inference on a hybird platform that includes both Performance-cores (P-cores) with Efficient-cores (E-cores).
+- ``ov::hint::scheduling_core_type`` limits the type of CPU cores for CPU inference when the user runs inference on a hybrid platform that includes both Performance-cores (P-cores) and Efficient-cores (E-cores).
  If the user's platform has only one type of CPU core, this property has no effect, and CPU inference always uses this unique core type.
- ``ov::hint::enable_hyper_threading`` limits the use of one or two logical processors per CPU core when the platform has CPU hyperthreading enabled.
  If there is only one logical processor per CPU core, such as Efficient-cores, this property has no effect, and CPU inference uses all logical processors.

@@ -392,71 +387,71 @@ User can use the following properties to limit available CPU resource for model

   .. tab-item:: Python
      :sync: py
-
+
      .. doxygensnippet:: docs/snippets/cpu/multi_threading.py
         :language: python
         :fragment: [ov:intel_cpu:multi_threading:part0]

   .. tab-item:: C++
      :sync: cpp
-
+
      .. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
         :language: cpp
         :fragment: [ov:intel_cpu:multi_threading:part0]
-
-.. note::
-
+
+.. note::
+
   ``ov::hint::scheduling_core_type`` and ``ov::hint::enable_hyper_threading`` only support Intel® x86-64 CPUs on Linux and Windows in the current release.
-
+
By default, OpenVINO Runtime will enable CPU threads pinning for better performance. The user can also use the property
``ov::hint::enable_cpu_pinning`` to switch it off. Disabling threads pinning might be beneficial in complex applications
with several workloads executed in parallel.

.. tab-set::

   .. tab-item:: Python
      :sync: py
-
+
      .. doxygensnippet:: docs/snippets/cpu/multi_threading.py
         :language: python
         :fragment: [ov:intel_cpu:multi_threading:part1]

   .. tab-item:: C++
      :sync: cpp
-
+
      .. doxygensnippet:: docs/snippets/cpu/multi_threading.cpp
         :language: cpp
         :fragment: [ov:intel_cpu:multi_threading:part1]
-
+
See the :doc:`optimization guide ` for details on multi-stream execution.

-.. note::
-
+.. note::
+
  ``ov::hint::enable_cpu_pinning`` only supports Linux in the current release.
-
+
Denormals Optimization
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

-Denormal numbers (denormals) are non-zero, finite float numbers that are very close to zero, i.e. the numbers
-in (0, 1.17549e-38) and (0, -1.17549e-38). In such cases, normalized-number encoding format does not have a capability
+Denormal numbers (denormals) are non-zero, finite float numbers that are very close to zero, i.e. the numbers
+in (0, 1.17549e-38) and (-1.17549e-38, 0). In such cases, the normalized-number encoding format does not have the capability
to encode the number and underflow will happen. The computation involving such numbers is extremely slow on many hardware platforms.
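To make the range above concrete, here is a minimal NumPy sketch (illustrative only; it is not part of this patch or the referenced snippets, and the use of ``numpy`` is an assumption) that constructs a subnormal ``float32`` value:

.. code-block:: python

   import numpy as np

   # Smallest positive *normalized* float32 value, about 1.1754944e-38.
   smallest_normal = np.finfo(np.float32).tiny

   # Dividing further produces a denormal (subnormal) number: still non-zero,
   # but below the normalized range, so it cannot use the normalized encoding
   # and arithmetic on it can be dramatically slower on many CPUs.
   denormal = smallest_normal / np.float32(16)

   print(smallest_normal)   # 1.1754944e-38
   print(denormal)          # ~7.35e-40, a subnormal float32
   print(denormal > 0)      # True: non-zero, yet "very close to zero"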
-As a denormal number is extremely close to zero, treating a denormal directly as zero is a straightforward -and simple method to optimize computation of denormals. This optimization does not comply with IEEE 754 standard. -If it causes unacceptable accuracy degradation, the ``denormals_optimization`` property is introduced to control this behavior. -If there are denormal numbers in use cases, and no or acceptable accuracy drop is seen, set the property to `True` -to improve performance, otherwise set it to ``False``. If it is not set explicitly by the property and the application -does not perform any denormals optimization as well, the optimization is disabled by default. After enabling -the ``denormals_optimization`` property, OpenVINO will provide a cross operation system/ compiler and safe optimization +As a denormal number is extremely close to zero, treating a denormal directly as zero is a straightforward +and simple method to optimize computation of denormals. This optimization does not comply with IEEE 754 standard. +If it causes unacceptable accuracy degradation, the ``denormals_optimization`` property is introduced to control this behavior. +If there are denormal numbers in use cases, and no or acceptable accuracy drop is seen, set the property to `True` +to improve performance, otherwise set it to ``False``. If it is not set explicitly by the property and the application +does not perform any denormals optimization as well, the optimization is disabled by default. After enabling +the ``denormals_optimization`` property, OpenVINO will provide a cross operation system/ compiler and safe optimization on all platform when applicable. -There are cases when the application in which OpenVINO is used also performs this low-level denormals optimization. -If it is optimized by setting the FTZ(Flush-To-Zero) and DAZ(Denormals-As-Zero) flags in MXCSR register at the beginning -of the thread where OpenVINO is called, OpenVINO will inherit this setting in the same thread and sub-thread, -so there is no need to set the ``denormals_optimization`` property. In such cases, you are responsible for the +There are cases when the application in which OpenVINO is used also performs this low-level denormals optimization. +If it is optimized by setting the FTZ(Flush-To-Zero) and DAZ(Denormals-As-Zero) flags in MXCSR register at the beginning +of the thread where OpenVINO is called, OpenVINO will inherit this setting in the same thread and sub-thread, +so there is no need to set the ``denormals_optimization`` property. In such cases, you are responsible for the effectiveness and safety of the settings. -.. note:: +.. note:: The ``denormals_optimization`` property must be set before calling ``compile_model()``. @@ -466,14 +461,14 @@ To enable denormals optimization in the application, the ``denormals_optimizatio .. tab-item:: Python :sync: py - + .. doxygensnippet:: docs/snippets/ov_denormals.py :language: python :fragment: [ov:intel_cpu:denormals_optimization:part0] .. tab-item:: C++ :sync: cpp - + .. doxygensnippet:: docs/snippets/ov_denormals.cpp :language: cpp :fragment: [ov:intel_cpu:denormals_optimization:part0] @@ -482,26 +477,26 @@ To enable denormals optimization in the application, the ``denormals_optimizatio Sparse weights decompression (Intel® x86-64) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -``Sparse weights`` are weights where most of the elements are zero. The ratio of the number of zero elements -to the number of all elements is called ``sparse rate``. 
Thus, we assume that ``sparse weights`` are weights
-with a high sparse rate. In case of ``sparse weights``, we can store only non-zero values in memory using
-special storage structures, which allows us to use memory more efficiently. In turn, this can give us better
+``Sparse weights`` are weights where most of the elements are zero. The ratio of the number of zero elements
+to the number of all elements is called the ``sparse rate``. Thus, we assume that ``sparse weights`` are weights
+with a high sparse rate. In the case of ``sparse weights``, we can store only non-zero values in memory using
+special storage structures, which allows us to use memory more efficiently. In turn, this can give us better
 performance in the high memory bound workloads (e.g., throughput scenario).

-``Sparse weights decompression feature`` allows to pack weights for Matrix Multiplication operations directly
-in the CPU plugin at the model compilation stage and store non-zero values in a special packed format. Then,
-during the execution of the model, the weights are unpacked and used in the computational kernel. Since the
-weights are loaded from DDR/L3 cache in the packed format this significantly decreases memory consumption
+``Sparse weights decompression feature`` allows packing weights for Matrix Multiplication operations directly
+in the CPU plugin at the model compilation stage and storing non-zero values in a special packed format. Then,
+during the execution of the model, the weights are unpacked and used in the computational kernel. Since the
+weights are loaded from DDR/L3 cache in the packed format, this significantly decreases memory consumption
 and, as a consequence, improves inference performance.

-To use this feature, the user is provided with property ``sparse_weights_decompression_rate``, which can take
-values from the interval \[0, 1\]. ``sparse_weights_decompression_rate`` defines sparse rate threshold: only operations
-with higher sparse rate will be executed using ``sparse weights decompression feature``. The default value is ``1``,
+To use this feature, the user is provided with the property ``sparse_weights_decompression_rate``, which can take
+values from the interval \[0, 1\]. ``sparse_weights_decompression_rate`` defines the sparse rate threshold: only operations
+with a higher sparse rate will be executed using the ``sparse weights decompression feature``. The default value is ``1``,
 which means the option is disabled.

-.. note::
-
-   ``Sparse weights decompression feature`` is disabled by default since overall speed-up highly depends on
+.. note::
+
+   ``Sparse weights decompression feature`` is disabled by default since the overall speed-up highly depends on the
   particular workload, and in some cases the feature may introduce performance degradation.

Code examples of how to use ``sparse_weights_decompression_rate``:

@@ -510,25 +505,25 @@ Code examples of how to use ``sparse_weights_decompression_rate``:

   .. tab-item:: Python
      :sync: py
-
+
      .. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.py
         :language: python
         :fragment: [ov:intel_cpu:sparse_weights_decompression:part0]

   .. tab-item:: C++
      :sync: cpp
-
+
      .. doxygensnippet:: docs/snippets/cpu/ov_sparse_weights_decompression.cpp
         :language: cpp
         :fragment: [ov:intel_cpu:sparse_weights_decompression:part0]

-.. note::
-
+.. note::
+
  The ``sparse_weights_decompression_rate`` property must be set before calling ``compile_model()``.
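For orientation, a minimal Python sketch of the flow described above might look as follows (the ``openvino.properties.intel_cpu`` module path and the ``model.xml`` file name are assumptions for illustration, not part of this patch):

.. code-block:: python

   import openvino as ov
   import openvino.properties.intel_cpu as intel_cpu  # assumed property namespace

   core = ov.Core()
   model = core.read_model("model.xml")  # placeholder model path

   # Pass the threshold in the compile_model() config, which satisfies the
   # note above: only operations whose weights sparse rate exceeds 0.8 would
   # use the packed sparse representation.
   compiled_model = core.compile_model(
       model,
       "CPU",
       {intel_cpu.sparse_weights_decompression_rate: 0.8},
   )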
-Information about the layers in which the ``sparse weights decompression feature`` was applied can be obtained -from perf counters log. The "exec type" field will contain the implementation type with the "sparse" particle +Information about the layers in which the ``sparse weights decompression feature`` was applied can be obtained +from perf counters log. The "exec type" field will contain the implementation type with the "sparse" particle ("brgemm_avx512_amx_sparse_I8" in the example below): .. code-block:: sh diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst index c44e03b29de2b2..870cdd423deb68 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst @@ -8,8 +8,8 @@ GNA Device .. meta:: - :description: The GNA plugin in OpenVINO™ Runtime enables running inference - on Intel® Gaussian & Neural Accelerator (GNA) and in the + :description: The GNA plugin in OpenVINO™ Runtime enables running inference + on Intel® Gaussian & Neural Accelerator (GNA) and in the software execution mode on CPU. @@ -22,12 +22,12 @@ For more details on how to configure a system to use GNA, see the :doc:`GNA conf .. note:: - Intel's GNA is being discontinued and Intel® Core™ Ultra (formerly known as Meteor Lake) + Intel's GNA is being discontinued and Intel® Core™ Ultra (formerly known as Meteor Lake) will be the last generation of hardware to include it. For this reason, the GNA plugin will soon be discontinued. - Consider Intel's new Neural Processing Unit as a low-power solution for offloading + Consider Intel's new Neural Processing Unit as a low-power solution for offloading neural network computation, for processors offering the technology. - + Intel® GNA Generational Differences @@ -51,8 +51,8 @@ and the term "GNA 3.0" refers to GNA hardware delivered on 12th, 13th generation Intel® GNA Forward and Backward Compatibility +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -When a model is run, using the GNA plugin, it is compiled internally for the specific hardware target. It is possible to export a compiled model, -using `Import/Export <#import-export>`__ functionality to use it later. In general, there is no guarantee that a model compiled and +When a model is run, using the GNA plugin, it is compiled internally for the specific hardware target. It is possible to export a compiled model, +using `Import/Export <#import-export>`__ functionality to use it later. In general, there is no guarantee that a model compiled and exported for GNA 2.0 runs on GNA 3.0 or vice versa. ================== ======================== ======================================================= ======================================================= @@ -65,8 +65,8 @@ exported for GNA 2.0 runs on GNA 3.0 or vice versa. .. note:: - In most cases, a network compiled for GNA 2.0 runs as expected on GNA 3.0. However, performance may be worse - compared to when a network is compiled specifically for the latter. The exception is a network with convolutions + In most cases, a network compiled for GNA 2.0 runs as expected on GNA 3.0. However, performance may be worse + compared to when a network is compiled specifically for the latter. 
The exception is a network with convolutions with the number of filters greater than 8192 (see the `Model and Operation Limitations <#model-and-operation-limitations>`__ section). @@ -88,9 +88,9 @@ For details, see a description of the ``ov::intel_gna::execution_mode`` property Recovery from Interruption by High-Priority Windows Audio Processes ############################################################################ -GNA is designed for real-time workloads i.e., noise reduction. For such workloads, processing should be time constrained. -Otherwise, extra delays may cause undesired effects such as *audio glitches*. The GNA driver provides a Quality of Service (QoS) -mechanism to ensure that processing can satisfy real-time requirements. The mechanism interrupts requests that might cause +GNA is designed for real-time workloads i.e., noise reduction. For such workloads, processing should be time constrained. +Otherwise, extra delays may cause undesired effects such as *audio glitches*. The GNA driver provides a Quality of Service (QoS) +mechanism to ensure that processing can satisfy real-time requirements. The mechanism interrupts requests that might cause high-priority Windows audio processes to miss the schedule. As a result, long running GNA tasks terminate early. To prepare the applications correctly, use Automatic QoS Feature described below. @@ -98,9 +98,9 @@ To prepare the applications correctly, use Automatic QoS Feature described below Automatic QoS Feature on Windows +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -Starting with the 2021.4.1 release of OpenVINO™ and the 03.00.00.1363 version of Windows GNA driver, the execution mode of -``ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK`` has been available to ensure that workloads satisfy real-time execution. -In this mode, the GNA driver automatically falls back on CPU for a particular infer request if the HW queue is not empty. +Starting with the 2021.4.1 release of OpenVINO™ and the 03.00.00.1363 version of Windows GNA driver, the execution mode of +``ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK`` has been available to ensure that workloads satisfy real-time execution. +In this mode, the GNA driver automatically falls back on CPU for a particular infer request if the HW queue is not empty. Therefore, there is no need for explicitly switching between GNA and CPU. .. tab-set:: @@ -128,33 +128,25 @@ Therefore, there is no need for explicitly switching between GNA and CPU. :fragment: [ov_gna_exec_mode_hw_with_sw_fback] -.. note:: - - Due to the "first come - first served" nature of GNA driver and the QoS feature, this mode may lead to increased - CPU consumption if there are several clients using GNA simultaneously. Even a lightweight competing infer request, - not cleared at the time when the user's GNA client process makes its request, can cause the user's request to be +.. note:: + + Due to the "first come - first served" nature of GNA driver and the QoS feature, this mode may lead to increased + CPU consumption if there are several clients using GNA simultaneously. Even a lightweight competing infer request, + not cleared at the time when the user's GNA client process makes its request, can cause the user's request to be executed on CPU, unnecessarily increasing CPU utilization and power. 
Supported Inference Data Types ########################################################### -Intel® GNA essentially operates in the low-precision mode which represents a mix of 8-bit (``i8``), 16-bit (``i16``), and 32-bit (``i32``) -integer computations. Unlike other OpenVINO devices supporting low-precision execution, it can calculate quantization factors at the -model loading time. Therefore, a model can be run without calibration. However, this mode may not provide satisfactory accuracy -because the internal quantization algorithm is based on heuristics, the efficiency of which depends on the model and dynamic range of input data. +Intel® GNA essentially operates in the low-precision mode which represents a mix of 8-bit (``i8``), 16-bit (``i16``), and 32-bit (``i32``) +integer computations. Unlike other OpenVINO devices supporting low-precision execution, it can calculate quantization factors at the +model loading time. Therefore, a model can be run without calibration. However, this mode may not provide satisfactory accuracy +because the internal quantization algorithm is based on heuristics, the efficiency of which depends on the model and dynamic range of input data. This mode is going to be deprecated soon. GNA supports the ``i16`` and ``i8`` quantized data types as inference precision of internal primitives. -GNA users are encouraged to use the :doc:`Post-Training Optimization Tool ` to get a model with -quantization hints based on statistics for the provided dataset. - :doc:`Hello Query Device C++ Sample ` can be used to print out supported data types for all detected devices. -:doc:`POT API Usage sample for GNA ` demonstrates how a model can be quantized for GNA, using POT API in two modes: - -* Accuracy (i16 weights) -* Performance (i8 weights) - For POT quantized models, the ``ov::hint::inference_precision`` property has no effect except in cases described in the `Model and Operation Limitations section <#model-and-operation-limitations>`__. @@ -173,9 +165,9 @@ For more details, see the :doc:`Model caching overview ` or +To compile a model, use either :ref:`compile Tool ` or :doc:`Speech C++ Sample `. Stateful Models @@ -226,10 +218,10 @@ Stateful Models GNA plugin natively supports stateful models. For more details on such models, refer to the :doc:`Stateful models `. -.. note:: +.. note:: - The GNA is typically used in streaming scenarios when minimizing latency is important. Taking into account that POT does not - support the ``TensorIterator`` operation, the recommendation is to use the ``transform`` option of model conversion API + The GNA is typically used in streaming scenarios when minimizing latency is important. Taking into account that POT does not + support the ``TensorIterator`` operation, the recommendation is to use the ``transform`` option of model conversion API to apply ``LowLatency2`` transformation when converting an original model. Profiling @@ -242,16 +234,16 @@ With the following methods, you can collect profiling information with various p .. tab-item:: Python :sync: py - + ``openvino.InferRequest.get_profiling_info`` .. tab-item:: C++ :sync: cpp - + ``ov::InferRequest::get_profiling_info`` -The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. +The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. The API enables you to retrieve counter units in cycles. You can convert cycles to seconds as follows: .. 
code-block:: sh @@ -319,7 +311,7 @@ Model and Operation Limitations +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Due to the specification of hardware architecture, Intel® GNA supports a limited set of operations (including their kinds and combinations). -For example, GNA Plugin should not be expected to run computer vision models because the plugin does not fully support 2D convolutions. +For example, GNA Plugin should not be expected to run computer vision models because the plugin does not fully support 2D convolutions. The exception are the models specifically adapted for the GNA Plugin. Limitations include: @@ -336,37 +328,37 @@ Limitations include: Support for 2D Convolutions up to GNA 3.0 ----------------------------------------------------------- -The Intel® GNA 1.0 and 2.0 hardware natively supports only 1D convolutions. However, 2D convolutions can be mapped to 1D when -a convolution kernel moves in a single direction. Initially, a limited subset of Intel® GNA 3.0 features are added to the +The Intel® GNA 1.0 and 2.0 hardware natively supports only 1D convolutions. However, 2D convolutions can be mapped to 1D when +a convolution kernel moves in a single direction. Initially, a limited subset of Intel® GNA 3.0 features are added to the previous feature set including: * **2D VALID Convolution With Small 2D Kernels:** Two-dimensional convolutions with the following kernel dimensions - [``H``,``W``] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3]. - Input tensor dimensions are limited to [1,8,16,16] <= [``N``,``C``,``H``,``W``] <= [1,120,384,240]. Up to 384 ``C`` - channels may be used with a subset of kernel sizes (see the table below). Up to 256 kernels (output channels) - are supported. Pooling is limited to pool shapes of [1,1], [2,2], or [3,3]. Not all combinations of kernel + [``H``,``W``] are supported: [1,1], [2,2], [3,3], [2,1], [3,1], [4,1], [5,1], [6,1], [7,1], [1,2], or [1,3]. + Input tensor dimensions are limited to [1,8,16,16] <= [``N``,``C``,``H``,``W``] <= [1,120,384,240]. Up to 384 ``C`` + channels may be used with a subset of kernel sizes (see the table below). Up to 256 kernels (output channels) + are supported. Pooling is limited to pool shapes of [1,1], [2,2], or [3,3]. Not all combinations of kernel shape and input tensor shape are supported (see the tables below for exact limitations). -The tables below show that the exact limitation on the input tensor width W depends on the number of input channels +The tables below show that the exact limitation on the input tensor width W depends on the number of input channels *C* (indicated as *Ci* below) and the kernel shape. There is much more freedom to choose the input tensor height and number of output channels. -The following tables provide a more explicit representation of the Intel(R) GNA 3.0 2D convolution operations -initially supported. The limits depend strongly on number of input tensor channels (*Ci*) and the input tensor width (*W*). -Other factors are kernel height (*KH*), kernel width (*KW*), pool height (*PH*), pool width (*PW*), horizontal pool step (*SH*), -and vertical pool step (*PW*). For example, the first table shows that for a 3x3 kernel with max pooling, only square pools are supported, +The following tables provide a more explicit representation of the Intel(R) GNA 3.0 2D convolution operations +initially supported. The limits depend strongly on number of input tensor channels (*Ci*) and the input tensor width (*W*). 
+Other factors are kernel height (*KH*), kernel width (*KW*), pool height (*PH*), pool width (*PW*), horizontal pool step (*SH*), +and vertical pool step (*PW*). For example, the first table shows that for a 3x3 kernel with max pooling, only square pools are supported, and *W* is limited to 87 when there are 64 input channels. :download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i16) <../../../docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/assets/GNA_Maximum_Input_Tensor_Widths_i16.csv>` -:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/assets/GNA_Maximum_Input_Tensor_Widths_i8.csv>` +:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/assets/GNA_Maximum_Input_Tensor_Widths_i8.csv>` -.. note:: +.. note:: The above limitations only apply to the new hardware 2D convolution operation. For GNA 3.0, when possible, the Intel® GNA - plugin graph compiler flattens 2D convolutions so that the second generation Intel® GNA 1D convolution operations - (without these limitations) may be used. The plugin will also flatten 2D convolutions regardless of the sizes if GNA 2.0 + plugin graph compiler flattens 2D convolutions so that the second generation Intel® GNA 1D convolution operations + (without these limitations) may be used. The plugin will also flatten 2D convolutions regardless of the sizes if GNA 2.0 compilation target is selected (see below). Support for Convolutions since GNA 3.5 -------------------------------------------------------------------------------------------------------------------------------------- @@ -402,14 +394,14 @@ Support for 2D Convolutions using POT For POT to successfully work with the models including GNA3.0 2D convolutions, the following requirements must be met: * All convolution parameters are natively supported by HW (see tables above). -* The runtime precision is explicitly set by the ``ov::hint::inference_precision`` property as ``i8`` for the models produced by +* The runtime precision is explicitly set by the ``ov::hint::inference_precision`` property as ``i8`` for the models produced by the ``performance mode`` of POT, and as ``i16`` for the models produced by the ``accuracy mode`` of POT. Batch Size Limitation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -Intel® GNA plugin supports processing of context-windowed speech frames in batches of 1-8 frames. +Intel® GNA plugin supports processing of context-windowed speech frames in batches of 1-8 frames. Refer to the :doc:`Layout API overview ` to determine batch dimension. To set the layout of model inputs in runtime, use the :doc:`Optimize Preprocessing ` guide: @@ -422,7 +414,7 @@ To set the layout of model inputs in runtime, use the :doc:`Optimize Preprocessi .. doxygensnippet:: docs/snippets/gna/set_batch.py :language: py :fragment: [import] - + .. doxygensnippet:: docs/snippets/gna/set_batch.py :language: py :fragment: [ov_gna_set_nc_layout] @@ -433,7 +425,7 @@ To set the layout of model inputs in runtime, use the :doc:`Optimize Preprocessi .. doxygensnippet:: docs/snippets/gna/set_batch.cpp :language: cpp :fragment: [include] - + .. 
doxygensnippet:: docs/snippets/gna/set_batch.cpp :language: cpp :fragment: [ov_gna_set_nc_layout] @@ -460,8 +452,8 @@ then set batch size: Increasing batch size only improves efficiency of ``MatMul`` layers. -.. note:: - +.. note:: + For models with ``Convolution``, ``LSTMCell``, ``GRUCell``, or ``ReadValue`` / ``Assign`` operations, the only supported batch size is 1. diff --git a/docs/sphinx_setup/_static/images/default_quantization_flow.svg b/docs/sphinx_setup/_static/images/default_quantization_flow.svg deleted file mode 100644 index d4de6dbbcc1164..00000000000000 --- a/docs/sphinx_setup/_static/images/default_quantization_flow.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:76808b81c147bdca7397a67cffcd347781b1dceb45d93c229a06e0423fe3055f -size 77646 diff --git a/docs/sphinx_setup/_static/images/workflow_simple.svg b/docs/sphinx_setup/_static/images/workflow_simple.svg deleted file mode 100644 index 93714e17321539..00000000000000 --- a/docs/sphinx_setup/_static/images/workflow_simple.svg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:84fc7114eef9ad310d72abc5d8f59b076d30031e0a42f18d518acc02e19bcc8d -size 59755 From 015ff79bf5443cb125edfc72a45f312d55af5759 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Sat, 13 Jan 2024 09:55:06 +0400 Subject: [PATCH 33/43] Fixed API validator search (#22136) --- .../OpenVINODeveloperScriptsConfig.cmake | 2 ++ .../api_validator/api_validator.cmake | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index fc9abc64b9e4cc..82b556cad6c5a6 100644 --- a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake +++ b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -206,6 +206,8 @@ set(CMAKE_POLICY_DEFAULT_CMP0025 NEW) set(CMAKE_POLICY_DEFAULT_CMP0026 NEW) # CMake 3.0+ (2.8.12): MacOS "@rpath" in target's install name set(CMAKE_POLICY_DEFAULT_CMP0042 NEW) +# CMake 3.1+: Simplify variable reference and escape sequence evaluation. +set(CMAKE_POLICY_DEFAULT_CMP0053 NEW) # CMake 3.9+: `RPATH` settings on macOS do not affect `install_name`. set(CMAKE_POLICY_DEFAULT_CMP0068 NEW) # CMake 3.12+: find_package() uses _ROOT variables. 
diff --git a/cmake/developer_package/api_validator/api_validator.cmake b/cmake/developer_package/api_validator/api_validator.cmake index 4eeb9e1e5e0b7e..090a8f84fbcc65 100644 --- a/cmake/developer_package/api_validator/api_validator.cmake +++ b/cmake/developer_package/api_validator/api_validator.cmake @@ -3,15 +3,15 @@ # if(WIN32) - set(PROGRAMFILES_ENV "ProgramFiles(X86)") + set(PROGRAMFILES_ENV "ProgramFiles\(X86\)") # check that PROGRAMFILES_ENV is defined, because in case of cross-compilation for Windows # we don't have such variable - if(DEFINED ENV{PROGRAMFILES_ENV}) + if(DEFINED ENV{${PROGRAMFILES_ENV}}) file(TO_CMAKE_PATH $ENV{${PROGRAMFILES_ENV}} PROGRAMFILES) set(WDK_PATHS "${PROGRAMFILES}/Windows Kits/10/bin/${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}/x64" - "${PROGRAMFILES}/Windows Kits/10/bin/x64") + "${PROGRAMFILES}/Windows Kits/10/bin/x64") message(STATUS "Trying to find apivalidator in: ") foreach(wdk_path IN LISTS WDK_PATHS) @@ -19,9 +19,9 @@ if(WIN32) endforeach() find_host_program(ONECORE_API_VALIDATOR - NAMES apivalidator - PATHS ${WDK_PATHS} - DOC "ApiValidator for OneCore compliance") + NAMES apivalidator + PATHS ${WDK_PATHS} + DOC "ApiValidator for OneCore compliance") if(ONECORE_API_VALIDATOR) message(STATUS "Found apivalidator: ${ONECORE_API_VALIDATOR}") From 0a8f1383826d949c497fe3d05fef9ad2b662fa7e Mon Sep 17 00:00:00 2001 From: Vishniakov Nikolai Date: Sat, 13 Jan 2024 09:33:49 +0100 Subject: [PATCH 34/43] [OV JS] Conditional enabling of JS API (#22139) * Disable js api building for vcpkg * Disable JS API by default * Add disable JS API conditions in features.cmake * Update cmake/features.cmake * Update src/bindings/js/CMakeLists.txt --------- Co-authored-by: Ilya Lavrenov --- cmake/features.cmake | 3 +++ src/bindings/js/CMakeLists.txt | 4 ++++ src/bindings/js/node/CMakeLists.txt | 4 ---- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/cmake/features.cmake b/cmake/features.cmake index 6e383edeeb695d..aadd1db976e3d0 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -177,6 +177,9 @@ ov_dependent_option (ENABLE_SYSTEM_SNAPPY "Enables use of system version of Snap ov_dependent_option (ENABLE_PYTHON_PACKAGING "Enables packaging of Python API in APT / YUM" OFF "ENABLE_PYTHON;UNIX" OFF) +ov_dependent_option (ENABLE_JS "Enables JS API building" ON + "NOT WIN32" OFF) + ov_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF) if(NOT BUILD_SHARED_LIBS AND ENABLE_OV_TF_FRONTEND) diff --git a/src/bindings/js/CMakeLists.txt b/src/bindings/js/CMakeLists.txt index 329a86c2fa6bee..20bf139a54a893 100644 --- a/src/bindings/js/CMakeLists.txt +++ b/src/bindings/js/CMakeLists.txt @@ -2,6 +2,10 @@ # SPDX-License-Identifier: Apache-2.0 # +if(NOT ENABLE_JS) + return() +endif() + project(OpenVINO_JS_API) add_subdirectory(node) diff --git a/src/bindings/js/node/CMakeLists.txt b/src/bindings/js/node/CMakeLists.txt index cc8918155f16d0..fffceb56799a96 100644 --- a/src/bindings/js/node/CMakeLists.txt +++ b/src/bindings/js/node/CMakeLists.txt @@ -2,10 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -if(WIN32) - return() -endif() - if(CMAKE_VERSION VERSION_LESS 3.14) message(WARNING "JS API is not available with CMake version less than 3.14, skipping") return() From c9738426168abc57b8b677979070443802afd5f4 Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Sat, 13 Jan 2024 12:54:14 +0400 Subject: [PATCH 35/43] Delete `ngraph` opsets (#22068) * Delete ngraph opset10-11 * Delete ngraph opset7 * Delete ngraph opset5 * Delete ngraph opset4 
* Delete ngraph opset9 * Delete ngraph opset8 * ClangFormat * Delete ngraph opset6 * Delete NG opset3 usage * Delete ngraph opset1 --- src/core/include/ngraph/opsets/opset1.hpp | 25 ---------- src/core/include/ngraph/opsets/opset10.hpp | 25 ---------- .../include/ngraph/opsets/opset10_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset11.hpp | 25 ---------- .../include/ngraph/opsets/opset11_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset1_tbl.hpp | 43 ---------------- src/core/include/ngraph/opsets/opset2.hpp | 1 - src/core/include/ngraph/opsets/opset4.hpp | 25 ---------- src/core/include/ngraph/opsets/opset4_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset5.hpp | 25 ---------- src/core/include/ngraph/opsets/opset5_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset6.hpp | 25 ---------- src/core/include/ngraph/opsets/opset6_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset7.hpp | 25 ---------- src/core/include/ngraph/opsets/opset7_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset8.hpp | 25 ---------- src/core/include/ngraph/opsets/opset8_tbl.hpp | 12 ----- src/core/include/ngraph/opsets/opset9.hpp | 25 ---------- src/core/include/ngraph/opsets/opset9_tbl.hpp | 12 ----- src/frontends/onnx/frontend/src/op/gather.hpp | 8 +-- src/frontends/onnx/frontend/src/op/if.cpp | 4 +- src/frontends/onnx/frontend/src/op/lstm.cpp | 1 - .../src/op/mean_variance_normalization.cpp | 6 +-- .../onnx/frontend/src/op/qlinear_conv.cpp | 11 ++-- .../onnx/frontend/src/op/qlinear_matmul.cpp | 6 +-- .../onnx/frontend/src/op/random_uniform.cpp | 13 +++-- .../frontend/src/op/random_uniform_like.cpp | 13 +++-- .../onnx/frontend/src/op/roi_align.cpp | 50 +++++++++---------- .../onnx/frontend/src/op/softsign.cpp | 3 +- .../src/concat_reorder_inplace.cpp | 2 - .../single_layer_tests/depth_to_space.cpp | 1 - .../single_layer_tests/gather_elements.cpp | 1 - .../behavior/plugin/hetero_query_network.hpp | 1 - .../depth_to_space_transformation.cpp | 1 - .../shared_test_classes/single_layer/grn.hpp | 2 - .../single_layer/memory.hpp | 3 -- .../single_layer/prior_box.hpp | 2 - .../single_layer/prior_box_clustered.hpp | 2 - .../src/single_layer/adaptive_pooling.cpp | 2 - .../src/single_layer/eye.cpp | 1 - .../src/single_layer/memory.cpp | 9 ++-- .../src/single_layer/reverse.cpp | 2 - .../src/single_layer/roi_align.cpp | 3 -- .../src/subgraph/parameter_shapeof_result.cpp | 1 - 44 files changed, 60 insertions(+), 453 deletions(-) delete mode 100644 src/core/include/ngraph/opsets/opset1.hpp delete mode 100644 src/core/include/ngraph/opsets/opset10.hpp delete mode 100644 src/core/include/ngraph/opsets/opset10_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset11.hpp delete mode 100644 src/core/include/ngraph/opsets/opset11_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset1_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset4.hpp delete mode 100644 src/core/include/ngraph/opsets/opset4_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset5.hpp delete mode 100644 src/core/include/ngraph/opsets/opset5_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset6.hpp delete mode 100644 src/core/include/ngraph/opsets/opset6_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset7.hpp delete mode 100644 src/core/include/ngraph/opsets/opset7_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset8.hpp delete mode 100644 src/core/include/ngraph/opsets/opset8_tbl.hpp delete mode 100644 src/core/include/ngraph/opsets/opset9.hpp delete mode 100644 
src/core/include/ngraph/opsets/opset9_tbl.hpp diff --git a/src/core/include/ngraph/opsets/opset1.hpp b/src/core/include/ngraph/opsets/opset1.hpp deleted file mode 100644 index 42b3236287c470..00000000000000 --- a/src/core/include/ngraph/opsets/opset1.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset1 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset1_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset1 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset10.hpp b/src/core/include/ngraph/opsets/opset10.hpp deleted file mode 100644 index 66b248147aedeb..00000000000000 --- a/src/core/include/ngraph/opsets/opset10.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset10 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset10_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset10 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset10_tbl.hpp b/src/core/include/ngraph/opsets/opset10_tbl.hpp deleted file mode 100644 index f596071fa6e0b9..00000000000000 --- a/src/core/include/ngraph/opsets/opset10_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset10_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset11.hpp b/src/core/include/ngraph/opsets/opset11.hpp deleted file mode 100644 index a4a36bd2fa2e86..00000000000000 --- a/src/core/include/ngraph/opsets/opset11.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset11 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset11_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset11 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset11_tbl.hpp b/src/core/include/ngraph/opsets/opset11_tbl.hpp deleted file mode 100644 index c815946ecfd42c..00000000000000 --- a/src/core/include/ngraph/opsets/opset11_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset11_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset1_tbl.hpp b/src/core/include/ngraph/opsets/opset1_tbl.hpp deleted file mode 100644 index 955a5311d0c397..00000000000000 --- a/src/core/include/ngraph/opsets/opset1_tbl.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// This collection contains one entry for each op. If an op is added it must be -// added to this list. -// -// In order to use this list you want to define a macro named exactly NGRAPH_OP -// When you are done you should undef the macro -// As an example if you wanted to make a list of all op names as strings you could do this: -// -// #define NGRAPH_OP(a,b) #a, -// std::vector op_names{ -// #include "this include file name" -// }; -// #undef NGRAPH_OP -// -// This sample expands to a list like this: -// "Abs", -// "Acos", -// ... -// -// #define NGRAPH_OP(a,b) b::a, -// std::vector op_names{ -// #include "this include file name" -// }; -// #undef NGRAPH_OP -// -// This sample expands to a list like this: -// ngraph::op::Abs, -// ngraph::op::Acos, -// ... -// -// It's that easy. You can use this for fun and profit. - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset1_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset2.hpp b/src/core/include/ngraph/opsets/opset2.hpp index 898c8fdfc5c1b2..d2f09479f941a2 100644 --- a/src/core/include/ngraph/opsets/opset2.hpp +++ b/src/core/include/ngraph/opsets/opset2.hpp @@ -15,7 +15,6 @@ #endif #include "ngraph/ops.hpp" -#include "ngraph/opsets/opset1.hpp" namespace ngraph { namespace opset2 { diff --git a/src/core/include/ngraph/opsets/opset4.hpp b/src/core/include/ngraph/opsets/opset4.hpp deleted file mode 100644 index 14cb115f0889f8..00000000000000 --- a/src/core/include/ngraph/opsets/opset4.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset4 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset4_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset4 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset4_tbl.hpp b/src/core/include/ngraph/opsets/opset4_tbl.hpp deleted file mode 100644 index 2001838ce9cc43..00000000000000 --- a/src/core/include/ngraph/opsets/opset4_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset4_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset5.hpp b/src/core/include/ngraph/opsets/opset5.hpp deleted file mode 100644 index 48cc80ef87cbb6..00000000000000 --- a/src/core/include/ngraph/opsets/opset5.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset5 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset5_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset5 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset5_tbl.hpp b/src/core/include/ngraph/opsets/opset5_tbl.hpp deleted file mode 100644 index bfd1d93357e981..00000000000000 --- a/src/core/include/ngraph/opsets/opset5_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset5_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset6.hpp b/src/core/include/ngraph/opsets/opset6.hpp deleted file mode 100644 index 29fbc43292681a..00000000000000 --- a/src/core/include/ngraph/opsets/opset6.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset6 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset6_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset6 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset6_tbl.hpp b/src/core/include/ngraph/opsets/opset6_tbl.hpp deleted file mode 100644 index 26d64306f3224f..00000000000000 --- a/src/core/include/ngraph/opsets/opset6_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset6_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset7.hpp b/src/core/include/ngraph/opsets/opset7.hpp deleted file mode 100644 index 4ca9e2b94d3698..00000000000000 --- a/src/core/include/ngraph/opsets/opset7.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset7 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset7_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset7 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset7_tbl.hpp b/src/core/include/ngraph/opsets/opset7_tbl.hpp deleted file mode 100644 index 36c535fda2bce6..00000000000000 --- a/src/core/include/ngraph/opsets/opset7_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset7_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset8.hpp b/src/core/include/ngraph/opsets/opset8.hpp deleted file mode 100644 index 5b3e634d6fe071..00000000000000 --- a/src/core/include/ngraph/opsets/opset8.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset8 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset8_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset8 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset8_tbl.hpp b/src/core/include/ngraph/opsets/opset8_tbl.hpp deleted file mode 100644 index a707e4f861e17e..00000000000000 --- a/src/core/include/ngraph/opsets/opset8_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset8_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/core/include/ngraph/opsets/opset9.hpp b/src/core/include/ngraph/opsets/opset9.hpp deleted file mode 100644 index 78e5e5726d47a3..00000000000000 --- a/src/core/include/ngraph/opsets/opset9.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/ops.hpp" - -namespace ngraph { -namespace opset9 { -#define NGRAPH_OP(a, b) using b::a; -#include "ngraph/opsets/opset9_tbl.hpp" -#undef NGRAPH_OP -} // namespace opset9 -} // namespace ngraph diff --git a/src/core/include/ngraph/opsets/opset9_tbl.hpp b/src/core/include/ngraph/opsets/opset9_tbl.hpp deleted file mode 100644 index f77e340a516cd6..00000000000000 --- a/src/core/include/ngraph/opsets/opset9_tbl.hpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef NGRAPH_OP -# warning "NGRAPH_OP not defined" -# define NGRAPH_OP(x, y) -#endif - -#define _OPENVINO_OP_REG NGRAPH_OP -#include "openvino/opsets/opset9_tbl.hpp" -#undef _OPENVINO_OP_REG diff --git a/src/frontends/onnx/frontend/src/op/gather.hpp b/src/frontends/onnx/frontend/src/op/gather.hpp index 330e8fc434d880..15f826f5f809b6 100644 --- a/src/frontends/onnx/frontend/src/op/gather.hpp +++ b/src/frontends/onnx/frontend/src/op/gather.hpp @@ -10,9 +10,9 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include #include "ngraph/node.hpp" -#include "ngraph/opsets/opset8.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/core/node.hpp" +#include "openvino/op/gather.hpp" namespace ngraph { namespace onnx_import { @@ -24,9 +24,9 @@ inline OutputVector gather(const Node& node) { auto indices = ng_inputs.at(1); auto axis = node.get_attribute_value("axis", 0); - return {std::make_shared(data, - indices, - default_opset::Constant::create(element::i64, Shape{}, {axis}))}; + return {std::make_shared(data, + indices, + default_opset::Constant::create(element::i64, Shape{}, {axis}))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/if.cpp b/src/frontends/onnx/frontend/src/op/if.cpp index 2e1cdf21f217ed..5c50eca7ffedc8 100644 --- a/src/frontends/onnx/frontend/src/op/if.cpp +++ b/src/frontends/onnx/frontend/src/op/if.cpp @@ -6,8 +6,8 @@ #include "core/graph.hpp" #include "ngraph/node.hpp" -#include "ngraph/opsets/opset8.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/if.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -30,7 +30,7 @@ OutputVector if_op(const Node& node) { auto else_branch = std::make_shared(else_subgraph->get_ov_outputs(), else_params, else_subgraph->get_name()); - auto if_node = std::make_shared(ng_inputs.at(0)); + auto if_node = std::make_shared(ng_inputs.at(0)); if_node->set_then_body(then_branch); if_node->set_else_body(else_branch); diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp index f8255346b9cd39..495d34f119dc6f 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.cpp +++ b/src/frontends/onnx/frontend/src/op/lstm.cpp @@ -18,7 +18,6 @@ #include "ngraph/op/constant.hpp" #include "ngraph/op/lstm_sequence.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/opsets/opset3.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "onnx_import/core/null_node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp index 6a1dca496f6d2c..6c94dabce3be4d 100644 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp @@ -10,7 +10,6 @@ #include 
"default_opset.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/op/mvn.hpp" -#include "ngraph/opsets/opset5.hpp" #include "ngraph/validation_util.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -23,7 +22,7 @@ OutputVector mean_variance_normalization(const Node& node) { bool across_channels = node.get_attribute_value("across_channels", 0); bool normalize_variance = node.get_attribute_value("normalize_variance", 1); - return {std::make_shared(data, across_channels, normalize_variance)}; + return {std::make_shared(data, across_channels, normalize_variance)}; } } // namespace set_1 @@ -37,8 +36,7 @@ OutputVector mean_variance_normalization(const Node& node) { ngraph::normalize_axes(node.get_description(), axes, data.get_partial_shape().rank()); OPENVINO_SUPPRESS_DEPRECATED_END auto const_axes = default_opset::Constant::create(element::i64, Shape{normalized_axes.size()}, normalized_axes); - return { - std::make_shared(data, const_axes, true, 1e-09f, ngraph::op::MVNEpsMode::OUTSIDE_SQRT)}; + return {std::make_shared(data, const_axes, true, 1e-09f, ngraph::op::MVNEpsMode::OUTSIDE_SQRT)}; } } // namespace set_9 diff --git a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp index 2fe98f98fcb2b5..91dd6ff10f4440 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp @@ -14,8 +14,9 @@ #include "conv.hpp" #include "dequantize_linear.hpp" #include "exceptions.hpp" -#include "ngraph/opsets/opset6.hpp" #include "onnx_import/core/null_node.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/multiply.hpp" #include "quantize_linear.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -38,18 +39,18 @@ OutputVector qlinear_conv(const Node& node) { x = set_13::detail::dequantize_linear(x, x_scale, - std::make_shared(x_zero_point, element::f32), + std::make_shared(x_zero_point, element::f32), 1, node)[0]; w = set_13::detail::dequantize_linear(w, w_scale, - std::make_shared(w_zero_point, element::f32), + std::make_shared(w_zero_point, element::f32), 1, node)[0]; if (!ov::op::util::is_null(B)) { - B = std::make_shared(std::make_shared(B, x_scale.get_element_type()), - std::make_shared(x_scale, w_scale)) + B = std::make_shared(std::make_shared(B, x_scale.get_element_type()), + std::make_shared(x_scale, w_scale)) ->output(0); } diff --git a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp index c6f0077e7cbac8..67dde8b5b8908a 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp @@ -10,7 +10,7 @@ #include "dequantize_linear.hpp" #include "matmul.hpp" -#include "ngraph/opsets/opset6.hpp" +#include "openvino/op/convert.hpp" #include "quantize_linear.hpp" #include "utils/reshape.hpp" @@ -34,13 +34,13 @@ OutputVector qlinear_matmul(const Node& node) { const auto& dequnatize_a = set_13::detail::dequantize_linear(a, a_scale, - std::make_shared(a_zero_point, element::f32), + std::make_shared(a_zero_point, element::f32), 1, node); const auto& dequnatize_b = set_13::detail::dequantize_linear(b, b_scale, - std::make_shared(b_zero_point, element::f32), + std::make_shared(b_zero_point, element::f32), 1, node); diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index e07ddcc1c08585..95ab25b8f79470 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ 
b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -7,7 +7,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/opsets/opset8.hpp" #include "ngraph/shape.hpp" #include "utils/common.hpp" @@ -32,12 +31,12 @@ OutputVector random_uniform(const Node& node) { // TODO: This multiplication leads to a mismatch in accuracy. Issue: 123003 const auto seed_uint64 = static_cast(seed * 1000); - return {std::make_shared(target_shape_const, - low_const, - high_const, - target_type, - global_seed, - seed_uint64)}; + return {std::make_shared(target_shape_const, + low_const, + high_const, + target_type, + global_seed, + seed_uint64)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index d3768f3127e5ae..6fbaba619cf5dc 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -7,7 +7,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/opsets/opset8.hpp" #include "ngraph/shape.hpp" #include "utils/common.hpp" @@ -38,12 +37,12 @@ OutputVector random_uniform_like(const Node& node) { const uint64_t global_seed = 0; const auto seed_uint64 = static_cast(seed * 1000); - return {std::make_shared(target_shape, - low_const, - high_const, - target_type, - global_seed, - seed_uint64)}; + return {std::make_shared(target_shape, + low_const, + high_const, + target_type, + global_seed, + seed_uint64)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/roi_align.cpp b/src/frontends/onnx/frontend/src/op/roi_align.cpp index fe006258c45fd3..fbdb77e0246e3a 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.cpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.cpp @@ -6,8 +6,8 @@ #include -#include "ngraph/opsets/opset9.hpp" #include "openvino/frontend/exception.hpp" +#include "openvino/op/roi_align.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -28,18 +28,18 @@ OutputVector roi_align(const Node& node) { const auto sampling_ratio = static_cast(node.get_attribute_value("sampling_ratio", 1)); const auto spatial_scale = node.get_attribute_value("spatial_scale", 1.0f); const auto mode = node.get_attribute_value("mode", "avg"); - const auto pooling_mode = EnumNames::as_enum(mode); - const auto aligned_mode = opset9::ROIAlign::AlignedMode::ASYMMETRIC; // Compatible up to ONNX-opset16 - - return {std::make_shared(data, - rois, - num_rois, - pooled_h, - pooled_w, - sampling_ratio, - spatial_scale, - pooling_mode, - aligned_mode)}; + const auto pooling_mode = EnumNames::as_enum(mode); + const auto aligned_mode = ov::op::v9::ROIAlign::AlignedMode::ASYMMETRIC; // Compatible up to ONNX-opset16 + + return {std::make_shared(data, + rois, + num_rois, + pooled_h, + pooled_w, + sampling_ratio, + spatial_scale, + pooling_mode, + aligned_mode)}; } } // namespace set_1 namespace set_16 { @@ -57,25 +57,25 @@ OutputVector roi_align(const Node& node) { const auto sampling_ratio = node.get_attribute_value("sampling_ratio", 1); const auto spatial_scale = node.get_attribute_value("spatial_scale", 1.0f); const auto mode = node.get_attribute_value("mode", "avg"); - const auto pooling_mode = EnumNames::as_enum(mode); + const auto pooling_mode = EnumNames::as_enum(mode); const auto coordinate_transformation_mode = node.get_attribute_value("coordinate_transformation_mode", ""); - auto aligned_mode = 
opset9::ROIAlign::AlignedMode::HALF_PIXEL_FOR_NN; // Match ONNX ROIAlign-16 default + auto aligned_mode = ov::op::v9::ROIAlign::AlignedMode::HALF_PIXEL_FOR_NN; // Match ONNX ROIAlign-16 default if (coordinate_transformation_mode == "output_half_pixel") { - aligned_mode = opset9::ROIAlign::AlignedMode::ASYMMETRIC; + aligned_mode = ov::op::v9::ROIAlign::AlignedMode::ASYMMETRIC; } - return {std::make_shared(data, - rois, - num_rois, - static_cast(pooled_h), - static_cast(pooled_w), - static_cast(sampling_ratio), - spatial_scale, - pooling_mode, - aligned_mode)}; + return {std::make_shared(data, + rois, + num_rois, + static_cast(pooled_h), + static_cast(pooled_w), + static_cast(sampling_ratio), + spatial_scale, + pooling_mode, + aligned_mode)}; } } // namespace set_16 diff --git a/src/frontends/onnx/frontend/src/op/softsign.cpp b/src/frontends/onnx/frontend/src/op/softsign.cpp index 6ddee06bfe936b..c6fd91a190b111 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.cpp +++ b/src/frontends/onnx/frontend/src/op/softsign.cpp @@ -8,7 +8,6 @@ #include #include "default_opset.hpp" -#include "ngraph/opsets/opset9.hpp" #include "ngraph/shape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -17,7 +16,7 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector softsign(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 } // namespace op diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp index 72492463da3e46..e72590de5e48d4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reorder_inplace.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "common_test_utils/node_builders/constant.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index ca7ac19ac93d11..dfa44b5758a4ea 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -3,7 +3,6 @@ // #include -#include #include "single_op_tests/depth_to_space.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp index ceb5b433de6a54..a27aa05c4f9f69 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gather_elements.cpp @@ -3,7 +3,6 @@ // #include -#include #include "single_op_tests/gather_elements.hpp" #include "common_test_utils/test_constants.hpp" diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp index c34bbc44c0a21e..bbe9239e439d93 100644 --- 
a/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/hetero_query_network.hpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_common.hpp" -#include #include using namespace InferenceEngine; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index 84a35de492b7f6..ef1348d619ce46 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -19,7 +19,6 @@ #include "ov_models/builders.hpp" #include -#include #include #include #include diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp index 585f08f0beaa96..2574eef28c6f7e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp @@ -17,8 +17,6 @@ #include "ie_core.hpp" #include "ie_precision.hpp" -#include "ngraph/opsets/opset1.hpp" - #include "functional_test_utils/blob_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp index d39931e79366a8..01a4e22b9e98fb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp @@ -9,9 +9,6 @@ #include #include -#include "ngraph/opsets/opset6.hpp" -#include "ngraph/opsets/opset3.hpp" - #include "shared_test_classes/base/layer_test_utils.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp index b1e752c12bae59..218fb3028f67e0 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp @@ -17,8 +17,6 @@ #include "ie_core.hpp" #include "ie_precision.hpp" -#include "ngraph/opsets/opset1.hpp" - #include "functional_test_utils/blob_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp index b712b4c9a09b75..60642609388e4a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp @@ -17,8 +17,6 @@ #include "ie_core.hpp" #include "ie_precision.hpp" -#include "ngraph/opsets/opset1.hpp" - #include 
"functional_test_utils/blob_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "common_test_utils/common_utils.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp index cb5019388da146..e9bbfaea2d32b5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp @@ -2,8 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "ov_models/builders.hpp" #include "common_test_utils/node_builders/constant.hpp" #include "shared_test_classes/single_layer/adaptive_pooling.hpp" diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp index 484a010da483f3..95105c34b9a91a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp @@ -4,7 +4,6 @@ #include "shared_test_classes/single_layer/eye.hpp" #include -#include #include #include diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp index 15a3034e4a1d4b..cec0846756b65b 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp @@ -10,13 +10,14 @@ #include #include -#include "ngraph/opsets/opset7.hpp" #include "ngraph/pass/low_latency.hpp" #include "openvino/op/util/variable_context.hpp" #include "ov_models/builders.hpp" using namespace ngraph; -using namespace opset7; +using ov::op::v1::Add; +using ov::op::v0::TensorIterator; +using ov::op::v0::Result; namespace LayerTestsDefinitions { @@ -191,9 +192,9 @@ void MemoryTest::CreateCommonFunc() { : VariableInfo{inputShape, ngPrc, "v0"}; auto variable = std::make_shared(variable_info); auto read_value = CreateReadValueOp(param.at(0), variable); - auto add = std::make_shared(read_value, param.at(0)); + auto add = std::make_shared(read_value, param.at(0)); auto assign = CreateAssignOp(add, variable); - auto res = std::make_shared(add); + auto res = std::make_shared(add); function = std::make_shared(ResultVector{res}, SinkVector{assign}, param, "TestMemory"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index b6f506092b16c0..eedb35fe746ac2 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -4,8 +4,6 @@ #include "shared_test_classes/single_layer/reverse.hpp" -#include - #include "ov_models/builders.hpp" using namespace InferenceEngine; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 683246fc841970..87e02e82dd7f70 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -4,9 +4,6 @@ #include "shared_test_classes/single_layer/roi_align.hpp" -#include -#include - #include "ov_models/builders.hpp" #include "openvino/core/enum_names.hpp" diff --git 
a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
index c68222efcf659b..c852fefdb401b7 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
@@ -5,7 +5,6 @@
 #include "shared_test_classes/subgraph/parameter_shapeof_result.hpp"

 #include
-
 namespace SubgraphTestsDefinitions {

From e7791d45496542d647be4dd297ed5f7a9e89a503 Mon Sep 17 00:00:00 2001
From: Vishniakov Nikolai
Date: Sat, 13 Jan 2024 10:09:37 +0100
Subject: [PATCH 36/43] Avoid DOWNLOAD_EXTRACT_TIMESTAMP warning (#22135)

* Avoid DOWNLOAD_EXTRACT_TIMESTAMP warning

* Change the condition for applying the policy

Co-authored-by: Ilya Lavrenov

---------

Co-authored-by: Ilya Lavrenov
---
 CMakeLists.txt | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 82277e5c875cfb..549f7c40a2a6e8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,6 +28,11 @@ if(POLICY CMP0091)
     cmake_policy(SET CMP0091 NEW) # Enables use of MSVC_RUNTIME_LIBRARY
 endif()

+# Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24:
+if(POLICY CMP0135)
+    cmake_policy(SET CMP0135 NEW)
+endif()
+
 project(OpenVINO DESCRIPTION "OpenVINO toolkit")

 find_package(OpenVINODeveloperScripts REQUIRED

From f7849319d6aa927c8dcad76db7cbe5e8e7a46e34 Mon Sep 17 00:00:00 2001
From: Steve Yoo
Date: Sun, 14 Jan 2024 09:06:49 +0900
Subject: [PATCH 37/43] [GPU] Initial update for CTCGreedyDecoderSeqLen dynamic
 shape and multiple outputs support (#21564)

* [GPU] Initial update for CTCGreedyDecoderSeqLen dynamic shape and multiple outputs support (#21564)

- primitive API update for dynamic shape
- add shape inference test cases

* Add functional tests for multiple outputs in dynamic shapes

* Added dynamic shape support and functional tests for CTCGreedyDecoder

* Fix reshape_inst to access the intended input layout

* Add load/save/hash methods to the primitive (#21564)

* Remove the output idx update part so it can go into a separate PR (#21564)

* Fix JitConstants to avoid confusion

* Update macros and remove use_multiple_outputs (#21564)

* Update to use #elif (#21564)
---
 .../primitives/ctc_greedy_decoder.hpp       |  17 +-
 .../src/graph/ctc_greedy_decoder.cpp        |  41 ++++
 .../graph/impls/ocl/ctc_greedy_decoder.cpp  |  26 ++-
 .../graph/include/ctc_greedy_decoder_inst.h |   2 +
 .../cl_kernels/ctc_greedy_decoder_ref.cl    |  10 +-
 .../ctc_greedy_decoder_kernel_base.cpp      |  16 +-
 .../src/plugin/ops/ctc_greedy_decoder.cpp   | 144 ++++++++-----
 .../ctc_greedy_decoder_seq_len.cpp          |   2 +-
 .../dynamic/ctc_greedy_decoder.cpp          | 116 +++++++++++
 .../dynamic/ctc_greedy_decoder_seq_len.cpp  | 190 ++++++++++++++++++
 .../ctc_greedy_decoder_seq_len_si_test.cpp  | 101 ++++++++++
 11 files changed, 604 insertions(+), 61 deletions(-)
 create mode 100644 src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder.cpp
 create mode 100644 src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp
 create mode 100644 src/plugins/intel_gpu/tests/unit/shape_infer/ctc_greedy_decoder_seq_len_si_test.cpp
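As a usage sketch for the primitive API update in this patch (illustrative only, not code from the diff): the element type of `inputs` is assumed to be cldnn::input_info, since the angle-bracketed template arguments were stripped from this copy of the patch, and the primitive id strings are hypothetical.

    // Build the primitive in two-output (CTCGreedyDecoderSeqLen) mode.
    std::vector<cldnn::input_info> inputs = {cldnn::input_info("data"),
                                             cldnn::input_info("seq_len")};
    // blank_index is left at UINT32_MAX so the implementation resolves it to the
    // last class (C - 1) when no explicit blank_index input is provided.
    auto prim = cldnn::ctc_greedy_decoder("ctc_gd",
                                          inputs,
                                          UINT32_MAX,              // blank_index
                                          true,                    // ctc_merge_repeated
                                          cldnn::padding(),
                                          cldnn::data_types::i32,  // output_data_type
                                          2);                      // num_outputs

Requesting num_outputs == 2 is what routes shape inference to ov::op::v6::CTCGreedyDecoderSeqLen in the new calc_output_layouts below; the default num_outputs == 1 keeps the single-output v0 CTCGreedyDecoder behavior.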
diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/ctc_greedy_decoder.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/ctc_greedy_decoder.hpp
index 2cf19ca58cc44f..293a646cc64427 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/primitives/ctc_greedy_decoder.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/ctc_greedy_decoder.hpp
@@ -29,7 +29,22 @@ struct ctc_greedy_decoder : public primitive_base {
         , ctc_merge_repeated(ctc_merge_repeated)
         , output_tensor(output_tensor) {}

-    uint32_t blank_index;
+    /// @brief Constructs ctc_greedy_decoder primitive.
+    /// @param id This primitive id.
+    /// @param inputs Input primitive ids (input, sequence_indicators, blank_index(optional)).
+    /// @param ctc_merge_repeated Flag for merging repeated labels during the CTC calculation.
+    ctc_greedy_decoder(const primitive_id& id,
+                       const std::vector& inputs,
+                       const uint32_t blank_index,
+                       const bool ctc_merge_repeated,
+                       const padding& output_padding = padding(),
+                       data_types output_data_type = data_types::i32,
+                       const size_t num_outputs = 1)
+        : primitive_base(id, inputs, {output_padding}, {optional_data_type{output_data_type}}, num_outputs)
+        , blank_index(blank_index)
+        , ctc_merge_repeated(ctc_merge_repeated) {}
+
+    uint32_t blank_index = UINT32_MAX;
     bool ctc_merge_repeated = false;
     tensor output_tensor;
     primitive_id second_output;
diff --git a/src/plugins/intel_gpu/src/graph/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/graph/ctc_greedy_decoder.cpp
index f43c2131948363..a22abf5dde0874 100644
--- a/src/plugins/intel_gpu/src/graph/ctc_greedy_decoder.cpp
+++ b/src/plugins/intel_gpu/src/graph/ctc_greedy_decoder.cpp
@@ -7,6 +7,9 @@
 #include "json_object.h"
 #include

+#include "ctc_greedy_decoder_seq_len_shape_inference.hpp"
+#include "ctc_greedy_decoder_shape_inference.hpp"
+
 namespace cldnn {
 GPU_DEFINE_PRIMITIVE_TYPE_ID(ctc_greedy_decoder)

@@ -18,6 +21,44 @@ layout ctc_greedy_decoder_inst::calc_output_layout(ctc_greedy_decoder_node const
     return layout(output_type, input_node_layout.format, prim->output_tensor);
 }

+template
+std::vector ctc_greedy_decoder_inst::calc_output_layouts(ctc_greedy_decoder_node const& /*node*/, const kernel_impl_params& impl_param) {
+    std::vector layouts;
+
+    auto desc = impl_param.typed_desc();
+
+    std::vector input_shapes;
+    for (size_t i = 0; i < desc->input.size(); ++i) {
+        auto input_shape = impl_param.get_input_layout(i).get();
+        input_shapes.push_back(input_shape);
+    }
+
+    if (desc->num_outputs == 1) {
+        ov::op::v0::CTCGreedyDecoder op;
+
+        std::vector output_shapes = ov::op::v0::shape_infer(&op, input_shapes);
+
+        auto dt = desc->get_output_data_type(0).value_or(impl_param.get_input_layout(0).data_type);
+        layouts.push_back({output_shapes[0], dt, format::get_default_format(output_shapes[0].size())});
+
+    } else {
+        ov::op::v6::CTCGreedyDecoderSeqLen op;
+
+        std::vector output_shapes = ov::op::v6::shape_infer(&op, input_shapes);
+
+        for (size_t i = 0; i < desc->num_outputs; ++i) {
+            auto dt = desc->get_output_data_type(i).value_or(impl_param.get_input_layout(i).data_type);
+            layouts.push_back({output_shapes[i], dt, format::get_default_format(output_shapes[i].size())});
+        }
+    }
+
+    return layouts;
+}
+
+template std::vector
+ctc_greedy_decoder_inst::calc_output_layouts(ctc_greedy_decoder_node const& node,
+                                             const kernel_impl_params& impl_param);
+
 std::string ctc_greedy_decoder_inst::to_string(ctc_greedy_decoder_node const& node) {
     auto node_info = node.desc_to_json();
     auto desc = node.get_primitive();
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/ctc_greedy_decoder.cpp
index 3d00be5740b7e8..06ae2c91984d6f 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/ctc_greedy_decoder.cpp
+++
b/src/plugins/intel_gpu/src/graph/impls/ocl/ctc_greedy_decoder.cpp @@ -31,12 +31,30 @@ struct ctc_greedy_decoder_impl : typed_primitive_impl_ocl { auto has_second_output = !primitive->second_output.empty(); params.inputs.push_back(convert_data_tensor(impl_param.input_layouts[1])); params.merge_repeated = primitive->ctc_merge_repeated; - params.blank_index = primitive->blank_index; - params.outputs_num = has_second_output ? 2 : 1; - if (params.outputs_num == 2) { - params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1))); + bool allow_new_shape_infer = impl_param.get_program().get_config().get_property(ov::intel_gpu::allow_new_shape_infer); + if (allow_new_shape_infer && primitive->num_outputs == 2) { + if (primitive->blank_index == UINT32_MAX) { + params.blank_index = impl_param.get_input_layout(0).spatial(1) - 1; + } else { + params.blank_index = primitive->blank_index; + } + params.outputs_num = 2; + params.outputs.push_back(convert_data_tensor(impl_param.get_output_layout(1))); + + } else { + if (primitive->blank_index == UINT32_MAX) { + params.blank_index = impl_param.get_input_layout(0).spatial(1) - 1; + } else { + params.blank_index = primitive->blank_index; + } + params.outputs_num = has_second_output ? 2 : 1; + + if (params.outputs_num == 2) { + params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1))); + } } + return {params, optional_params}; } }; diff --git a/src/plugins/intel_gpu/src/graph/include/ctc_greedy_decoder_inst.h b/src/plugins/intel_gpu/src/graph/include/ctc_greedy_decoder_inst.h index 899e1f3bd0fa5a..2480c24b5b24ff 100644 --- a/src/plugins/intel_gpu/src/graph/include/ctc_greedy_decoder_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/ctc_greedy_decoder_inst.h @@ -32,6 +32,8 @@ class typed_primitive_inst : public typed_primitive_inst_bas using parent::parent; public: + template + static std::vector calc_output_layouts(ctc_greedy_decoder_node const& /*node*/, const kernel_impl_params& impl_param); static layout calc_output_layout(ctc_greedy_decoder_node const& node, kernel_impl_params const& impl_param); static std::string to_string(ctc_greedy_decoder_node const& node); diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/ctc_greedy_decoder_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/ctc_greedy_decoder_ref.cl index 67cdb55b455b89..b33da4d11fe489 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/ctc_greedy_decoder_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/ctc_greedy_decoder_ref.cl @@ -7,8 +7,10 @@ KERNEL(ctc_greedy_decoder_ref)(const __global INPUT0_TYPE* probabilities ,const __global INPUT1_TYPE* sequence_indicators ,__global OUTPUT_TYPE* output_sequences -#ifdef SECOND_OUTPUT_EXIST +#ifdef LEGACY_MULTIPLE_OUTPUTS ,__global INPUT2_TYPE* second_output +#elif NEW_MULTIPLE_OUTPUTS + ,__global OUTPUT1_TYPE* second_output #endif ) { @@ -23,13 +25,13 @@ KERNEL(ctc_greedy_decoder_ref)(const __global INPUT0_TYPE* probabilities for (int t = 0; t < T_; ++t) { // get maximum probability and its index -#ifdef SECOND_OUTPUT_EXIST +#if defined LEGACY_MULTIPLE_OUTPUTS || defined NEW_MULTIPLE_OUTPUTS if (t >= sequence_indicators[n]) break; #else if (sequence_indicators[t * N_ + n] == 0) break; #endif int max_class_idx = 0; -#ifdef SECOND_OUTPUT_EXIST +#if defined LEGACY_MULTIPLE_OUTPUTS || defined NEW_MULTIPLE_OUTPUTS const __global INPUT0_TYPE* probs = probabilities + n * C_ * T_ + t * C_; #else const __global INPUT0_TYPE* probs = probabilities + t * C_ * N_ + n * C_; @@ 
-51,7 +53,7 @@ KERNEL(ctc_greedy_decoder_ref)(const __global INPUT0_TYPE* probabilities prev_class_idx = max_class_idx; } -#ifdef SECOND_OUTPUT_EXIST +#if defined LEGACY_MULTIPLE_OUTPUTS || defined NEW_MULTIPLE_OUTPUTS second_output[n] = output_index - n * T_; #endif } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp index 9b9e4c65c60b98..fa18afc340bc99 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.cpp @@ -18,8 +18,16 @@ JitConstants CTCGreedyDecoderKernelBase::GetJitConstants(const ctc_greedy_decode }); if (params.outputs_num == 2) { + if (params.inputs.size() == 3) { + jit.AddConstants({ + MakeJitConstant("LEGACY_MULTIPLE_OUTPUTS", 1) + }); + } else { + jit.AddConstants({ + MakeJitConstant("NEW_MULTIPLE_OUTPUTS", 1) + }); + } jit.AddConstants({ - MakeJitConstant("SECOND_OUTPUT_EXIST", 1), MakeJitConstant("N_", inp.Batch().v), MakeJitConstant("T_", inp.Feature().v) }); @@ -73,7 +81,11 @@ KernelsData CTCGreedyDecoderKernelBase::GetCommonKernelsData(const Params& param GetFusedPrimitiveInputsCount(params)); if (orgParams.outputs_num == 2) { - kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, 2}); + if (orgParams.inputs.size() == 3) { + kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, 2}); + } else { + kernel.params.arguments.push_back({ArgumentDescriptor::Types::OUTPUT, 1}); + } } return {kd}; diff --git a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp index 5558f7be3e61f8..1535170b64c5fe 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp @@ -43,67 +43,113 @@ static void CreateCommonCTCGreedyDecoderOp(ProgramBuilder& p, const std::shared_ } } - uint32_t blank_index = static_cast(op->get_input_shape(0).back() - 1); - if (reordered_inputs.size() == 3) { - auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); - if (!blank_index_node) { - OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + if (p.use_new_shape_infer()) { + size_t num_outputs = op->get_output_size(); + + auto get_output_paddings = [&]() { + std::vector output_paddings; + for (size_t i = 0; i < num_outputs; i++) + output_paddings.push_back(cldnn::padding()); + return output_paddings; + }; + + auto get_output_data_types = [&]() { + std::vector output_data_types; + for (size_t i = 0; i < num_outputs; i++) { + auto type = op->get_output_element_type(i); + output_data_types.push_back(cldnn::element_type_to_data_type(type)); + } + return output_data_types; + }; + + uint32_t blank_index = UINT32_MAX; + if (reordered_inputs.size() == 3) { + auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); + if (!blank_index_node) { + OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + } + float val; + if (ov::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) { + OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", 
op->get_type_name(), ")"); + } + blank_index = static_cast(val); + reordered_inputs.pop_back(); } - float val; - if (ov::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) { - OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + + auto primitive = cldnn::ctc_greedy_decoder( + layer_type_name_ID(op), + reordered_inputs, + blank_index, + ctc_merge_repeated, + cldnn::padding({0, 0, 0, 0}, 0), + cldnn::element_type_to_data_type(op->get_output_element_type(0)), + op->get_output_size()); + primitive.output_paddings = get_output_paddings(); + primitive.output_data_types = get_output_data_types(); + p.add_primitive(*op, primitive); + } else { + uint32_t blank_index = static_cast(op->get_input_shape(0).back() - 1); + if (reordered_inputs.size() == 3) { + auto blank_index_node = std::dynamic_pointer_cast(op->get_input_node_shared_ptr(2)); + if (!blank_index_node) { + OPENVINO_THROW("Unsupported blank_index node type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + } + float val; + if (ov::shape_size(blank_index_node->get_output_shape(0)) != 1 || !ov::op::util::get_single_value(blank_index_node, val)) { + OPENVINO_THROW("Unsupported parameter size in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + } + blank_index = static_cast(val); + reordered_inputs.pop_back(); } - blank_index = static_cast(val); - reordered_inputs.pop_back(); - } - std::size_t num_output = op->get_output_size(); + std::size_t num_output = op->get_output_size(); - std::vector shared_memory; - if (num_output == 2) { - auto mutable_precision = op->get_output_element_type(1); - if (mutable_precision == ov::element::i64) { - mutable_precision = ov::element::i32; - } + std::vector shared_memory; + if (num_output == 2) { + auto mutable_precision = op->get_output_element_type(1); + if (mutable_precision == ov::element::i64) { + mutable_precision = ov::element::i32; + } - cldnn::layout mutableLayout = cldnn::layout( - cldnn::element_type_to_data_type(mutable_precision), - cldnn::format::get_default_format(op->get_output_shape(1).size()), - tensor_from_dims(op->get_output_shape(1))); + cldnn::layout mutableLayout = cldnn::layout( + cldnn::element_type_to_data_type(mutable_precision), + cldnn::format::get_default_format(op->get_output_shape(1).size()), + tensor_from_dims(op->get_output_shape(1))); - GPU_DEBUG_LOG << "[" << layer_type_name_ID(op) << ": mutable data]" << std::endl; - shared_memory.emplace_back(p.get_engine().allocate_memory(mutableLayout)); + GPU_DEBUG_LOG << "[" << layer_type_name_ID(op) << ": mutable data]" << std::endl; + shared_memory.emplace_back(p.get_engine().allocate_memory(mutableLayout)); - cldnn::primitive_id ctc_gd_mutable_id_w = layer_type_name_ID(op) + "_md_write"; - auto ctc_gd_mutable_prim = cldnn::mutable_data(ctc_gd_mutable_id_w, - shared_memory[0]); - p.add_primitive(*op, ctc_gd_mutable_prim); - reordered_inputs.push_back(ctc_gd_mutable_id_w); - } + cldnn::primitive_id ctc_gd_mutable_id_w = layer_type_name_ID(op) + "_md_write"; + auto ctc_gd_mutable_prim = cldnn::mutable_data(ctc_gd_mutable_id_w, + shared_memory[0]); + p.add_primitive(*op, ctc_gd_mutable_prim); + reordered_inputs.push_back(ctc_gd_mutable_id_w); + } - auto CTCGreedyDecoderLayerName = num_output == 2 ? 
layer_type_name_ID(op) + ".out0" : layer_type_name_ID(op); - auto primitive = cldnn::ctc_greedy_decoder( - CTCGreedyDecoderLayerName, - reordered_inputs, - blank_index, - ctc_merge_repeated, - tensor_from_dims(op->get_output_shape(0))); + auto CTCGreedyDecoderLayerName = num_output == 2 ? layer_type_name_ID(op) + ".out0" : layer_type_name_ID(op); + auto primitive = cldnn::ctc_greedy_decoder( + CTCGreedyDecoderLayerName, + reordered_inputs, + blank_index, + ctc_merge_repeated, + tensor_from_dims(op->get_output_shape(0))); - // GPU primitive supports only i32 as output data type - primitive.output_data_types = {cldnn::element_type_to_data_type(ov::element::i32)}; + // GPU primitive supports only i32 as output data type + primitive.output_data_types = {cldnn::element_type_to_data_type(ov::element::i32)}; - if (num_output == 2) { - primitive.second_output = reordered_inputs.back().pid; - } + if (num_output == 2) { + primitive.second_output = reordered_inputs.back().pid; + } - p.add_primitive(*op, primitive); + p.add_primitive(*op, primitive); - if (num_output == 2) { - cldnn::primitive_id ctc_gd_mutable_id_r = layer_type_name_ID(op) + ".out1"; - auto ctc_gd_mutable_prim_r = cldnn::mutable_data(ctc_gd_mutable_id_r, - { cldnn::input_info(CTCGreedyDecoderLayerName) }, - shared_memory[0]); - p.add_primitive(*op, ctc_gd_mutable_prim_r); + if (num_output == 2) { + cldnn::primitive_id ctc_gd_mutable_id_r = layer_type_name_ID(op) + ".out1"; + auto ctc_gd_mutable_prim_r = cldnn::mutable_data(ctc_gd_mutable_id_r, + { cldnn::input_info(CTCGreedyDecoderLayerName) }, + shared_memory[0]); + p.add_primitive(*op, ctc_gd_mutable_prim_r); + } } } diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp index 12d318d107d342..4667ac7a1b9aca 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/ctc_greedy_decoder_seq_len.cpp @@ -9,7 +9,7 @@ namespace { using ov::test::CTCGreedyDecoderSeqLenLayerTest; -std::vector> inputShape{{{1, 1, 1}}, {{1, 6, 10}}, {{3, 3, 16}}, {{5, 3, 55}}}; +std::vector> inputShape{{{1, 28, 41}}, {{1, 1, 1}}, {{1, 6, 10}}, {{3, 3, 16}}, {{5, 3, 55}}}; const std::vector probPrecisions = { ov::element::f32, diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder.cpp new file mode 100644 index 00000000000000..ba6545f31a95cf --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder.cpp @@ -0,0 +1,116 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +// #include "single_op_tests/ctc_greedy_decoder.hpp" +// #include "shared_test_classes/single_op/ctc_greedy_decoder.hpp" +#include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace { +using ov::test::InputShape; + +typedef std::tuple< + ov::element::Type, // Model type + InputShape, // Input shape + bool, // Merge repeated + std::string // Device name +> ctcGreedyDecoderParams; + +class CTCGreedyDecoderLayerGPUTest + : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + 
static std::string getTestCaseName(const testing::TestParamInfo& obj) { + ov::element::Type model_type; + InputShape input_shape; + std::string targetDevice; + bool merge_repeated; + std::tie(model_type, input_shape, merge_repeated, targetDevice) = obj.param; + + std::ostringstream result; + const char separator = '_'; + + result << "IS=("; + result << ov::test::utils::partialShape2str({input_shape.first}) << "_" << "TS=("; + for (size_t i = 0lu; i < input_shape.second.size(); i++) { + result << ov::test::utils::vec2str(input_shape.second[i]) << "_"; + } + result << ")_"; + result << "netPRC=" << model_type.get_type_name() << separator; + result << "merge_repeated=" << std::boolalpha << merge_repeated << separator; + result << "trgDev=" << targetDevice; + + return result.str(); + } +protected: + void SetUp() override { + ov::element::Type model_type; + InputShape input_shape; + bool merge_repeated; + std::tie(model_type, input_shape, merge_repeated, targetDevice) = GetParam(); + inputDynamicShapes = {input_shape.first, {}}; + for (size_t i = 0; i < input_shape.second.size(); ++i) { + targetStaticShapes.push_back({input_shape.second[i], {}}); + } + + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + size_t T = targetStaticShapes[0][0][0]; + size_t B = targetStaticShapes[0][0][1]; + + std::mt19937 gen(1); + std::uniform_int_distribution dist(1, T); + + std::vector sequence_mask_data(B * T, 0); + for (size_t b = 0; b < B; b++) { + int len = dist(gen); + for (int t = 0; t < len; t++) { + sequence_mask_data[t * B + b] = 1; + } + } + auto sequence_mask_node = std::make_shared(model_type, ov::Shape{T, B}, sequence_mask_data); + + auto ctc_greedy_decoder = std::make_shared(param, sequence_mask_node, merge_repeated); + + auto result = std::make_shared(ctc_greedy_decoder); + function = std::make_shared(result, ov::ParameterVector{param}, "CTCGreedyDecoder"); + } +}; + + +TEST_P(CTCGreedyDecoderLayerGPUTest, Inference) { + run(); +}; + +// Common params +const std::vector netPrecisions = { + ov::element::f32, + ov::element::f16 +}; +std::vector mergeRepeated{true, false}; + +std::vector input_shapes_dynamic = { + { + {{-1, -1, -1}, {{ 50, 3, 3 }}}, + {{-1, -1, -1}, {{ 50, 3, 7 }}}, + {{-1, -1, -1}, {{ 50, 3, 8 }}}, + {{-1, -1, -1}, {{ 50, 3, 16 }}}, + {{-1, -1, -1}, {{ 50, 3, 128 }}}, + {{-1, -1, -1}, {{ 50, 3, 49 }}}, + {{-1, -1, -1}, {{ 50, 3, 55 }}}, + {{-1, -1, -1}, {{ 1, 1, 16 }}} + } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_CtcGreedyDecoderBasicDynamic, + CTCGreedyDecoderLayerGPUTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(input_shapes_dynamic), + ::testing::ValuesIn(mergeRepeated), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + CTCGreedyDecoderLayerGPUTest::getTestCaseName); +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp new file mode 100644 index 00000000000000..0c7b25766d0a63 --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp @@ -0,0 +1,190 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include 
"common_test_utils/test_constants.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" + +using namespace InferenceEngine; +using namespace ov::test; + +namespace GPULayerTestsDefinitions { + +typedef std::tuple< + InputShape, // Input shape + int, // Sequence lengths + ov::element::Type, // Probabilities precision + ov::element::Type, // Indices precision + int, // Blank index + bool, // Merge repeated + std::string // Device name +> ctcGreedyDecoderSeqLenParams; + +class CTCGreedyDecoderSeqLenLayerGPUTest + : public testing::WithParamInterface, + virtual public SubgraphBaseTest { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + InputShape inputShape; + int sequenceLengths; + ov::element::Type dataPrecision, indicesPrecision; + int blankIndex; + bool mergeRepeated; + std::string targetDevice; + std::tie(inputShape, + sequenceLengths, + dataPrecision, + indicesPrecision, + blankIndex, + mergeRepeated, + targetDevice) = obj.param; + + std::ostringstream result; + + result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_" << "TS=("; + for (const auto& shape : inputShape.second) { + result << ov::test::utils::vec2str(shape) << "_"; + } + result << ")_"; + result << "seqLen=" << sequenceLengths << '_'; + result << "dataPRC=" << dataPrecision.get_type_name() << '_'; + result << "idxPRC=" << indicesPrecision.get_type_name() << '_'; + result << "BlankIdx=" << blankIndex << '_'; + result << "mergeRepeated=" << std::boolalpha << mergeRepeated << '_'; + result << "trgDev=" << targetDevice; + + return result.str(); + } + +protected: + void SetUp() override { + InputShape inputShape; + int sequenceLengths; + ov::element::Type model_type, indices_type; + int blankIndex; + bool mergeRepeated; + std::tie(inputShape, + sequenceLengths, + model_type, + indices_type, + blankIndex, + mergeRepeated, + targetDevice) = GetParam(); + inputDynamicShapes = {inputShape.first, {}}; + for (size_t i = 0; i < inputShape.second.size(); ++i) { + targetStaticShapes.push_back({inputShape.second[i], {}}); + } + + ov::ParameterVector params {std::make_shared(model_type, inputDynamicShapes.front())}; + + const auto sequenceLenNode = [&] { + const size_t B = targetStaticShapes[0][0][0]; + const size_t T = targetStaticShapes[0][0][1]; + + // Cap sequence length up to T + const int seqLen = std::min(T, sequenceLengths); + + std::mt19937 gen{42}; + std::uniform_int_distribution dist(1, seqLen); + + std::vector sequenceLenData(B); + for (size_t b = 0; b < B; b++) { + const int len = dist(gen); + sequenceLenData[b] = len; + } + + return std::make_shared(indices_type, ov::Shape{B}, sequenceLenData); + }(); + + // Cap blank index up to C - 1 + int C = targetStaticShapes[0][0][2]; + blankIndex = std::min(blankIndex, C - 1); + + const auto blankIndexNode = [&] { + if (indices_type == ov::element::i32) { + const auto blankIdxDataI32 = std::vector{blankIndex}; + return std::make_shared(indices_type, ov::Shape{1}, blankIdxDataI32); + } else if (indices_type == ov::element::i64) { + const auto blankIdxDataI64 = std::vector{blankIndex}; + return std::make_shared(indices_type, ov::Shape{1}, blankIdxDataI64); + } + throw std::logic_error("Unsupported index precision"); + }(); + + auto ctcGreedyDecoderSeqLen = std::make_shared(params[0], + sequenceLenNode, + blankIndexNode, + mergeRepeated, + indices_type, + indices_type); + + ov::OutputVector results; + for (size_t i = 0; i < ctcGreedyDecoderSeqLen->get_output_size(); i++) { + 
results.push_back(std::make_shared(ctcGreedyDecoderSeqLen->output(i))); + } + function = std::make_shared(results, params, "CTCGreedyDecoderSeqLen"); + } +}; + +TEST_P(CTCGreedyDecoderSeqLenLayerGPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + run(); +}; + +namespace { + +std::vector inputShapeDynamic = { + { + {{-1, -1, -1}, {{1, 28, 41}}}, + {{-1, -1, -1}, {{1, 1, 1}}}, + {{-1, -1, -1}, {{1, 6, 10}}}, + {{-1, -1, -1}, {{3, 3, 16}}}, + {{-1, -1, -1}, {{5, 3, 55}}}, + } +}; + +const std::vector probPrecisions = { + ov::element::f32, + ov::element::f16 +}; +const std::vector idxPrecisions = { + ov::element::i32, + ov::element::i64 +}; + +std::vector mergeRepeated{true, false}; + +INSTANTIATE_TEST_SUITE_P(smoke_ctc_greedy_decoder_seq_len_dynamic, + CTCGreedyDecoderSeqLenLayerGPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapeDynamic), + ::testing::Values(10), + ::testing::ValuesIn(probPrecisions), + ::testing::ValuesIn(idxPrecisions), + ::testing::Values(0), + ::testing::ValuesIn(mergeRepeated), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + CTCGreedyDecoderSeqLenLayerGPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ctc_greedy_decoder_seq_len_bi_dynamic, + CTCGreedyDecoderSeqLenLayerGPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector{ + {{-1, -1, -1}, {{2, 8, 11}}}, + {{-1, -1, -1}, {{4, 10, 55}}}}), + ::testing::ValuesIn(std::vector{5, 100}), + ::testing::ValuesIn(probPrecisions), + ::testing::ValuesIn(idxPrecisions), + ::testing::ValuesIn(std::vector{0, 5, 10}), + ::testing::ValuesIn(mergeRepeated), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + CTCGreedyDecoderSeqLenLayerGPUTest::getTestCaseName); +} // namespace +} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/ctc_greedy_decoder_seq_len_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/ctc_greedy_decoder_seq_len_si_test.cpp new file mode 100644 index 00000000000000..045a06005e554a --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/ctc_greedy_decoder_seq_len_si_test.cpp @@ -0,0 +1,101 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "test_utils.h" + +#include +#include +#include + +#include "ctc_greedy_decoder_inst.h" + +#include "program_wrapper.h" + +using namespace cldnn; +using namespace ::tests; + +namespace shape_infer_tests { + +struct ctc_greedy_decoder_seq_len_test_params { + std::vector in_layouts; + std::vector blank_index; + std::vector expected_layouts; +}; + +class ctc_greedy_decoder_seq_len_test : public testing::TestWithParam { }; + +TEST_P(ctc_greedy_decoder_seq_len_test, shape_infer) { + auto p = GetParam(); + auto& engine = get_test_engine(); + + std::vector> input_prims; + std::vector input_prim_ids; + { + auto prim_id = "input"; + auto input_layout_prim = std::make_shared(prim_id, p.in_layouts[0]); + input_prims.push_back(input_layout_prim); + input_prim_ids.push_back(input_info(prim_id)); + } + + for (size_t i = 1; i < p.in_layouts.size(); i++) { + auto prim_id = "const" + std::to_string(i); + auto prim_mem = engine.allocate_memory(p.in_layouts[i]); + if (i == 2) + set_values(prim_mem, p.blank_index); + auto const_data_prim = std::make_shared(prim_id, prim_mem); + input_prims.push_back(const_data_prim); + input_prim_ids.push_back(input_info(prim_id)); + } + + auto ctc_greedy_decoder_seq_len_prim = std::make_shared( + "output", + input_prim_ids, + p.blank_index[0], + true, + padding(), + data_types::i32, + 2); + + cldnn::program 
prog(engine);
+    auto& ctc_greedy_decoder_seq_len_node = prog.get_or_create(ctc_greedy_decoder_seq_len_prim);
+    for (auto& prim : input_prims) {
+        auto& input_layout_node = prog.get_or_create(prim);
+        program_wrapper::add_connection(prog, input_layout_node, ctc_greedy_decoder_seq_len_node);
+    }
+
+    auto res = ctc_greedy_decoder_inst::calc_output_layouts<ov::PartialShape>(ctc_greedy_decoder_seq_len_node, *ctc_greedy_decoder_seq_len_node.get_kernel_impl_params());
+
+    ASSERT_EQ(res.size(), 2);
+    for (size_t i = 0; i < p.expected_layouts.size(); i++)
+        ASSERT_EQ(res[i], p.expected_layouts[i]);
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke, ctc_greedy_decoder_seq_len_test,
+    testing::ValuesIn(std::vector<ctc_greedy_decoder_seq_len_test_params>{
+        {
+            {
+                {layout{ov::PartialShape{1, 6, 10}, data_types::f32, format::bfyx}},
+                {layout{ov::PartialShape{1}, data_types::i32, format::bfyx}},
+            },
+            {-1},
+            {
+                {layout{ov::PartialShape{1, 6}, data_types::i32, format::bfyx}},
+                {layout{ov::PartialShape{1}, data_types::i32, format::bfyx}},
+            },
+        },
+        {
+            {
+                {layout{ov::PartialShape{1, 6, 10}, data_types::f32, format::bfyx}},
+                {layout{ov::PartialShape{1}, data_types::i32, format::bfyx}},
+                {layout{ov::PartialShape{1}, data_types::i32, format::bfyx}},
+            },
+            {5},
+            {
+                {layout{ov::PartialShape{1, 6}, data_types::i32, format::bfyx}},
+                {layout{ov::PartialShape{1}, data_types::i32, format::bfyx}},
+            },
+        },
+    }));
+
+} // namespace shape_infer_tests

From 76338b3789dc4dc23f086915451a8fb047891be5 Mon Sep 17 00:00:00 2001
From: Vladimir Paramuzov
Date: Mon, 15 Jan 2024 08:53:05 +0400
Subject: [PATCH 38/43] [GPU] Fixed mem alloc size and pad propagation for kv cache opt (#22128)

---
 .../intel_gpu/src/graph/primitive_inst.cpp    | 17 ++++----
 .../subgraph_tests/dynamic/kv_cache.cpp       | 41 +++++++++++++------
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 937179e14b03f2..73248603fe9058 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -521,10 +521,9 @@ event::ptr primitive_inst::realloc_if_needed() {
     }
     // update layout to ensure that it respects paddings for correct allocation size
-    if (_node->is_type<kv_cache>() && !_impl_params->can_be_optimized()) {
+    if (_node_output_layout.data_padding.get_dynamic_pad_dims() != tensor(0)) {
         const auto current_buf_size = updated_layout.get_buffer_size().sizes();
-        ov::Shape current_shape(current_buf_size.begin(), current_buf_size.end());
-        updated_layout.set_partial_shape(current_shape);
+        updated_layout = layout(ov::Shape(current_buf_size.begin(), current_buf_size.end()), updated_layout.data_type, updated_layout.format);
     }
     bool can_reuse_buffer = _outputs[0] && updated_layout.count() <= max_output_layout_size;
@@ -535,12 +534,12 @@ event::ptr primitive_inst::realloc_if_needed() {
         return ev;
     }
-    auto current_shape = actual_layout.get_shape();
+    auto current_shape = updated_layout.get_shape();
     auto& sp = *get_network().get_shape_predictor();
-    auto dt_size = ov::element::Type(actual_layout.data_type).bitwidth();
+    auto dt_size = ov::element::Type(updated_layout.data_type).bitwidth();
     auto prealloc_info = sp.predict_preallocation_shape(id(), current_shape, dt_size, can_reuse_buffer);
     if (prealloc_info.first && sp.can_preallocate(ov::shape_size(prealloc_info.second) * dt_size)) {
-        auto new_layout = actual_layout;
+        auto new_layout = updated_layout;
         new_layout.set_partial_shape(prealloc_info.second);
         updated_params.output_layouts[0] = new_layout;
     }
@@ -561,7 
+560,7 @@ event::ptr primitive_inst::realloc_if_needed() { } else { GPU_DEBUG_TRACE_DETAIL << id() << ": realloc output memory. " << " Current buffer_size=" << max_output_layout_size - << " Requested buffer_size=" << actual_layout.count() << std::endl; + << " Requested buffer_size=" << updated_layout.count() << std::endl; _outputs = allocate_outputs(&updated_params, need_reset_output_memory(), true); // TODO : need to handle multiple outputs max_output_layout_size = updated_params.output_layouts[0].count(); @@ -976,11 +975,15 @@ void primitive_inst::do_runtime_skip_gather() { for (int64_t i = 0; i < static_cast(idx_shape[0]); ++i) { if (idx_data[i] != i) { GPU_DEBUG_TRACE_DETAIL << "--- Cannot optimize because idx_data [" << i << "] (" << idx_data[i] << ") != " << i << std::endl; + if (_impl_params->output_layouts[0].data_padding.get_dynamic_pad_dims() != tensor(0)) + _impl_params->output_layouts[0].data_padding = padding(); set_can_be_optimized(false); return; } } } + // propagate input layout including correct paddings. + _impl_params->output_layouts[0] = _impl_params->input_layouts[0]; GPU_DEBUG_TRACE_DETAIL << "[do_runtime_skip_gather] " << id() << " : can_be_optimized" << std::endl; GPU_DEBUG_TRACE_DETAIL << " - Input layout : " << _impl_params->get_input_layout(0).to_short_string() << std::endl; GPU_DEBUG_TRACE_DETAIL << " - Indices layout : " << _impl_params->get_input_layout(1).to_short_string() << std::endl; diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp index f2e69e1c298683..599f247210d44c 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp @@ -254,6 +254,7 @@ class KVCacheTests: public ::testing::Test { void test_smoke_multipleIterations_stateful(bool is_caching_test, bool fuse_cache_reorder, bool build_state_initializer, + size_t batch = 1, ov::element::Type model_element_type = ov::element::f16) { #if defined(ANDROID) GTEST_SKIP(); @@ -277,7 +278,6 @@ class KVCacheTests: public ::testing::Test { properties.insert(ov::cache_dir(cacheDirName)); } - const size_t batch = 1; const size_t n_heads = 32; const size_t n_features = 10; const size_t context_size = 20; @@ -311,14 +311,23 @@ class KVCacheTests: public ::testing::Test { auto output0 = model->get_results().at(0); auto beam_idx_shape = ov::Shape{batch}; - auto beam_idx_data = ov::Tensor(ov::element::i32, beam_idx_shape); + + auto beam_idx_data_0 = ov::Tensor(ov::element::i32, beam_idx_shape); + auto beam_idx_data_1 = ov::Tensor(ov::element::i32, beam_idx_shape); for (size_t i = 0; i < batch; i++) { - beam_idx_data.data()[i] = i; + beam_idx_data_0.data()[i] = i; + beam_idx_data_1.data()[i] = batch - i - 1; } - auto get_ref_results = [&ref_model, fuse_cache_reorder, &beam_idx_shape, &beam_idx_data](const ov::Tensor& kv_cache, - const ov::Tensor& new_token_data, - const ov::Tensor& matmul_data) { + std::vector beam_idx_data_array = { + beam_idx_data_0, + beam_idx_data_1, + }; + + auto get_ref_results = [&ref_model, fuse_cache_reorder, &beam_idx_shape](const ov::Tensor& kv_cache, + const ov::Tensor& new_token_data, + const ov::Tensor& matmul_data, + const ov::Tensor& beam_idx_data) { auto input0 = ref_model->get_parameters().at(0); auto input1 = ref_model->get_parameters().at(1); auto input2 = ref_model->get_parameters().at(2); @@ -367,9 +376,6 @@ class KVCacheTests: public ::testing::Test { 
infer_request.set_tensor(input0, new_token_input); infer_request.set_tensor(input1, matmul_input); - if (fuse_cache_reorder) { - infer_request.set_tensor(input2, beam_idx_data); - } for (size_t num_repeats = 0; num_repeats < 2; num_repeats++) { ov::Tensor ref_kv_cache; @@ -388,9 +394,13 @@ class KVCacheTests: public ::testing::Test { new_token_data.copy_to(new_token_input); matmul_data.copy_to(matmul_input); + if (fuse_cache_reorder) { + infer_request.set_tensor(input2, beam_idx_data_array[0]); + } + ref_kv_cache = ov::Tensor(element_type, kv_cache_size_initial); - auto ref_results = get_ref_results(ref_kv_cache, new_token_data, matmul_data); + auto ref_results = get_ref_results(ref_kv_cache, new_token_data, matmul_data, beam_idx_data_array[0]); ref_kv_cache = ref_results[0]; infer_request.infer(); @@ -408,7 +418,11 @@ class KVCacheTests: public ::testing::Test { ov::Shape matmul_in_size_loop = {batch, n_heads, input_tokens, context_length}; auto new_token_data = ov::test::utils::create_and_fill_tensor(element_type, new_token_size); auto matmul_data = ov::test::utils::create_and_fill_tensor(element_type, matmul_in_size_loop); - auto ref_results = get_ref_results(ref_kv_cache, new_token_data, matmul_data); + if (fuse_cache_reorder) { + infer_request.set_tensor(input2, beam_idx_data_array[i % beam_idx_data_array.size()]); + } + + auto ref_results = get_ref_results(ref_kv_cache, new_token_data, matmul_data, beam_idx_data_array[i % beam_idx_data_array.size()]); ref_kv_cache = ref_results[0]; new_token_input.set_shape(new_token_data.get_shape()); @@ -461,7 +475,10 @@ TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_c } TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_f32) { - this->test_smoke_multipleIterations_stateful(false, true, true, ov::element::f32); + this->test_smoke_multipleIterations_stateful(false, true, true, 1, ov::element::f32); +} +TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_batch_3) { + this->test_smoke_multipleIterations_stateful(false, true, true, 3); } } // namespace From 75f87ad19b043f42049ec8df75043e07eb55caee Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Mon, 15 Jan 2024 10:50:56 +0400 Subject: [PATCH 39/43] [Snippets] Fixed access by expired ref (#22132) --- src/common/snippets/src/lowered/pass/allocate_buffers.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp index 18ef0d09b9704e..d34b442fd33051 100644 --- a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp @@ -67,18 +67,19 @@ void AllocateBuffers::set_buffer_offset(const ExpressionPtr& buffer_expr, const bool AllocateBuffers::run(lowered::LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::AllocateBuffers"); m_buffer_scratchpad_size = 0; - PassPipeline pipeline; + if (m_is_optimized_mode) { BufferClusters buffer_clusters; + PassPipeline pipeline; pipeline.register_pass(); pipeline.register_pass(); pipeline.register_pass(buffer_clusters); pipeline.register_pass(m_buffer_scratchpad_size, buffer_clusters); pipeline.register_pass(); + pipeline.run(linear_ir); } else { - pipeline.register_pass(m_buffer_scratchpad_size); + InitBuffersDefault(m_buffer_scratchpad_size).run(linear_ir); } - pipeline.run(linear_ir); return m_buffer_scratchpad_size > 0; } From 
a8311777d1e3bd8143e52c0b171f8763c3a98960 Mon Sep 17 00:00:00 2001 From: Tingqian Li Date: Mon, 15 Jan 2024 14:55:13 +0800 Subject: [PATCH 40/43] [CPU] Optimize SDPA's shape inference (#22037) --- .../intel_cpu/src/nodes/scaled_attn.cpp | 3 +- .../shape_inference/custom/scaled_attn.cpp | 68 ++++++++++ .../shape_inference/custom/scaled_attn.hpp | 24 ++++ .../cpu_opset/common/op/sdpa.cpp | 5 + .../custom_shape_infer/custom_shape_infer.cpp | 5 +- .../custom_shape_infer/scaled_attn.cpp | 127 ++++++++++++++++++ 6 files changed, 230 insertions(+), 2 deletions(-) create mode 100644 src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp create mode 100644 src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp create mode 100644 src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/scaled_attn.cpp diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp index 848fbf8982c4ee..e56d289a20cece 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp @@ -15,6 +15,7 @@ #include "openvino/core/parallel.hpp" #include "openvino/op/scaled_dot_product_attention.hpp" #include "openvino/util/common_util.hpp" +#include "shape_inference/custom/scaled_attn.hpp" #include "shape_inference/shape_inference_internal_dyn.hpp" #include "utils/plain_tensor.hpp" @@ -638,7 +639,7 @@ struct ScaledDotProductAttention::AttentionExecutor : public ScaledDotProductAtt }; ScaledDotProductAttention::ScaledDotProductAttention(const std::shared_ptr& op, const GraphContext::CPtr context) - : Node(op, context, NgraphShapeInferFactory(op, EMPTY_PORT_MASK)), m_tmp_reorder(true) { + : Node(op, context, SDPAShapeInferFactory(op)), m_tmp_reorder(true) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW("CPU: " + errorMessage); diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp new file mode 100644 index 00000000000000..ba6064d5eab007 --- /dev/null +++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "scaled_attn.hpp" + +#include "shape_inference/shape_inference_cpu.hpp" +#include "shape_inference/shape_inference_ngraph.hpp" +#include "transformations/cpu_opset/common/op/sdpa.hpp" +#include "utils.hpp" + +namespace ov { +namespace intel_cpu { +namespace node { + +class SDPAShapeInfer : public ShapeInferEmptyPads { +public: + SDPAShapeInfer(const ScaledDotProductAttentionWithKVCache::Config& config) : m_config(config) {} + + IShapeInfer::Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + const auto& query_dims = input_shapes.front().get(); + VectorDims present_kv_dims = input_shapes.back().get(); + const auto& beam_idx_dims = input_shapes.end()[-3].get(); + const auto& permute_axes = m_config.permute_axes; + + if (permute_axes.empty()) { + // [B, H, L, S] + present_kv_dims[0] = beam_idx_dims[0]; + present_kv_dims[2] += query_dims[2]; + return {{query_dims, present_kv_dims, present_kv_dims}, ShapeInferStatus::success}; + } + + // permute_axes[0,1,2,3] gives axis indices of B,H,L,S for query & present_kv + const size_t batch_index = permute_axes[0]; + const size_t length_index = permute_axes[2]; + present_kv_dims[batch_index] = beam_idx_dims[0]; + present_kv_dims[length_index] += 
query_dims[length_index];
+
+        auto n_dims = query_dims.size();
+        VectorDims output_dims(n_dims);
+        for (size_t i = 0; i < n_dims; i++) {
+            output_dims[i] = query_dims[permute_axes[i]];
+        }
+        return {{output_dims, present_kv_dims, present_kv_dims}, ShapeInferStatus::success};
+    }
+
+    port_mask_t get_port_mask() const override {
+        return EMPTY_PORT_MASK;
+    }
+
+private:
+    ScaledDotProductAttentionWithKVCache::Config m_config;
+};
+
+ShapeInferPtr SDPAShapeInferFactory::makeShapeInfer() const {
+    if (auto sdpa = std::dynamic_pointer_cast<ScaledDotProductAttentionWithKVCache>(m_op)) {
+        const auto& config = sdpa->get_config();
+        if (config.output_BLHxS == false)
+            return std::make_shared<SDPAShapeInfer>(config);
+    }
+    // fallback to ngraph shape infer on non-perf-critical case
+    return std::make_shared<NgraphShapeInfer>(make_shape_inference(m_op), EMPTY_PORT_MASK);
+}
+
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp
new file mode 100644
index 00000000000000..8b8e06acb268f9
--- /dev/null
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+
+#include "shape_inference/shape_inference_cpu.hpp"
+
+#pragma once
+namespace ov {
+namespace intel_cpu {
+namespace node {
+
+class SDPAShapeInferFactory : public ShapeInferFactory {
+public:
+    SDPAShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+    ShapeInferPtr makeShapeInfer() const override;
+
+private:
+    std::shared_ptr<ov::Node> m_op;
+};
+} // namespace node
+} // namespace intel_cpu
+} // namespace ov
diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp
index 31bce21d3579d3..0f780594934105 100644
--- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp
+++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp
@@ -28,6 +28,8 @@ void ov::intel_cpu::ScaledDotProductAttentionWithKVCache::validate_and_infer_typ
     auto q_ps = get_input_partial_shape(0);
     // [B, H, L0, S]
     auto past_kv_ps = get_input_partial_shape(input_num - 1);
+    // [present_kv_batch_size]
+    auto beam_idx_ps = get_input_partial_shape(input_num - 3);
     auto output_logits = q_ps;
     NODE_VALIDATION_CHECK(this, m_config.output_BLHxS == false);
@@ -35,6 +37,7 @@ void ov::intel_cpu::ScaledDotProductAttentionWithKVCache::validate_and_infer_typ
     // permute_axes from original to [B, H, L, S]
     const auto& permute_axes = this->m_config.permute_axes;
     if (past_kv_ps.rank().is_static()) {
+        const size_t batch_index = permute_axes.empty() ? 0 : permute_axes[0];
         const size_t length_index = permute_axes.empty() ? q_ps.size() - 2 : permute_axes[permute_axes.size() - 2];
         const size_t head_num_index = permute_axes.empty() ? 
q_ps.size() - 3 : permute_axes[permute_axes.size() - 3];
     NODE_VALIDATION_CHECK(this, q_ps.size() == past_kv_ps.size());
@@ -50,6 +53,8 @@ void ov::intel_cpu::ScaledDotProductAttentionWithKVCache::validate_and_infer_typ
                 continue;
             }
         }
+        // batch_size can be dynamically changed by gather logic
+        past_kv_ps[batch_index] = beam_idx_ps[0];
         past_kv_ps[length_index] += q_ps[length_index];
     }
     if (!permute_axes.empty()) {
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp
index 038e3185235950..b0bf4c384e5693 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/custom_shape_infer.cpp
@@ -1,9 +1,10 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "custom_shape_infer.hpp"
+
 #include <gtest/gtest.h>
 
-#include "custom_shape_infer.hpp"
 #include "openvino/cc/factory.h"
 #include "openvino/core/partial_shape.hpp"
 #include "openvino/core/type.hpp"
@@ -20,6 +21,7 @@
 #include "shape_inference/custom/priorbox.hpp"
 #include "shape_inference/custom/priorbox_clustered.hpp"
 #include "shape_inference/custom/reshape.hpp"
+#include "shape_inference/custom/scaled_attn.hpp"
 #include "shape_inference/custom/shapeof.hpp"
 #include "shape_inference/custom/strided_slice.hpp"
 #include "shape_inference/custom/transpose.hpp"
@@ -59,6 +61,7 @@ class CustomShapeInferFF : public openvino::cc::Factory
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/scaled_attn.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/scaled_attn.cpp
new file mode 100644
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/scaled_attn.cpp
@@ -0,0 +1,127 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "common_test_utils/test_assertions.hpp"
+#include "custom_shape_infer.hpp"
+#include "openvino/op/ops.hpp"
+#include "transformations/cpu_opset/common/op/sdpa.hpp"
+
+namespace ov {
+namespace intel_cpu {
+namespace unit_test {
+namespace cpu_shape_infer {
+using namespace ov;
+using namespace ov::intel_cpu;
+using namespace testing;
+
+using SDPATestParams = std::tuple<unit_test::ShapeVector,  // Input shapes
+                                  std::vector<size_t>,     // permute_axes
+                                  unit_test::ShapeVector   // Expected output shapes
+                                  >;
+
+class SDPACpuShapeInferenceTest
+    : public unit_test::OpCpuShapeInferenceTest<ov::intel_cpu::ScaledDotProductAttentionWithKVCache>,
+      public WithParamInterface<SDPATestParams> {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<SDPATestParams>& obj) {
+        unit_test::ShapeVector tmp_input_shapes;
+        std::vector<size_t> tmp_permute_axes;
+        unit_test::ShapeVector tmp_exp_shape;
+        std::tie(tmp_input_shapes, tmp_permute_axes, tmp_exp_shape) = obj.param;
+        std::ostringstream result;
+        result << "IS" << ov::test::utils::vec2str(tmp_input_shapes) << "_";
+        result << "permute_axes" << ov::test::utils::vec2str(tmp_permute_axes) << "_";
+        result << "exp_shape" << ov::test::utils::vec2str(tmp_exp_shape);
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(input_shapes, permute_axes, output_shapes) = GetParam();
+
+        args.clear();
+        for (const auto& ishape : input_shapes) {
+            args.push_back(std::make_shared<ov::op::v0::Parameter>(element::f32, ishape.get_shape()));
+        }
+    }
+    OutputVector args;
+    std::vector<size_t> permute_axes;
+};
+
+TEST_P(SDPACpuShapeInferenceTest, shape_inference) {
+    ov::intel_cpu::ScaledDotProductAttentionWithKVCache::Config config;
+    config.permute_axes = permute_axes;
+    const auto op = make_op(args, config);
+    unit_test::cpu_test_shape_infer(op.get(), input_shapes, output_shapes);
+}
+
+INSTANTIATE_TEST_SUITE_P(CpuShapeInfer,
+                         SDPACpuShapeInferenceTest,
+                         Values(
+                             // llama
+                             make_tuple(unit_test::ShapeVector{{1, 32, 14, 128},
+                                                               {1, 32, 14, 128},
+                                                               {1, 32, 14, 128},
+                                                               {1, 1, 14, 14},
+                                                               {1},
+                                                               {1, 32, 0, 128},
+                                                               {1, 32, 0, 
128}}, + std::vector{}, + unit_test::ShapeVector{{1, 32, 14, 128}, {1, 32, 14, 128}, {1, 32, 14, 128}}), + make_tuple(unit_test::ShapeVector{{1, 32, 1, 128}, + {1, 32, 1, 128}, + {1, 32, 1, 128}, + {1, 1, 1, 16}, + {1}, + {1, 32, 15, 128}, + {1, 32, 15, 128}}, + std::vector{}, + unit_test::ShapeVector{{1, 32, 1, 128}, {1, 32, 16, 128}, {1, 32, 16, 128}}), + // chatglm + make_tuple(unit_test::ShapeVector{{1, 1, 32, 128}, + {1, 1, 2, 128}, + {1, 1, 2, 128}, + {1, 1, 1, 8}, + {1}, + {7, 1, 2, 128}, + {7, 1, 2, 128}}, + std::vector{1, 2, 0, 3}, + unit_test::ShapeVector{{1, 32, 1, 128}, {8, 1, 2, 128}, {8, 1, 2, 128}}), + make_tuple(unit_test::ShapeVector{{7, 1, 32, 128}, + {7, 1, 2, 128}, + {7, 1, 2, 128}, + {1, 1, 7, 7}, + {1}, + {0, 1, 2, 128}, + {0, 1, 2, 128}}, + std::vector{1, 2, 0, 3}, + unit_test::ShapeVector{{1, 32, 7, 128}, {7, 1, 2, 128}, {7, 1, 2, 128}}), + // qwen + make_tuple(unit_test::ShapeVector{{1, 1, 32, 128}, + {1, 1, 32, 128}, + {1, 1, 32, 128}, + {1, 1, 1, 5}, + {1}, + {1, 4, 32, 128}, + {1, 4, 32, 128}}, + std::vector{0, 2, 1, 3}, + unit_test::ShapeVector{{1, 32, 1, 128}, {1, 5, 32, 128}, {1, 5, 32, 128}}), + + make_tuple(unit_test::ShapeVector{{1, 4, 32, 128}, + {1, 4, 32, 128}, + {1, 4, 32, 128}, + {1, 1, 4, 4}, + {1}, + {1, 0, 32, 128}, + {1, 0, 32, 128}}, + std::vector{0, 2, 1, 3}, + unit_test::ShapeVector{{1, 32, 4, 128}, {1, 4, 32, 128}, {1, 4, 32, 128}})), + SDPACpuShapeInferenceTest::getTestCaseName); + +} // namespace cpu_shape_infer +} // namespace unit_test +} // namespace intel_cpu +} // namespace ov From 3bf6f11dfdad37a4aea6e9126d685e27c8581885 Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Mon, 15 Jan 2024 14:59:45 +0800 Subject: [PATCH 41/43] [Core][CPU] Upgrade ie::extension to ov::extension (#21915) --- .../plugins/create_plugins_hpp.cmake | 4 +- cmake/developer_package/plugins/plugins.cmake | 2 +- .../developer_package/plugins/plugins.hpp.in | 16 +- src/core/include/openvino/core/extension.hpp | 16 +- .../interface/ie_iplugin_internal.hpp | 25 +- src/inference/src/dev/core_impl.cpp | 4 +- src/inference/src/dev/core_impl.hpp | 11 +- src/plugins/intel_cpu/src/compiled_model.cpp | 6 +- src/plugins/intel_cpu/src/compiled_model.h | 3 - src/plugins/intel_cpu/src/extension.cpp | 299 +++++++----------- src/plugins/intel_cpu/src/extension.h | 22 -- src/plugins/intel_cpu/src/extension_mngr.cpp | 41 --- src/plugins/intel_cpu/src/extension_mngr.h | 28 -- src/plugins/intel_cpu/src/graph_context.h | 8 - src/plugins/intel_cpu/src/node.cpp | 5 +- src/plugins/intel_cpu/src/node.h | 5 - src/plugins/intel_cpu/src/nodes/if.h | 1 - .../intel_cpu/src/nodes/tensoriterator.h | 1 - src/plugins/intel_cpu/src/plugin.cpp | 15 +- src/plugins/intel_cpu/src/plugin.h | 5 - src/plugins/intel_cpu/src/serialize.cpp | 21 +- src/plugins/intel_cpu/src/serialize.h | 4 +- .../src/utils/ngraph_transformation.hpp | 4 +- .../tests/unit/graph/memory_state.cpp | 4 +- .../graph/merge_transpose_reorder_test.cpp | 2 +- .../graph/resolve_edge_conflicts_test.cpp | 2 +- .../tests/unit/nodes/reorder_node_test.cpp | 1 - 27 files changed, 167 insertions(+), 388 deletions(-) delete mode 100644 src/plugins/intel_cpu/src/extension.h delete mode 100644 src/plugins/intel_cpu/src/extension_mngr.cpp delete mode 100644 src/plugins/intel_cpu/src/extension_mngr.h diff --git a/cmake/developer_package/plugins/create_plugins_hpp.cmake b/cmake/developer_package/plugins/create_plugins_hpp.cmake index 1fedf858ce58ca..2c90da6cc9d97c 100644 --- a/cmake/developer_package/plugins/create_plugins_hpp.cmake +++ 
b/cmake/developer_package/plugins/create_plugins_hpp.cmake @@ -42,10 +42,10 @@ foreach(dev_map IN LISTS OV_DEVICE_MAPPING) # declarations set(OV_PLUGINS_DECLARATIONS "${OV_PLUGINS_DECLARATIONS} - IE_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(${_OV_CREATE_PLUGIN_FUNC});") + OV_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(${_OV_CREATE_PLUGIN_FUNC});") if(${actual_dev_name}_AS_EXTENSION) set(OV_PLUGINS_DECLARATIONS "${OV_PLUGINS_DECLARATIONS} - IE_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(${_OV_CREATE_EXTENSION_FUNC});") + OV_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(${_OV_CREATE_EXTENSION_FUNC});") else() set(_OV_CREATE_EXTENSION_FUNC "nullptr") endif() diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index 16a9e935a896c8..a8ee3e47d25497 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -80,7 +80,7 @@ function(ov_add_plugin) if(OV_PLUGIN_AS_EXTENSION) # to distinguish functions creating extensions objects target_compile_definitions(${OV_PLUGIN_NAME} PRIVATE - IE_CREATE_EXTENSION=CreateExtensionShared${OV_PLUGIN_DEVICE_NAME}) + OV_CREATE_EXTENSION=CreateExtensionShared${OV_PLUGIN_DEVICE_NAME}) endif() endif() diff --git a/cmake/developer_package/plugins/plugins.hpp.in b/cmake/developer_package/plugins/plugins.hpp.in index 224f77c8cb980b..2af0666e7b84a4 100644 --- a/cmake/developer_package/plugins/plugins.hpp.in +++ b/cmake/developer_package/plugins/plugins.hpp.in @@ -9,13 +9,23 @@ #ifdef OPENVINO_STATIC_LIBRARY -#include "cpp_interfaces/interface/ie_iplugin_internal.hpp" +// The Macro used to create extensions for static library +#define OV_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(_OV_CREATE_EXTENSION_FUNC) \ + OPENVINO_EXTENSION_C_API void \ + _OV_CREATE_EXTENSION_FUNC(std::vector<::ov::Extension::Ptr>& ext) + +// The Macro used to create plugin for static library +#define OV_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(_OV_CREATE_PLUGIN_FUNC) \ + OPENVINO_PLUGIN_API void \ + _OV_CREATE_PLUGIN_FUNC(::std::shared_ptr<::ov::IPlugin> &plugin) noexcept(false) @OV_PLUGINS_DECLARATIONS@ +using CreateExtensionFunc = void(std::vector<::ov::Extension::Ptr>&); +using CreatePluginEngineFunc = void(std::shared_ptr<::ov::IPlugin>&); struct Value { - InferenceEngine::CreatePluginEngineFunc * m_create_plugin_func; - InferenceEngine::CreateExtensionFunc * m_create_extension_func; + CreatePluginEngineFunc * m_create_plugin_func; + CreateExtensionFunc * m_create_extension_func; std::map m_default_config; }; diff --git a/src/core/include/openvino/core/extension.hpp b/src/core/include/openvino/core/extension.hpp index 7e02703e6281a2..a403675ad2e522 100644 --- a/src/core/include/openvino/core/extension.hpp +++ b/src/core/include/openvino/core/extension.hpp @@ -28,24 +28,28 @@ class OPENVINO_API Extension { virtual ~Extension(); }; +} // namespace ov +#ifndef OV_CREATE_EXTENSION /** * @brief The entry point for library with OpenVINO extensions * * @param vector of extensions */ OPENVINO_EXTENSION_C_API -void create_extensions(std::vector&); +void create_extensions(std::vector&); -} // namespace ov +# define OV_CREATE_EXTENSION create_extensions + +#endif /** * @brief Macro generates the entry point for the library * * @param vector of extensions */ -#define OPENVINO_CREATE_EXTENSIONS(extensions) \ - OPENVINO_EXTENSION_C_API \ - void ::ov::create_extensions(std::vector<::ov::Extension::Ptr>& ext) { \ - ext = extensions; \ +#define OPENVINO_CREATE_EXTENSIONS(extensions) \ + 
OPENVINO_EXTENSION_C_API void OV_CREATE_EXTENSION(std::vector& ext); \ + OPENVINO_EXTENSION_C_API void OV_CREATE_EXTENSION(std::vector& ext) { \ + ext = extensions; \ } diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index 859e56df154f05..eb0e8d38c46f87 100644 --- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -19,6 +19,7 @@ #include "ie_iextension.h" #include "ie_input_info.hpp" #include "ie_parameter.hpp" +#include "openvino/core/extension.hpp" #include "openvino/runtime/iplugin.hpp" #include "openvino/util/pp.hpp" #include "so_ptr.hpp" @@ -377,16 +378,6 @@ class INFERENCE_ENGINE_1_0_DEPRECATED INFERENCE_ENGINE_API_CLASS(IInferencePlugi bool _isNewAPI; //!< A flag which shows used API }; -/** - * @private - */ -using CreatePluginEngineFunc = void(std::shared_ptr<::ov::IPlugin>&); - -/** - * @private - */ -using CreateExtensionFunc = void(std::shared_ptr&); - /** * @def IE_CREATE_PLUGIN * @brief Defines a name of a function creating plugin instance @@ -428,17 +419,3 @@ convert_plugin(const std::shared_ptr& from); ie_plugin->SetVersion(version); \ plugin = convert_plugin(ie_plugin); \ } - -/** - * @private - */ -#define IE_DEFINE_PLUGIN_CREATE_FUNCTION_DECLARATION(_IE_CREATE_PLUGIN_FUNC) \ - INFERENCE_PLUGIN_API(void) \ - _IE_CREATE_PLUGIN_FUNC(::std::shared_ptr<::ov::IPlugin>& plugin) noexcept(false) - -/** - * @private - */ -#define IE_DEFINE_EXTENSION_CREATE_FUNCTION_DECLARATION(_IE_CREATE_EXTENSION_FUNC) \ - INFERENCE_EXTENSION_API(void) \ - _IE_CREATE_EXTENSION_FUNC(::InferenceEngine::IExtensionPtr& ext) diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index ced075a2d89f76..6489f7e4af8b32 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -703,9 +703,9 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const { if (desc.extensionCreateFunc) { // static OpenVINO case try { - InferenceEngine::IExtensionPtr ext; + std::vector ext; desc.extensionCreateFunc(ext); - AddExtensionUnsafe(ext); + add_extensions_unsafe(ext); } catch (const InferenceEngine::GeneralError&) { // the same extension can be registered multiple times - ignore it! 
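// Illustrative sketch, not part of this patch: with the reworked macro above, a
// third-party extension library now fills a std::vector<ov::Extension::Ptr>
// instead of returning an InferenceEngine::IExtensionPtr. Assuming a
// hypothetical user op MyCustomOp, a minimal extension library reduces to:
//
//   #include "openvino/core/extension.hpp"
//   #include "openvino/core/op_extension.hpp"
//   #include "my_custom_op.hpp"  // hypothetical user header
//
//   OPENVINO_CREATE_EXTENSIONS(std::vector<ov::Extension::Ptr>(
//       {std::make_shared<ov::OpExtension<MyCustomOp>>()}));
//
// The static-plugin branch here obtains the same vector through
// desc.extensionCreateFunc, so dynamic and static builds converge on
// add_extensions_unsafe().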
} diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 86365f891fcc64..2a4415ad941bd4 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ -26,6 +26,9 @@ namespace ov { +using CreateExtensionFunc = void(std::vector<::ov::Extension::Ptr>&); +using CreatePluginEngineFunc = void(std::shared_ptr<::ov::IPlugin>&); + const std::string DEFAULT_DEVICE_NAME = "DEFAULT_DEVICE"; struct Parsed { @@ -123,8 +126,8 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t ov::util::FilePath libraryLocation; ov::AnyMap defaultConfig; std::vector listOfExtentions; - InferenceEngine::CreatePluginEngineFunc* pluginCreateFunc = nullptr; - InferenceEngine::CreateExtensionFunc* extensionCreateFunc = nullptr; + CreatePluginEngineFunc* pluginCreateFunc = nullptr; + CreateExtensionFunc* extensionCreateFunc = nullptr; PluginDescriptor() = default; @@ -136,9 +139,9 @@ class CoreImpl : public InferenceEngine::ICore, public std::enable_shared_from_t this->listOfExtentions = listOfExtentions; } - PluginDescriptor(InferenceEngine::CreatePluginEngineFunc* pluginCreateFunc, + PluginDescriptor(CreatePluginEngineFunc* pluginCreateFunc, const ov::AnyMap& defaultConfig = {}, - InferenceEngine::CreateExtensionFunc* extensionCreateFunc = nullptr) { + CreateExtensionFunc* extensionCreateFunc = nullptr) { this->pluginCreateFunc = pluginCreateFunc; this->defaultConfig = defaultConfig; this->extensionCreateFunc = extensionCreateFunc; diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index f45f1e2a2b70b3..f81f59f94ae418 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -40,13 +40,11 @@ struct ImmediateSerialExecutor : public ov::threading::ITaskExecutor { CompiledModel::CompiledModel(const std::shared_ptr& model, const std::shared_ptr& plugin, const Config& cfg, - const ExtensionManager::Ptr& extMgr, const bool loaded_from_cache) : ov::ICompiledModel::ICompiledModel(model, plugin), m_model(model), m_plugin(plugin), m_cfg{cfg}, - extensionManager(extMgr), m_name{model->get_name()}, m_loaded_from_cache(loaded_from_cache) { bool isFloatModel = !ov::op::util::has_op_with_type(m_model); @@ -125,7 +123,7 @@ CompiledModel::GraphGuard::Lock CompiledModel::get_graph() const { (m_cfg.lpTransformsMode == Config::On) && ov::pass::low_precision::LowPrecision::isFunctionQuantized(m_model); - ctx = std::make_shared(m_cfg, extensionManager, weightsCache, isQuantizedFlag); + ctx = std::make_shared(m_cfg, weightsCache, isQuantizedFlag); } const std::shared_ptr model = m_model; graphLock._graph.CreateGraph(model, ctx); @@ -306,7 +304,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { } void CompiledModel::export_model(std::ostream& modelStream) const { - ModelSerializer serializer(modelStream, extensionManager); + ModelSerializer serializer(modelStream); serializer << m_model; } diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index 0561d97e556952..d11ece0e8c2aea 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -7,7 +7,6 @@ #include #include -#include "extension_mngr.h" #include "graph.h" #include "graph_context.h" #include "openvino/runtime/icompiled_model.hpp" @@ -26,7 +25,6 @@ class CompiledModel : public ov::ICompiledModel { CompiledModel(const std::shared_ptr& model, const std::shared_ptr& 
plugin, const Config& cfg, - const ExtensionManager::Ptr& extMgr, const bool loaded_from_cache = false); std::shared_ptr create_infer_request() const override; @@ -55,7 +53,6 @@ class CompiledModel : public ov::ICompiledModel { // Usage example: helps to avoid data races during CPU Graph initialization in multi-streams scenario std::shared_ptr m_mutex; Config m_cfg; - ExtensionManager::Ptr extensionManager; mutable std::atomic_int m_numRequests = {0}; std::string m_name; struct GraphGuard : public Graph { diff --git a/src/plugins/intel_cpu/src/extension.cpp b/src/plugins/intel_cpu/src/extension.cpp index 9cda1c4fa26175..41d91bfc382681 100644 --- a/src/plugins/intel_cpu/src/extension.cpp +++ b/src/plugins/intel_cpu/src/extension.cpp @@ -2,204 +2,137 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "extension.h" +#include "openvino/core/extension.hpp" + +#include "openvino/core/op_extension.hpp" +#include "ov_ops/augru_cell.hpp" +#include "ov_ops/augru_sequence.hpp" +#include "ov_ops/multiclass_nms_ie_internal.hpp" +#include "ov_ops/nms_ie_internal.hpp" +#include "ov_ops/nms_static_shape_ie.hpp" +#include "ov_ops/type_relaxed.hpp" +#include "snippets/op/subgraph.hpp" #include "transformations/cpu_opset/common/op/fully_connected.hpp" #include "transformations/cpu_opset/common/op/leaky_relu.hpp" +#include "transformations/cpu_opset/common/op/ngram.hpp" #include "transformations/cpu_opset/common/op/power_static.hpp" #include "transformations/cpu_opset/common/op/sdpa.hpp" #include "transformations/cpu_opset/common/op/swish_cpu.hpp" -#include "transformations/cpu_opset/common/op/ngram.hpp" -#include "transformations/cpu_opset/x64/op/mha.hpp" #include "transformations/cpu_opset/x64/op/interaction.hpp" -#include "transformations/snippets/x64/op/load_convert.hpp" -#include "transformations/snippets/x64/op/store_convert.hpp" -#include "transformations/snippets/x64/op/brgemm_cpu.hpp" +#include "transformations/cpu_opset/x64/op/mha.hpp" #include "transformations/snippets/x64/op/brgemm_copy_b.hpp" +#include "transformations/snippets/x64/op/brgemm_cpu.hpp" +#include "transformations/snippets/x64/op/load_convert.hpp" #include "transformations/snippets/x64/op/perf_count_rdtsc.hpp" +#include "transformations/snippets/x64/op/store_convert.hpp" -#include -#include -#include -#include -#include -#include - -#include "snippets/op/subgraph.hpp" - -#include - -namespace ov { -namespace intel_cpu { - -void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept { - static const InferenceEngine::Version version = { - {1, 0}, // extension API version - "1.0", - "Extension" // extension description message - }; - - versionInfo = &version; -} - -void Extension::Unload() noexcept {} - -std::map Extension::getOpSets() { - auto cpu_plugin_opset = []() { - ngraph::OpSet opset; +#define OP_EXTENSION(NAME) std::make_shared>(), #if defined(OPENVINO_ARCH_X86_64) -#define NGRAPH_OP_X64(NAME, NAMESPACE) NGRAPH_OP(NAME, NAMESPACE) +# define OP_EXTENSION_X64(NAME) OP_EXTENSION(NAME) #else -#define NGRAPH_OP_X64(NAME, NAMESPACE) +# define OP_EXTENSION_X64(NAME) #endif -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); - NGRAPH_OP(FullyConnectedNode, ov::intel_cpu) - NGRAPH_OP(LeakyReluNode, ov::intel_cpu) - NGRAPH_OP(PowerStaticNode, ov::intel_cpu) - NGRAPH_OP(SwishNode, ov::intel_cpu) - NGRAPH_OP(NgramNode, ov::intel_cpu) - NGRAPH_OP_X64(MHANode, ov::intel_cpu) - NGRAPH_OP_X64(InteractionNode, ov::intel_cpu) - NGRAPH_OP_X64(ScaledDotProductAttentionWithKVCache, ov::intel_cpu) -#undef NGRAPH_OP - 
- return opset; - }; - - auto type_relaxed_opset = []() { - ngraph::OpSet opset; - -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert>(); - NGRAPH_OP(Add, ov::op::v1) - NGRAPH_OP(AvgPool, ov::op::v1) - NGRAPH_OP(Clamp, ov::op::v0) - NGRAPH_OP(Concat, ov::op::v0) - NGRAPH_OP(Convolution, ov::op::v1) - NGRAPH_OP(ConvolutionBackpropData, ov::op::v1) - NGRAPH_OP(DepthToSpace, ov::op::v0) - NGRAPH_OP(Equal, ov::op::v1) - NGRAPH_OP(FakeQuantize, ov::op::v0) - NGRAPH_OP(Greater, ov::op::v1) - NGRAPH_OP(GreaterEqual, ov::op::v1) - NGRAPH_OP(GroupConvolution, ov::op::v1) - NGRAPH_OP(GroupConvolutionBackpropData, ov::op::v1) - NGRAPH_OP(Interpolate, ov::op::v0) - NGRAPH_OP(Interpolate, ov::op::v4) - NGRAPH_OP(Less, ov::op::v1) - NGRAPH_OP(LessEqual, ov::op::v1) - NGRAPH_OP(LogicalAnd, ov::op::v1) - NGRAPH_OP(LogicalNot, ov::op::v1) - NGRAPH_OP(LogicalOr, ov::op::v1) - NGRAPH_OP(LogicalXor, ov::op::v1) - NGRAPH_OP(MatMul, ov::op::v0) - NGRAPH_OP(MaxPool, ov::op::v1) - NGRAPH_OP(Multiply, ov::op::v1) - NGRAPH_OP(NormalizeL2, ov::op::v0) - NGRAPH_OP(NotEqual, ov::op::v1) - NGRAPH_OP(PRelu, ov::op::v0) - NGRAPH_OP(Relu, ov::op::v0) - NGRAPH_OP(ReduceMax, ov::op::v1) - NGRAPH_OP(ReduceLogicalAnd, ov::op::v1) - NGRAPH_OP(ReduceLogicalOr, ov::op::v1) - NGRAPH_OP(ReduceMean, ov::op::v1) - NGRAPH_OP(ReduceMin, ov::op::v1) - NGRAPH_OP(ReduceSum, ov::op::v1) - NGRAPH_OP(Reshape, ov::op::v1) - NGRAPH_OP(Select, ov::op::v1) - NGRAPH_OP(ShapeOf, ov::op::v0) - NGRAPH_OP(ShuffleChannels, ov::op::v0) - NGRAPH_OP(Squeeze, ov::op::v0) - NGRAPH_OP(Subtract, ov::op::v1) - NGRAPH_OP(Unsqueeze, ov::op::v0) - NGRAPH_OP(MVN, ov::op::v0) - NGRAPH_OP(MVN, ov::op::v6) - NGRAPH_OP(Select, ov::op::v1) - NGRAPH_OP(ConvolutionBackpropData, ov::op::v1) -#undef NGRAPH_OP - - return opset; - }; - - auto ie_internal_opset = []() { - ngraph::OpSet opset; - -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); - NGRAPH_OP(NonMaxSuppressionIEInternal, ov::op::internal) - NGRAPH_OP(MulticlassNmsIEInternal, ov::op::internal) - NGRAPH_OP(AUGRUCell, ov::op::internal) - NGRAPH_OP(AUGRUSequence, ov::op::internal) - NGRAPH_OP(NmsStaticShapeIE, ov::op::internal) -#undef NGRAPH_OP - - return opset; - }; - - auto snippets_opset = []() { - ngraph::OpSet opset; +#define CPU_EXTENSIONS \ + OP_EXTENSION(ov::intel_cpu::FullyConnectedNode) \ + OP_EXTENSION(ov::intel_cpu::LeakyReluNode) \ + OP_EXTENSION(ov::intel_cpu::PowerStaticNode) \ + OP_EXTENSION(ov::intel_cpu::SwishNode) \ + OP_EXTENSION(ov::intel_cpu::NgramNode) \ + OP_EXTENSION(ov::op::internal::NonMaxSuppressionIEInternal) \ + OP_EXTENSION(ov::op::internal::MulticlassNmsIEInternal) \ + OP_EXTENSION(ov::op::internal::AUGRUCell) \ + OP_EXTENSION(ov::op::internal::AUGRUSequence) \ + OP_EXTENSION(ov::op::internal::NmsStaticShapeIE) \ + OP_EXTENSION_X64(ov::intel_cpu::MHANode) \ + OP_EXTENSION_X64(ov::intel_cpu::InteractionNode) \ + OP_EXTENSION_X64(ov::intel_cpu::ScaledDotProductAttentionWithKVCache) \ + OP_EXTENSION_X64(ov::intel_cpu::LoadConvertSaturation) \ + OP_EXTENSION_X64(ov::intel_cpu::LoadConvertTruncation) \ + OP_EXTENSION_X64(ov::intel_cpu::StoreConvertSaturation) \ + OP_EXTENSION_X64(ov::intel_cpu::StoreConvertTruncation) \ + OP_EXTENSION_X64(ov::intel_cpu::BrgemmCPU) \ + OP_EXTENSION_X64(ov::intel_cpu::BrgemmCopyB) + +#define TYPE_RELAXED_EXTENSIONS \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + 
OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) \ + OP_EXTENSION(ov::op::TypeRelaxed) -#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); - NGRAPH_OP(Brgemm, ov::snippets::op) - NGRAPH_OP(BroadcastLoad, ov::snippets::op) - NGRAPH_OP(BroadcastMove, ov::snippets::op) - NGRAPH_OP(ConvertSaturation, ov::snippets::op) - NGRAPH_OP(ConvertTruncation, ov::snippets::op) - NGRAPH_OP(Fill, ov::snippets::op) - NGRAPH_OP(HorizonMax, ov::snippets::op) - NGRAPH_OP(HorizonSum, ov::snippets::op) - NGRAPH_OP(Kernel, ov::snippets::op) - NGRAPH_OP(IntermediateMemoryBuffer, ov::snippets::op) - NGRAPH_OP(Load, ov::snippets::op) - NGRAPH_OP(LoadReshape, ov::snippets::op) - NGRAPH_OP(LoopBegin, ov::snippets::op) - NGRAPH_OP(LoopEnd, ov::snippets::op) - NGRAPH_OP(NewMemoryBuffer, ov::snippets::op) - NGRAPH_OP(Nop, ov::snippets::op) - NGRAPH_OP(PowerStatic, ov::snippets::op) - NGRAPH_OP(Scalar, ov::snippets::op) - NGRAPH_OP(Store, ov::snippets::op) - NGRAPH_OP(Subgraph, ov::snippets::op) - NGRAPH_OP(VectorBuffer, ov::snippets::op) - NGRAPH_OP(RankNormalization, ov::snippets::op) #ifdef SNIPPETS_DEBUG_CAPS - NGRAPH_OP(PerfCountBegin, ov::snippets::op) - NGRAPH_OP(PerfCountEnd, ov::snippets::op) -#endif - NGRAPH_OP_X64(LoadConvertSaturation, ov::intel_cpu) - NGRAPH_OP_X64(LoadConvertTruncation, ov::intel_cpu) - NGRAPH_OP_X64(StoreConvertSaturation, ov::intel_cpu) - NGRAPH_OP_X64(StoreConvertTruncation, ov::intel_cpu) - NGRAPH_OP_X64(BrgemmCPU, ov::intel_cpu) - NGRAPH_OP_X64(BrgemmCopyB, ov::intel_cpu) -#ifdef SNIPPETS_DEBUG_CAPS - NGRAPH_OP_X64(PerfCountRdtscBegin, ov::intel_cpu) - NGRAPH_OP_X64(PerfCountRdtscEnd, ov::intel_cpu) +# define SNIPPETS_DEBUG_CAPS_EXTENSIONS \ + OP_EXTENSION(ov::snippets::op::PerfCountBegin) \ + OP_EXTENSION(ov::snippets::op::PerfCountEnd) \ + OP_EXTENSION_X64(ov::intel_cpu::PerfCountRdtscBegin) \ + OP_EXTENSION_X64(ov::intel_cpu::PerfCountRdtscEnd) +#else +# define SNIPPETS_DEBUG_CAPS_EXTENSIONS #endif -#undef NGRAPH_OP - - return opset; - }; - - static std::map opsets = { - { "cpu_plugin_opset", cpu_plugin_opset() }, - { "type_relaxed_opset", type_relaxed_opset() }, - { "ie_internal_opset", ie_internal_opset() }, - { "SnippetsOpset", snippets_opset() }, - }; - - return opsets; -} - -std::vector 
Extension::getImplTypes(const std::shared_ptr&) { - return {}; -} - -InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr& node, const std::string& implType) { - return nullptr; -} - -} // namespace intel_cpu -} // namespace ov -// Generate exported function -IE_DEFINE_EXTENSION_CREATE_FUNCTION(ov::intel_cpu::Extension) +#define SNIPPETS_EXTENSIONS \ + OP_EXTENSION(ov::snippets::op::Brgemm) \ + OP_EXTENSION(ov::snippets::op::BroadcastLoad) \ + OP_EXTENSION(ov::snippets::op::BroadcastMove) \ + OP_EXTENSION(ov::snippets::op::ConvertSaturation) \ + OP_EXTENSION(ov::snippets::op::ConvertTruncation) \ + OP_EXTENSION(ov::snippets::op::Fill) \ + OP_EXTENSION(ov::snippets::op::HorizonMax) \ + OP_EXTENSION(ov::snippets::op::HorizonSum) \ + OP_EXTENSION(ov::snippets::op::Kernel) \ + OP_EXTENSION(ov::snippets::op::IntermediateMemoryBuffer) \ + OP_EXTENSION(ov::snippets::op::Load) \ + OP_EXTENSION(ov::snippets::op::LoadReshape) \ + OP_EXTENSION(ov::snippets::op::LoopBegin) \ + OP_EXTENSION(ov::snippets::op::LoopEnd) \ + OP_EXTENSION(ov::snippets::op::NewMemoryBuffer) \ + OP_EXTENSION(ov::snippets::op::Nop) \ + OP_EXTENSION(ov::snippets::op::PowerStatic) \ + OP_EXTENSION(ov::snippets::op::Scalar) \ + OP_EXTENSION(ov::snippets::op::Store) \ + OP_EXTENSION(ov::snippets::op::Subgraph) \ + OP_EXTENSION(ov::snippets::op::VectorBuffer) \ + OP_EXTENSION(ov::snippets::op::RankNormalization) + +OPENVINO_CREATE_EXTENSIONS(std::vector( + {CPU_EXTENSIONS TYPE_RELAXED_EXTENSIONS SNIPPETS_EXTENSIONS SNIPPETS_DEBUG_CAPS_EXTENSIONS})); diff --git a/src/plugins/intel_cpu/src/extension.h b/src/plugins/intel_cpu/src/extension.h deleted file mode 100644 index 6d36a20b38a598..00000000000000 --- a/src/plugins/intel_cpu/src/extension.h +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace ov { -namespace intel_cpu { - -class Extension : public InferenceEngine::IExtension { -public: - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override; - void Unload() noexcept override; - std::map getOpSets() override; - std::vector getImplTypes(const std::shared_ptr& node) override; - InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr& node, const std::string& implType) override; -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/extension_mngr.cpp b/src/plugins/intel_cpu/src/extension_mngr.cpp deleted file mode 100644 index d842f227fb5590..00000000000000 --- a/src/plugins/intel_cpu/src/extension_mngr.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "extension_mngr.h" - -using namespace InferenceEngine; - -namespace ov { -namespace intel_cpu { - -void ExtensionManager::AddExtension(const IExtensionPtr& extension) { - _extensions.push_back(extension); -} - -InferenceEngine::ILayerImpl::Ptr ExtensionManager::CreateImplementation(const std::shared_ptr& op) { - if (!op) - OPENVINO_THROW("Cannot get nGraph operation!"); - for (const auto& ext : _extensions) { - auto implTypes = ext->getImplTypes(op); - for (const auto& type : implTypes) { - if (type != "CPU") - continue; - auto impl = ext->getImplementation(op, "CPU"); - if (impl) - return impl; - } - } - return nullptr; -} - -const std::vector & ExtensionManager::Extensions() const { - return _extensions; -} - -} // namespace intel_cpu -} // 
namespace ov diff --git a/src/plugins/intel_cpu/src/extension_mngr.h b/src/plugins/intel_cpu/src/extension_mngr.h deleted file mode 100644 index 67505861bcdd75..00000000000000 --- a/src/plugins/intel_cpu/src/extension_mngr.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -namespace ov { -namespace intel_cpu { - -class ExtensionManager { -public: - using Ptr = std::shared_ptr; - ExtensionManager() = default; - InferenceEngine::ILayerImpl::Ptr CreateImplementation(const std::shared_ptr& op); - void AddExtension(const InferenceEngine::IExtensionPtr& extension); - const std::vector & Extensions() const; - -private: - std::vector _extensions; -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/graph_context.h b/src/plugins/intel_cpu/src/graph_context.h index 2d90d039ba1489..3fc8f7e40fbf21 100644 --- a/src/plugins/intel_cpu/src/graph_context.h +++ b/src/plugins/intel_cpu/src/graph_context.h @@ -7,7 +7,6 @@ #include "cache/multi_cache.h" #include "config.h" #include "dnnl_scratch_pad.h" -#include "extension_mngr.h" #include "weights_cache.hpp" namespace ov { @@ -19,11 +18,9 @@ class GraphContext { typedef std::shared_ptr CPtr; GraphContext(const Config& config, - ExtensionManager::Ptr extensionManager, WeightsSharing::Ptr w_cache, bool isGraphQuantized) : config(config), - extensionManager(extensionManager), weightsCache(w_cache), isGraphQuantizedFlag(isGraphQuantized) { rtParamsCache = std::make_shared(config.rtCacheCapacity); @@ -34,10 +31,6 @@ class GraphContext { return config; } - ExtensionManager::Ptr getExtensionManager() const { - return extensionManager; - } - WeightsSharing::Ptr getWeightsCache() const { return weightsCache; } @@ -60,7 +53,6 @@ class GraphContext { private: Config config; // network-level config - ExtensionManager::Ptr extensionManager; WeightsSharing::Ptr weightsCache; // per NUMA node caches for sharing weights data MultiCachePtr rtParamsCache; // primitive cache diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index de686d0ff3185b..660d2b8d6bd7b2 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -10,7 +10,6 @@ #include "dnnl_extension_utils.h" #include "dnnl_types.h" #include "edge.h" -#include "extension_mngr.h" #include "itt.h" #include "memory_desc/cpu_memory_desc_utils.h" #include "memory_desc/dnnl_blocked_memory_desc.h" @@ -1295,7 +1294,7 @@ Node* Node::NodesFactory::create(const std::shared_ptr& op, const Grap if (newNode == nullptr) { try { std::unique_ptr ol(createNodeIfRegistered(intel_cpu, TypeFromName(op->get_type_name()), op, context)); - if (ol != nullptr && ol->created(context->getExtensionManager())) + if (ol != nullptr && ol->created()) newNode = ol.release(); } catch (const ov::Exception& ex) { if (dynamic_cast(&ex) != nullptr) { @@ -1309,7 +1308,7 @@ Node* Node::NodesFactory::create(const std::shared_ptr& op, const Grap if (newNode == nullptr) { try { std::unique_ptr ol(new Reference(op, context, errorMessage)); - if (ol != nullptr && ol->created(context->getExtensionManager())) + if (ol != nullptr && ol->created()) newNode = ol.release(); } catch (const ov::Exception& ex) { if (dynamic_cast(&ex) != nullptr) { diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 7601d09c4cd0d1..da529fbefacde7 100644 --- a/src/plugins/intel_cpu/src/node.h +++ 
b/src/plugins/intel_cpu/src/node.h @@ -12,8 +12,6 @@ #include "dnnl_postops_composer.h" #include "dnnl_scratch_pad.h" #include "edge.h" -#include "extension_mngr.h" -#include "graph_context.h" #include "nodes/common/blocked_desc_creator.h" #include "nodes/executors/executor.hpp" #include "nodes/executors/mvn_list.hpp" @@ -410,9 +408,6 @@ class Node { const std::vector& outputDesc) {} virtual void initDescriptor(const NodeConfig& config); virtual bool created() const = 0; - virtual bool created(const ExtensionManager::Ptr& extMgr) { - return created(); - } /** * @brief Performs Node initialization based on graph context. diff --git a/src/plugins/intel_cpu/src/nodes/if.h b/src/plugins/intel_cpu/src/nodes/if.h index 76a87874209984..ff41bd2a8c6dbb 100644 --- a/src/plugins/intel_cpu/src/nodes/if.h +++ b/src/plugins/intel_cpu/src/nodes/if.h @@ -59,7 +59,6 @@ class If : public Node { ptrdiff_t size; }; - ExtensionManager::Ptr ext_mng; Graph subGraphThen; Graph subGraphElse; std::vector> inputMemThen, inputMemElse; diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.h b/src/plugins/intel_cpu/src/nodes/tensoriterator.h index 104ee077f9a163..07a1c0106b799b 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.h +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.h @@ -140,7 +140,6 @@ class TensorIterator : public Node { int getNumIteration(const std::vector& inputPortMap, const std::vector& outputPortMap) const; bool runAsDynamic() const; - ExtensionManager::Ptr ext_mng; Graph sub_graph; std::vector> input_mems; std::vector output_mem; diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index e9a64a821431a6..0313e5ed3c4a8b 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -4,8 +4,6 @@ #include "plugin.h" -#include "extension.h" -#include "extension_mngr.h" #include "itt.h" #include "internal_properties.hpp" #include "openvino/runtime/intel_cpu/properties.hpp" @@ -171,7 +169,6 @@ Engine::Engine() : get_executor_manager()->execute_task_by_streams_executor(IStreamsExecutor::Config::PreferredCoreType::BIG, [] { dnnl::impl::cpu::x64::cpu(); }); - extensionManager->AddExtension(std::make_shared()); #if defined(OV_CPU_WITH_ACL) scheduler_guard = SchedulerGuard::instance(); #endif @@ -614,7 +611,7 @@ Engine::compile_model(const std::shared_ptr& model, const ov::A denormals_as_zero(false); } } - return std::make_shared(cloned_model, shared_from_this(), conf, extensionManager); + return std::make_shared(cloned_model, shared_from_this(), conf); } void Engine::set_property(const ov::AnyMap &config) { @@ -868,12 +865,6 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& optio OPENVINO_THROW("Cannot get unsupported property: ", name); } -OPENVINO_SUPPRESS_DEPRECATED_START -void Engine::add_extension(const InferenceEngine::IExtensionPtr& extension) { - extensionManager->AddExtension(extension); -} -OPENVINO_SUPPRESS_DEPRECATED_END - ov::SupportedOpsMap Engine::query_model(const std::shared_ptr& model, const ov::AnyMap& config) const { WeightsSharing::Ptr fake_w_cache; @@ -892,7 +883,7 @@ ov::SupportedOpsMap Engine::query_model(const std::shared_ptr& const Config::SnippetsMode snippetsMode = getSnippetsMode(config, conf); auto context = - std::make_shared(conf, extensionManager, fake_w_cache, false); + std::make_shared(conf, fake_w_cache, false); auto supported = ov::get_supported_nodes( model, @@ -945,7 +936,7 @@ std::shared_ptr Engine::import_model(std::istream& networkMo // import config 
     calculate_streams(conf, model, true);
-    auto compiled_model = std::make_shared<CompiledModel>(model, shared_from_this(), conf, extensionManager, true);
+    auto compiled_model = std::make_shared<CompiledModel>(model, shared_from_this(), conf, true);
     return compiled_model;
 }
 }   // namespace intel_cpu
diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h
index 256eafdbadbaab..756387aa48a13d 100644
--- a/src/plugins/intel_cpu/src/plugin.h
+++ b/src/plugins/intel_cpu/src/plugin.h
@@ -43,10 +43,6 @@ class Engine : public ov::IPlugin {
         OPENVINO_THROW_NOT_IMPLEMENTED("Not Implemented get_default_context is not supported by CPU plugin!");
     };
 
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    void add_extension(const std::shared_ptr<InferenceEngine::IExtension>& extension) override;
-    OPENVINO_SUPPRESS_DEPRECATED_END
-
 private:
     bool is_legacy_api() const;
 
@@ -62,7 +58,6 @@ class Engine : public ov::IPlugin {
     void calculate_streams(Config& conf, const std::shared_ptr<ov::Model>& model, bool imported = false) const;
 
     Config engConfig;
-    ExtensionManager::Ptr extensionManager = std::make_shared<ExtensionManager>();
     /* Explicily configured streams have higher priority than performance hints.
        So track if streams is set explicitly (not auto-configured) */
     bool streamsExplicitlySetForEngine = false;
diff --git a/src/plugins/intel_cpu/src/serialize.cpp b/src/plugins/intel_cpu/src/serialize.cpp
index ce7304de14af9a..777d7ea8a04ecc 100644
--- a/src/plugins/intel_cpu/src/serialize.cpp
+++ b/src/plugins/intel_cpu/src/serialize.cpp
@@ -24,27 +24,10 @@ static void setInfo(pugi::xml_node& root, std::shared_ptr<ov::Model>& model) {
     }
 }
 
-ModelSerializer::ModelSerializer(std::ostream & ostream, ExtensionManager::Ptr extensionManager)
-    : _ostream(ostream)
-    , _extensionManager(extensionManager) {
-}
+ModelSerializer::ModelSerializer(std::ostream& ostream) : _ostream(ostream) {}
 
 void ModelSerializer::operator<<(const std::shared_ptr<ov::Model>& model) {
     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto getCustomOpSets = [this]() {
-        std::map<std::string, ngraph::OpSet> custom_opsets;
-
-        if (_extensionManager) {
-            auto extensions = _extensionManager->Extensions();
-            for (const auto& extension : extensions) {
-                auto opset = extension->getOpSets();
-                custom_opsets.insert(std::begin(opset), std::end(opset));
-            }
-        }
-
-        return custom_opsets;
-    };
-
     auto serializeInfo = [&](std::ostream& stream) {
         const std::string name = "cnndata";
         pugi::xml_document xml_doc;
@@ -59,7 +42,7 @@ void ModelSerializer::operator<<(const std::shared_ptr<ov::Model>& model) {
     };
 
     // Serialize to old representation in case of old API
-    ov::pass::StreamSerialize serializer(_ostream, getCustomOpSets(), serializeInfo);
+    ov::pass::StreamSerialize serializer(_ostream, serializeInfo);
     OPENVINO_SUPPRESS_DEPRECATED_END
     serializer.run_on_model(std::const_pointer_cast<const ov::Model>(model->clone()));
 }
diff --git a/src/plugins/intel_cpu/src/serialize.h b/src/plugins/intel_cpu/src/serialize.h
index 5bbb22661003c7..b0c57a7ea9d91a 100644
--- a/src/plugins/intel_cpu/src/serialize.h
+++ b/src/plugins/intel_cpu/src/serialize.h
@@ -6,19 +6,17 @@
 #include
 
 #include "cpp/ie_cnn_network.h"
-#include "extension_mngr.h"
 
 namespace ov {
 namespace intel_cpu {
 
 class ModelSerializer {
 public:
-    ModelSerializer(std::ostream& ostream, ExtensionManager::Ptr extensionManager);
+    ModelSerializer(std::ostream& ostream);
     void operator<<(const std::shared_ptr<ov::Model>& model);
 
 private:
     std::ostream& _ostream;
-    ExtensionManager::Ptr _extensionManager;
 };
 
 class ModelDeserializer {
diff --git a/src/plugins/intel_cpu/src/utils/ngraph_transformation.hpp b/src/plugins/intel_cpu/src/utils/ngraph_transformation.hpp
index 47466dbab52052..f7e9ecf30dbd4d 100644
--- a/src/plugins/intel_cpu/src/utils/ngraph_transformation.hpp
+++ b/src/plugins/intel_cpu/src/utils/ngraph_transformation.hpp
@@ -4,7 +4,6 @@
 #pragma once
 #ifdef CPU_DEBUG_CAPS
 
-#include "extension.h"
 #include "debug_caps_config.h"
 #include "openvino/util/file_util.hpp"
 #include
@@ -68,8 +67,7 @@ class TransformationDumper {
         ov::pass::Manager serializer;
 
         if (config.dumpIR.format.filter[DebugCapsConfig::IrFormatFilter::XmlBin]) {
-            auto custom_opsets = std::make_shared<Extension>()->getOpSets();
-            serializer.register_pass<ov::pass::Serialize>(pathAndName + ".xml", "", custom_opsets);
+            serializer.register_pass<ov::pass::Serialize>(pathAndName + ".xml", "");
         }
 
         if (config.dumpIR.format.filter[DebugCapsConfig::IrFormatFilter::Xml]) {
diff --git a/src/plugins/intel_cpu/tests/unit/graph/memory_state.cpp b/src/plugins/intel_cpu/tests/unit/graph/memory_state.cpp
index 3b2c070ac3579e..e98d5ba1f85bb3 100644
--- a/src/plugins/intel_cpu/tests/unit/graph/memory_state.cpp
+++ b/src/plugins/intel_cpu/tests/unit/graph/memory_state.cpp
@@ -79,7 +79,7 @@ TEST(MemStateGraphTest, smoke_Check_Memory_Modification_Guard) {
     Config conf;
     conf.rtCacheCapacity = 0;
 
-    auto context = std::make_shared<GraphContext>(conf, nullptr, nullptr, false);
+    auto context = std::make_shared<GraphContext>(conf, nullptr, false);
 
     auto input_node = std::make_shared<node::Input>(param, context);
     auto memory_input = std::make_shared<node::MemoryInput>(read, context);
@@ -266,7 +266,7 @@ TEST(MemStateGraphTest, smoke_ShapeOf_no_Inplace_Conflicts) {
     Config conf;
     conf.rtCacheCapacity = 0;
 
-    auto context = std::make_shared<GraphContext>(conf, nullptr, nullptr, false);
+    auto context = std::make_shared<GraphContext>(conf, nullptr, false);
 
     auto input_node = std::make_shared<node::Input>(param, context);
     auto memory_input = std::make_shared<node::MemoryInput>(read, context);
diff --git a/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp b/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp
index b765c8e40e283e..fe43a4301a0268 100644
--- a/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp
@@ -60,7 +60,7 @@ class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test {
         //
         Config conf;
         conf.rtCacheCapacity = 100;
-        auto context = std::make_shared<GraphContext>(conf, nullptr, nullptr, false);
+        auto context = std::make_shared<GraphContext>(conf, nullptr, false);
         const dnnl::engine cpuEngine = context->getEngine();
 
         m_graph = std::unique_ptr<Graph>(new Graph());
diff --git a/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp b/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp
index 2c048e5e13b0e0..1ca1558a0a3d28 100644
--- a/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/graph/resolve_edge_conflicts_test.cpp
@@ -36,7 +36,7 @@ TEST(ResolveEdgeConflictsCPUTest, smoke_Run_ResolveEdgeConflicts) {
     */
     Config conf;
     conf.rtCacheCapacity = 100;
-    auto context = std::make_shared<GraphContext>(conf, nullptr, nullptr, false);
+    auto context = std::make_shared<GraphContext>(conf, nullptr, false);
     const dnnl::engine cpuEngine = context->getEngine();
 
     std::unique_ptr<Graph> graph = std::unique_ptr<Graph>(new Graph());
diff --git a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp
index 5dd1e01f5610a9..ff528d8e65dc74 100644
--- a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp
+++ b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp
@@ -108,7 +108,6 @@ class ReorderCPUTestGraph {
         Config conf;
         conf.rtCacheCapacity = 100;
         auto context = std::make_shared<GraphContext>(conf,
-                                                      nullptr,
                                                       std::make_shared<WeightsSharing>(),
                                                       false);
         const dnnl::engine cpuEngine = context->getEngine();
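
With the legacy InferenceEngine::IExtension path removed above, custom operations reach the CPU
plugin only through the OpenVINO 2.0 extension API. A minimal sketch of that replacement flow in
Python, assuming an extension library built separately (file names below are illustrative, not
part of this patch):

    from openvino.runtime import Core

    core = Core()
    # An ov::Extension-based library replaces the removed
    # Engine::add_extension(InferenceEngine::IExtensionPtr) entry point.
    core.add_extension("libcustom_ov_extension.so")        # illustrative path
    model = core.read_model("model_with_custom_op.xml")    # illustrative model
    compiled = core.compile_model(model, "CPU")
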
From 53d7c501bd49209a8fc8a5f1cbfe1619c81312f3 Mon Sep 17 00:00:00 2001
From: Karol Blaszczak
Date: Mon, 15 Jan 2024 08:20:07 +0100
Subject: [PATCH 42/43] [DOCS] torchvision and optimization section (#22043)

---
 .../openvino_workflow/model_preparation.rst   |  1 -
 .../model_preparation/pytorch_vision.rst      | 12 ----
 .../dldt_deployment_optimization_guide.rst    |  5 +-
 .../dldt_deployment_optimization_tput.rst     | 11 ++-
 ..._deployment_optimization_tput_advanced.rst |  4 +-
 .../precision_control.rst                     |  5 +-
 .../preprocessing_overview.rst                |  1 +
 .../torchvision_preprocessing_converter.rst   | 80 +++++++++++++++++++
 docs/dev/get_started.md                       |  2 +-
 9 files changed, 98 insertions(+), 23 deletions(-)
 delete mode 100644 docs/articles_en/openvino_workflow/model_preparation/pytorch_vision.rst
 create mode 100644 docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst

diff --git a/docs/articles_en/openvino_workflow/model_preparation.rst b/docs/articles_en/openvino_workflow/model_preparation.rst
index f9bc468066669c..a70d79c5233ad6 100644
--- a/docs/articles_en/openvino_workflow/model_preparation.rst
+++ b/docs/articles_en/openvino_workflow/model_preparation.rst
@@ -15,7 +15,6 @@ Model Preparation
    Convert to OpenVINO Model
    Conversion Parameters
    Setting Input Shapes
-   PyVision preprocessing
 
 
 You can obtain a model in one of supported formats, **PyTorch, TensorFlow, TensorFlow Lite, ONNX, and PaddlePaddle**,
diff --git a/docs/articles_en/openvino_workflow/model_preparation/pytorch_vision.rst b/docs/articles_en/openvino_workflow/model_preparation/pytorch_vision.rst
deleted file mode 100644
index 34df9e465e022e..00000000000000
--- a/docs/articles_en/openvino_workflow/model_preparation/pytorch_vision.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. {#pytorch_vision}
-
-PyVision
-=======================
-
-
-.. meta::
-   :description: Learn about supported model formats and the methods used to convert, read, and compile them in OpenVINO™.
-
-Images input to AI models often need to be preprocessed in order to have proper dimensions or data type.
-Instead of doing it with another library in an additional pipeline step, you can use torchvision.transforms OpenVINO feature.
-It automatically translates a torchvision preprocessing pipeline to OpenVINO operators and then embeds them into your OpenVINO model, reducing overall program complexity and allowing additional performance optimizations to take place.
\ No newline at end of file
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst
index 2628e8be39cb24..5c389e3d789aa5 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst
@@ -13,10 +13,11 @@ Optimize Inference
    openvino_docs_OV_UG_Precision_Control
    openvino_docs_deployment_optimization_guide_latency
    openvino_docs_deployment_optimization_guide_tput
-   openvino_docs_deployment_optimization_guide_tput_advanced
+   Advanced Throughput Options <openvino_docs_deployment_optimization_guide_tput_advanced>
    openvino_docs_OV_UG_Preprocessing_Overview
    openvino_docs_deployment_optimization_guide_internals
-   openvino_docs_memory_optimization_guide
+   Optimizing memory usage <openvino_docs_memory_optimization_guide>
+
 
 .. meta::
    :description: Improving inference performance involves model and runtime
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst
index 3f1595189acb8c..b26d0c9e6132ad 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst
@@ -10,8 +10,15 @@ Optimizing for Throughput
                  simultaneously which improves the device utilization.
 
 
-As described in the section on the :doc:`latency-specific considerations <openvino_docs_deployment_optimization_guide_latency>`, one of the possible use cases is *delivering every single request at the minimal delay*.
-Throughput, on the other hand, is about inference scenarios in which potentially **large number of inference requests are served simultaneously to improve the device utilization**.
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+
+   Advanced Throughput Options <openvino_docs_deployment_optimization_guide_tput_advanced>
+
+
+As described in the section on the :doc:`latency-specific optimizations <openvino_docs_deployment_optimization_guide_latency>`, one of the possible use cases is delivering every single request with minimal delay.
+Throughput, on the other hand, is about inference scenarios in which potentially **large numbers of inference requests are served simultaneously to improve resource use**.
 
 The associated increase in latency is not linearly dependent on the number of requests executed in parallel.
 A trade-off between overall throughput and serial performance of individual requests can be achieved with the right performance configuration of OpenVINO.
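
The throughput-oriented flow described in the document above maps to only a few lines of the
Python API. A minimal sketch, assuming the OpenVINO 2023.x runtime (model path and input shape
are illustrative):

    import numpy as np
    from openvino.runtime import AsyncInferQueue, Core

    core = Core()
    # Let the device configure streams/batching for many parallel requests.
    compiled = core.compile_model("model.xml", "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"})
    # The queue defaults to the optimal number of infer requests for the device.
    queue = AsyncInferQueue(compiled)
    queue.set_callback(lambda request, userdata: None)  # collect results here
    for _ in range(32):
        queue.start_async({0: np.random.rand(1, 3, 224, 224).astype(np.float32)})
    queue.wait_all()
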
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst
index eff5a21ae1c202..b792cfca42be33 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst
@@ -1,6 +1,6 @@
 .. {#openvino_docs_deployment_optimization_guide_tput_advanced}
 
-Using Advanced Throughput Options: Streams and Batching
+Advanced Throughput Options: Streams and Batching
 =======================================================
 
 
@@ -8,7 +8,7 @@ Using Advanced Throughput Options: Streams and Batching
    :description: With OpenVINO streams a device may handle processing multiple
                  inference requests and the batching helps to saturate the
                  device and leads to higher throughput.
-
+
 
 OpenVINO Streams
 ####################
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst
index 91ae9b011f5746..63de6309809943 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst
@@ -9,9 +9,8 @@ The choice of data types is essential to the inference runtime, which can have a
 1. Model storage precision (IR precision),
 2. Model inference precision.
 
-Previously, these 2 precisions were interrelated, and model storage precision could affect the inference precision in some devices (e.g. GPU did ``f16`` inference only for ``f16`` IRs).
-
-With the ``2023.0`` release this behavior has been changed and the inference precision no longer depends on the precision of IR. Now users have several knobs to find the balance between model performance and accuracy.
+Inference precision no longer depends on the precision of IR, which means that users
+have several options to find the balance between model performance and accuracy.
 
 Essentially, the IR precision becomes a way of compressing the model by reducing the precision of the weights, and it does not affect how the devices execute the model.
 
 This change clears up a lot of confusion where, for example, you couldn't execute a high-performance model on the GPU by default, and the behavior between devices was different.
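
The decoupling described in the precision-control patch above is exposed through the inference
precision hint, so the execution precision can be set independently of the IR's storage
precision. A minimal sketch, assuming the OpenVINO 2023.x runtime (device and model path are
illustrative):

    from openvino.runtime import Core, Type
    import openvino.runtime.properties.hint as hints

    core = Core()
    model = core.read_model("model_fp16.xml")   # f16 IR: storage precision only
    # Request f32 execution regardless of how the weights are stored.
    compiled = core.compile_model(model, "GPU", {hints.inference_precision(): Type.f32})
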
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst
index bac4584fb4cf99..48020d3928de0c 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst
@@ -11,6 +11,7 @@ Optimize Preprocessing
    openvino_docs_OV_UG_Preprocessing_Details
    openvino_docs_OV_UG_Layout_Overview
    openvino_docs_OV_UG_Preprocess_Usecase_save
+   Torchvision preprocessing converter <torchvision_preprocessing_converter>
 
 
 .. meta::
    :description: The preprocessing entails additional operations to transform
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst
new file mode 100644
index 00000000000000..264edda073b2d6
--- /dev/null
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/torchvision_preprocessing_converter.rst
@@ -0,0 +1,80 @@
+.. {#torchvision_preprocessing_converter}
+
+Torchvision preprocessing converter
+=======================================
+
+
+.. meta::
+   :description: See how OpenVINO™ enables torchvision preprocessing
+                 to optimize model inference.
+
+
+The Torchvision-to-OpenVINO converter enables automatic translation of operators from the torchvision
+preprocessing pipeline to the OpenVINO format and embeds them in your model. It is often used to adjust
+images serving as input to AI models so that they have the proper dimensions and data types.
+
+As the converter is fully based on the **openvino.preprocess** module, you can implement the **torchvision.transforms**
+feature easily, without the use of external libraries, reducing the overall application complexity
+and enabling additional performance optimizations.
+
+
+.. note::
+
+   Not all torchvision transforms are supported yet. The following operations are available:
+
+   .. code-block::
+
+      transforms.Compose
+      transforms.Normalize
+      transforms.ConvertImageDtype
+      transforms.Grayscale
+      transforms.Pad
+      transforms.ToTensor
+      transforms.CenterCrop
+      transforms.Resize
+
+
+Example
+###################
+
+.. code-block:: py
+
+   # Imports assumed by this example; "SimpleConvnet" is a user-defined torch.nn.Module.
+   import numpy as np
+   import torch
+   import torchvision
+   from PIL import Image
+   from torchvision import transforms
+   from openvino.preprocess.torchvision import PreprocessConverter
+   from openvino.runtime import Core
+
+   preprocess_pipeline = torchvision.transforms.Compose(
+       [
+           torchvision.transforms.Resize(256, interpolation=transforms.InterpolationMode.NEAREST),
+           torchvision.transforms.CenterCrop((216, 218)),
+           torchvision.transforms.Pad((2, 3, 4, 5), fill=3),
+           torchvision.transforms.ToTensor(),
+           torchvision.transforms.ConvertImageDtype(torch.float32),
+           torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+       ]
+   )
+
+   torch_model = SimpleConvnet(input_channels=3)
+
+   torch.onnx.export(torch_model, torch.randn(1, 3, 224, 224), "test_convnet.onnx", verbose=False, input_names=["input"], output_names=["output"])
+   core = Core()
+   ov_model = core.read_model(model="test_convnet.onnx")
+
+   test_input = np.random.randint(255, size=(260, 260, 3), dtype=np.uint16)
+   ov_model = PreprocessConverter.from_torchvision(
+       model=ov_model, transform=preprocess_pipeline, input_example=Image.fromarray(test_input.astype("uint8"), "RGB")
+   )
+   ov_model = core.compile_model(ov_model, "CPU")
+   ov_input = np.expand_dims(test_input, axis=0)
+   output = ov_model.output(0)
+   ov_result = ov_model(ov_input)[output]
+
+
+
+
+
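
Since the converter documented above builds on the **openvino.preprocess** module, the same kind
of preprocessing can also be embedded by hand when no torchvision pipeline is involved. A
hand-written sketch under that assumption (model path, sizes, and mean/scale values are
illustrative):

    from openvino.preprocess import PrePostProcessor, ResizeAlgorithm
    from openvino.runtime import Core, Layout, Type

    core = Core()
    model = core.read_model("model.xml")
    ppp = PrePostProcessor(model)
    # Describe the incoming tensor: u8 HWC images of a known spatial size.
    ppp.input().tensor() \
        .set_element_type(Type.u8) \
        .set_layout(Layout("NHWC")) \
        .set_spatial_static_shape(260, 260)
    ppp.input().model().set_layout(Layout("NCHW"))
    # These steps are embedded into the model, like the converter output above.
    ppp.input().preprocess() \
        .convert_element_type(Type.f32) \
        .resize(ResizeAlgorithm.RESIZE_LINEAR) \
        .mean([123.675, 116.28, 103.53]) \
        .scale([58.395, 57.12, 57.375])
    model = ppp.build()
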
diff --git a/docs/dev/get_started.md b/docs/dev/get_started.md
index 93b6000dbee1b1..b5d09af43dd7cb 100644
--- a/docs/dev/get_started.md
+++ b/docs/dev/get_started.md
@@ -18,4 +18,4 @@ Explore other resources to learn more about OpenVINO:
  * [OpenVINO Developer Documentation](./index.md)
  * [OpenVINO Samples](../../samples)
  * [OpenVINO Building Documentation](./building_documentation.md)
- * [CMake Options for Custom Compilation](./cmake_options_for_custom_comiplation.md)
+ * [CMake Options for Custom Compilation](./cmake_options_for_custom_compilation.md)
From 938600fbf7aba0c836e12787b0ce037dae7ecc8c Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Mon, 15 Jan 2024 09:57:24 +0100
Subject: [PATCH 43/43] [tests] resolve HF models 4th batch (#21907)

* add inspect pytorch modules
* add infer for each submodule
* skip -> xfail in several models
* successful separate modules execution
* revert jukebox, whisper
* ready for review
* rely on tags instead of names
* fix import error
---
 .../torch_tests/hf_transformers_models        | 11 +++--
 .../torch_tests/test_hf_transformers.py       | 44 ++++++++++++++++++-
 2 files changed, 50 insertions(+), 5 deletions(-)

diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models
index cec6509471c141..2cec1a1b744901 100644
--- a/tests/model_hub_tests/torch_tests/hf_transformers_models
+++ b/tests/model_hub_tests/torch_tests/hf_transformers_models
@@ -209,7 +209,8 @@ krasserm/perceiver-io-mlm,perceiver-io-masked-language-model,skip,Load problem
 krasserm/perceiver-io-optical-flow,perceiver-io-optical-flow,skip,Load problem
 krasserm/perceiver-io-txt-clf-imdb,perceiver-io-text-classifier,skip,Load problem
 ksmcg/fan_small_12_p16_224,fan,skip,Load problem
-laion/clap-htsat-unfused,clap,skip,Load problem
+laion/clap-htsat-unfused:audio_model,clap
+laion/clap-htsat-unfused:audio_projection,clap
 Langboat/ReGPT-125M-200G,re_gpt,skip,Load problem
 lengyue233/content-vec-best,hubert
 Lewislou/cellseg_sribd,cell_sribd,skip,Load problem
@@ -241,7 +242,7 @@ microsoft/beit-base-patch16-224-pt22k-ft22k,beit
 microsoft/biogpt,biogpt
 microsoft/conditional-detr-resnet-50,conditional_detr
 microsoft/deberta-base,deberta
-microsoft/git-large-coco,git,skip,Load problem
+microsoft/git-large-coco,git,xfail,Tracing error: Please check correctness of provided example_input (but eval was correct)
 microsoft/layoutlm-base-uncased,layoutlm
 microsoft/layoutlmv2-base-uncased,layoutlmv2,xfail,Tracing error: Please check correctness of provided example_input (but eval was correct)
 microsoft/layoutlmv3-base,layoutlmv3
@@ -316,7 +317,9 @@ RWKV/rwkv-4-169m-pile,rwkv
 sahasrarjn/interbert,BERT,skip,Load problem
 saibo/genkalm-medium-gpt2,genkalm,skip,Load problem
 SajjadAyoubi/clip-fa-vision,clip_vision_model
-Salesforce/blip2-flan-t5-xl,blip-2,skip,Load problem
+Salesforce/blip2-flan-t5-xl:vision_model,blip-2
+Salesforce/blip2-flan-t5-xl:qformer,blip-2
+Salesforce/blip2-flan-t5-xl:language_projection,blip-2
 Salesforce/blip-image-captioning-large,blip
 Salesforce/instructblip-vicuna-7b,instructblip,skip,Load problem
 SamLowe/roberta-base-go_emotions,roberta
@@ -410,5 +413,5 @@ Yova/SmallCapOPT7M,smallcap,skip,Load problem
 yusufani/trclip-vitl14-e10,trclip,skip,Load problem
 yysung53/dpr,text_similarity,skip,Load problem
 Zetatech/pvt-tiny-224,pvt,skip,Load problem
-ZinengTang/tvlt-base,tvlt,skip,Load problem
+ZinengTang/tvlt-base,tvlt,xfail,Conversion is failed for aten::cat: Argument element types are inconsistent
 zuppif/resnetd-18,resnetd,skip,Load problem
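
The `name:submodule` entries added to the list above are resolved by the test changes below:
the part after the colon selects a single submodule of the loaded model, which is then run on a
synthetic example input. A minimal sketch of the convention (model name taken from the list
above, tensor shape from the test code below):

    import torch
    from transformers import AutoModel

    full_name = "laion/clap-htsat-unfused:audio_model"
    name, _, suffix = full_name.partition(":")
    model = AutoModel.from_pretrained(name)
    # Pick out the named submodule (here, the CLAP audio tower) ...
    submodule = model._modules[suffix]
    # ... and run it on a synthetic example, as the test below does.
    example = {"input_features": torch.randn([1, 1, 1001, 64], dtype=torch.float32)}
    output = submodule(**example)
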
diff --git a/tests/model_hub_tests/torch_tests/test_hf_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py
index a47730d5ba1aab..1e8c3d76983120 100644
--- a/tests/model_hub_tests/torch_tests/test_hf_transformers.py
+++ b/tests/model_hub_tests/torch_tests/test_hf_transformers.py
@@ -99,7 +99,13 @@ def setup_class(self):
         self.cuda_available, self.gptq_postinit = None, None
 
     def load_model(self, name, type):
+        import torch
+        name_suffix = ''
         from transformers import AutoConfig
+        if name.find(':') != -1:
+            name_suffix = name[name.find(':') + 1:]
+            name = name[:name.find(':')]
+
         mi = model_info(name)
         auto_processor = None
         model = None
@@ -163,6 +169,41 @@ def load_model(self, name, type):
             processor = AutoProcessor.from_pretrained(name)
             model = AutoModel.from_pretrained(name, **model_kwargs)
             example = dict(processor(images=self.image, task_inputs=["semantic"], return_tensors="pt"))
+        elif 'clap' in mi.tags:
+            from transformers import AutoModel
+            model = AutoModel.from_pretrained(name)
+
+            import torch
+            example_inputs_map = {
+                'audio_model': {'input_features': torch.randn([1, 1, 1001, 64], dtype=torch.float32)},
+                'audio_projection': {'hidden_states': torch.randn([1, 768], dtype=torch.float32)},
+            }
+            model = model._modules[name_suffix]
+            example = example_inputs_map[name_suffix]
+        elif 'git' in mi.tags:
+            from transformers import AutoProcessor, AutoModelForCausalLM
+            processor = AutoProcessor.from_pretrained(name)
+            model = AutoModelForCausalLM.from_pretrained(name)
+            import torch
+            example = {'pixel_values': torch.randn(*(1, 3, 224, 224), dtype=torch.float32),
+                       'input_ids': torch.randint(1, 100, size=(1, 13), dtype=torch.int64)}
+        elif 'blip-2' in mi.tags:
+            from transformers import AutoProcessor, AutoModelForVisualQuestionAnswering
+
+            processor = AutoProcessor.from_pretrained(name)
+            model = AutoModelForVisualQuestionAnswering.from_pretrained(name)
+
+            example = dict(processor(images=self.image, return_tensors="pt"))
+            import torch
+            example_inputs_map = {
+                'vision_model' : {'pixel_values': torch.randn([1, 3, 224, 224], dtype=torch.float32)},
+                'qformer': {'query_embeds' : torch.randn([1, 32, 768], dtype=torch.float32),
+                            'encoder_hidden_states' : torch.randn([1, 257, 1408], dtype=torch.float32),
+                            'encoder_attention_mask' : torch.ones([1, 257], dtype=torch.int64)},
+                'language_projection': {'input' : torch.randn([1, 32, 768], dtype=torch.float32)},
+            }
+            model = model._modules[name_suffix]
+            example = example_inputs_map[name_suffix]
         elif "t5" in mi.tags:
             from transformers import T5Tokenizer
             tokenizer = T5Tokenizer.from_pretrained(name)
@@ -257,6 +298,7 @@ def forward(self, pixel_values, input_ids, attention_mask):
         elif 'speecht5' in mi.tags:
             from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
             from datasets import load_dataset
+
             processor = SpeechT5Processor.from_pretrained(name)
             model = SpeechT5ForTextToSpeech.from_pretrained(name)
 
@@ -264,7 +306,7 @@ def forward(self, pixel_values, input_ids, attention_mask):
             # load xvector containing speaker's voice characteristics from a dataset
             embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
             speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
-
+
             example = {'input_ids': inputs["input_ids"], 'speaker_embeddings': speaker_embeddings}
 
         class DecoratorModelForSeq2SeqLM(torch.nn.Module):
             def __init__(self, model):