diff --git a/plugins/openvino/package-lock.json b/plugins/openvino/package-lock.json
index ab272a8784..5ae10161d6 100644
--- a/plugins/openvino/package-lock.json
+++ b/plugins/openvino/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/openvino",
-  "version": "0.1.42",
+  "version": "0.1.43",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/openvino",
-      "version": "0.1.42",
+      "version": "0.1.43",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
diff --git a/plugins/openvino/package.json b/plugins/openvino/package.json
index 684ea2f08f..268e65ed81 100644
--- a/plugins/openvino/package.json
+++ b/plugins/openvino/package.json
@@ -42,5 +42,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.1.42"
+  "version": "0.1.43"
 }
diff --git a/plugins/openvino/src/ov/__init__.py b/plugins/openvino/src/ov/__init__.py
index bb47e28833..981b39fa94 100644
--- a/plugins/openvino/src/ov/__init__.py
+++ b/plugins/openvino/src/ov/__init__.py
@@ -1,7 +1,5 @@
 from __future__ import annotations
 
-import asyncio
-import concurrent.futures
 import json
 import re
 from typing import Any, Tuple
@@ -96,6 +94,7 @@ def __init__(self, nativeId: str | None = None):
 
         try:
             self.compiled_model = self.core.compile_model(xmlFile, mode)
+            print("EXECUTION_DEVICES", self.compiled_model.get_property("EXECUTION_DEVICES"))
         except:
             import traceback
             traceback.print_exc()
@@ -114,8 +113,6 @@ def __init__(self, nativeId: str | None = None):
             labels_contents = open(labelsFile, 'r').read()
             self.labels = parse_label_contents(labels_contents)
 
-        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="openvino", )
-
     async def getSettings(self) -> list[Setting]:
         mode = self.storage.getItem('mode') or 'Default'
         model = self.storage.getItem('model') or 'Default'
@@ -181,7 +178,7 @@ def get_input_size(self) -> Tuple[int, int]:
         return [self.model_dim, self.model_dim]
 
     async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
-        def predict():
+        async def predict():
             infer_request = self.compiled_model.create_infer_request()
             # the input_tensor can be created with the shared_memory=True parameter,
             # but that seems to cause issues on some platforms.
@@ -259,7 +256,7 @@ def torelative(value: float):
             return objs
 
         try:
-            objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
+            objs = await predict()
         except:
             import traceback
             traceback.print_exc()