From 2b7f6c04b4a5037d5bbe6586611f376c8f134086 Mon Sep 17 00:00:00 2001
From: zaigie
Date: Thu, 4 Jan 2024 22:19:12 +0800
Subject: [PATCH] release: 0.3.2

---
 README.md                    | 18 ++++++++++++------
 README.zh.md                 | 18 ++++++++++++------
 pyproject.toml               |  4 ++--
 src/stream_infer/__init__.py |  2 +-
 4 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 69fb8cc..91a21c6 100644
--- a/README.md
+++ b/README.md
@@ -201,7 +201,7 @@ from stream_infer.dispatcher import Dispatcher
 import requests
 ...
 class RequestDispatcher(Dispatcher):
-    def __init__(self, max_size: int = 120):
+    def __init__(self, max_size):
         super().__init__(max_size)
         self.sess = requests.Session()
     ...
@@ -217,14 +217,18 @@ class RequestDispatcher(Dispatcher):
 ...

 # Offline inference
-dispatcher = RequestDispatcher.create(offline=True, max_size=140)
+dispatcher = RequestDispatcher.create(offline=True, max_size=30)

 # Real-time inference
-dispatcher = RequestDispatcher.create(max_size=150)
+dispatcher = RequestDispatcher.create(max_size=15)
 ```

+You may have noticed that the dispatcher is instantiated differently for offline and real-time inference. This is because **in real-time inference, playback and inference do not run in the same process**, yet both need to share the same dispatcher. Although only the offline parameter changes, the internal implementation switches to a DispatcherManager proxy.
+
 > [!CAUTION]
-> You may have noticed that the instantiation of dispatcher differs between offline and real-time inference. This is because **in real-time inference, playback and inference are not in the same process**, and both need to share the same dispatcher, only the offline parameter has been changed, but the internal implementation uses the DispatcherManager agent.
+> The `max_size` parameter defaults to 30, which keeps the latest 30 frames of ndarray data in the buffer. **The larger this parameter, the more memory the program occupies!**
+>
+> It is recommended to set it according to the actual inference intervals: `max_size = max(frame_count * (frame_step if frame_step else 1))`.

 ### Inference

@@ -257,7 +261,7 @@ Here, we can give HeadDetectionAlgo a name to identify the running algorithm (ne
 The parameters for loading an algorithm are the framework's core functionality, allowing you to freely implement frame retrieval logic:

 - frame_count: The number of frames the algorithm needs to get, which is the number of frames the run() function will receive.
-- frame_step: Take 1 frame every `frame_step`, up to `frame_count` frames. (when `frame_count` is equal to 1, this parameter determines only the startup delay)
+- frame_step: Take 1 frame every `frame_step` frames, up to `frame_count` frames in total; may be 0. (When `frame_count` is 1, this parameter only determines the startup delay.)
 - interval: In seconds, indicating the frequency of algorithm calls, like `AnyOtherAlgo` will only be called once a minute to save resources when not needed.

 ### Producer

@@ -285,9 +289,11 @@ from stream_infer import Player

 ...

-player = Player(dispatcher, producer, video_path)
+player = Player(dispatcher, producer, video_path, show_progress)
 ```

+The `show_progress` parameter defaults to True, in which case tqdm is used to display a progress bar; when set to False, progress is printed through the logger.
+
 ### Run

 Simply run the entire script through Inference's `start()`.
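To make the new `max_size` guidance concrete, here is a minimal sketch of how the recommended value could be computed from the `(frame_count, frame_step)` pairs of the algorithms to be loaded and then passed to `create()`. It follows the API shown in the README hunks above; the `algo_params` list, its values, and the trimmed-down `RequestDispatcher` (the requests.Session reporting logic is omitted) are illustrative assumptions, not part of the patch.

```python
from stream_infer.dispatcher import Dispatcher


class RequestDispatcher(Dispatcher):
    # Same subclass as in the README excerpt above, with the
    # requests.Session reporting logic omitted for brevity.
    def __init__(self, max_size):
        super().__init__(max_size)


# Hypothetical (frame_count, frame_step) pairs for the algorithms that
# will later be passed to inference.load_algo().
algo_params = [(1, 0), (5, 6)]

# Recommended buffer size: the largest frame span any single algorithm needs.
max_size = max(
    frame_count * (frame_step if frame_step else 1)
    for frame_count, frame_step in algo_params
)  # -> 30 for the pairs above

# Offline inference: playback and inference share one process.
dispatcher = RequestDispatcher.create(offline=True, max_size=max_size)

# Real-time inference: playback and inference run in separate processes,
# so create() hands back a DispatcherManager-backed proxy instead.
# dispatcher = RequestDispatcher.create(max_size=max_size)
```

In both cases `create()`, rather than the plain constructor, is what allows the same dispatcher to be shared when playback and inference live in different processes.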
diff --git a/README.zh.md b/README.zh.md
index 0b7295e..06a480e 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -199,7 +199,7 @@ from stream_infer.dispatcher import Dispatcher
 import requests
 ...
 class RequestDispatcher(Dispatcher):
-    def __init__(self, max_size: int = 120):
+    def __init__(self, max_size):
         super().__init__(max_size)
         self.sess = requests.Session()
     ...
@@ -215,14 +215,18 @@ class RequestDispatcher(Dispatcher):
 ...

 # 离线推理
-dispatcher = RequestDispatcher.create(offline=True, max_size=140)
+dispatcher = RequestDispatcher.create(offline=True, max_size=30)

 # 实时推理
-dispatcher = RequestDispatcher.create(max_size=150)
+dispatcher = RequestDispatcher.create(max_size=15)
 ```

+您可能注意到，在离线推理和实时推理下实例化 dispatcher 的方式不同，这是因为 **实时推理下播放与推理不在一个进程中** ，而两者都需要共享同一个 dispatcher，虽然只是改变了 offline 参数，但其内部实现使用了 DispatcherManager 代理。
+
 > [!CAUTION]
-> 您可能注意到，在离线推理和实时推理下实例化 dispatcher 的方式不同，这是因为 **实时推理下播放与推理不在一个进程中** ，而两者都需要共享同一个 dispatcher，虽然只是改变了 offline 参数，但其内部实现使用了 DispatcherManager 代理。
+> 对于 `max_size`参数，默认值为 30，会将最新的 30 帧 ndarray 数据存在缓冲区中，**该参数越大，程序占用的内存就越大！**
+>
+> 建议根据实际推理间隔情况设置为 `max_size = max(frame_count * (frame_step if frame_step else 1))`

 ### Inference

@@ -255,7 +259,7 @@ inference.load_algo(AnyOtherAlgo("other"), 5, 6, 60)
 而加载算法的几个参数则是框架的核心功能，让您能自由实现取帧逻辑：

 - frame_count：算法需要获取的帧数量，也就是最终 run() 函数中收到的 frames 数量。
-- frame_step：每隔 `frame_step` 取 1 帧，共取 `frame_count` 帧。(当 `frame_count` 为 1 时，这个参数决定的只是启动延迟)
+- frame_step：每隔 `frame_step` 取 1 帧，共取 `frame_count` 帧，可为 0。(当 `frame_count` 为 1 时，这个参数决定的只是启动延迟)
 - interval：单位秒，表示算法调用频率，如 `AnyOtherAlgo` 就只会在一分钟才调用一次，用来在不需要调用它的时候节省资源

 ### Producer

@@ -283,9 +287,11 @@ from stream_infer import Player

 ...

-player = Player(dispatcher, producer, video_path)
+player = Player(dispatcher, producer, video_path, show_progress)
 ```

+`show_progress` 参数默认为 True，此时会使用 tqdm 显示进度条，而设置为 False 时会通过 logger 打印。
+
 ### Run

 通过 Inference 的 `start()` 即可简单运行整个脚本
diff --git a/pyproject.toml b/pyproject.toml
index bbf4830..07027e4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "stream_infer"
-version = "0.3.1"
+version = "0.3.2"
 description = "Video streaming inference framework, integrating image algorithms and models for real-time/offline video structuring"
 authors = ["ZaiGie "]
 maintainers = ["ZaiGie "]
@@ -42,7 +42,7 @@ opencv-python-headless = ">=4.5.5.64,<=4.8.1.78"
 av = ">= 11.0.0"
 loguru = ">=0.6.0"
 streamlit = { version = ">=1.29.0", optional = true }
-
+tqdm = ">=4.62.3,<=4.66.1"

 [tool.poetry.extras]
 desktop = ["opencv-python"]
diff --git a/src/stream_infer/__init__.py b/src/stream_infer/__init__.py
index 02c0e3e..2f5d8b8 100644
--- a/src/stream_infer/__init__.py
+++ b/src/stream_infer/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.3.1"
+__version__ = "0.3.2"

 from .inference import Inference
 from .player import Player
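As a quick post-upgrade check, the version bump and the newly pinned tqdm dependency from this patch can be verified from Python. This is only an illustrative snippet, not part of the patch.

```python
import stream_infer
import tqdm  # new dependency pinned in pyproject.toml (>=4.62.3,<=4.66.1), used for Player's progress bar

# __init__.py in this release sets the package version to 0.3.2.
assert stream_infer.__version__ == "0.3.2"
print("stream_infer", stream_infer.__version__, "/ tqdm", tqdm.__version__)
```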