# Add openai.chat_completions package to support OSS models (#156)
Changes from 14 commits.
**Video frame utilities** (modified file, hunk `@@ -1,27 +1,63 @@`):

```python
"""Video frame utilities."""

import io

import av
from PIL.Image import Resampling


def ensure_even_dimensions(frame: av.VideoFrame) -> av.VideoFrame:
    """
    Ensure frame has even dimensions for H.264 yuv420p encoding.

    Crops by 1 pixel if width or height is odd.
    """
    needs_width_adjust = frame.width % 2 != 0
    needs_height_adjust = frame.height % 2 != 0

    if not needs_width_adjust and not needs_height_adjust:
        return frame

    new_width = frame.width - (1 if needs_width_adjust else 0)
    new_height = frame.height - (1 if needs_height_adjust else 0)

    cropped = frame.reformat(width=new_width, height=new_height)
    cropped.pts = frame.pts
    if frame.time_base is not None:
        cropped.time_base = frame.time_base

    return cropped


def frame_to_jpeg_bytes(
    frame: av.VideoFrame, target_width: int, target_height: int, quality: int = 85
) -> bytes:
    """
    Convert a video frame to JPEG bytes with resizing.

    Args:
        frame: an instance of `av.VideoFrame`.
        target_width: target width in pixels.
        target_height: target height in pixels.
        quality: JPEG quality. Default is 85.

    Returns: frame as JPEG bytes.
    """
    # Convert frame to a PIL image
    img = frame.to_image()

    # Calculate the scale factor that fits the frame within the target
    # dimensions while maintaining its aspect ratio
    src_width, src_height = img.size
    scale = min(target_width / src_width, target_height / src_height)
    new_width = int(src_width * scale)
    new_height = int(src_height * scale)

    # Resize with aspect ratio maintained
    resized = img.resize((new_width, new_height), Resampling.LANCZOS)

    # Save as JPEG with quality control
    buf = io.BytesIO()
    resized.save(buf, "JPEG", quality=quality, optimize=True)
    return buf.getvalue()
```
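A minimal usage sketch for these helpers; the synthetic 641×480 frame (odd width, random pixels) is made up for illustration:

```python
import av
import numpy as np

# Build a synthetic RGB frame with an odd width to force a crop.
arr = np.random.randint(0, 256, (480, 641, 3), dtype=np.uint8)
frame = av.VideoFrame.from_ndarray(arr, format="rgb24")

even = ensure_even_dimensions(frame)  # 641x480 -> 640x480
jpeg = frame_to_jpeg_bytes(even, target_width=800, target_height=600)
print(even.width, even.height, len(jpeg))
```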
**Plugin README** (modified file, hunk `@@ -16,7 +16,7 @@ pip install getstream-plugins-openai`):

```diff
 ## Usage

-from getstream.plugins.openai import OpenAIRealtime
+from vision_agents.plugins.openai import Realtime

 # Initialize with API key
 sts = OpenAIRealtime(api_key="your_openai_api_key", voice="alloy")
```

> **Review comment on lines +19 to 22:** Fix the import/usage mismatch for `Realtime`. Line 19 imports `Realtime`, but line 22 still instantiates `OpenAIRealtime`. Apply this diff:
>
> ```diff
> -# Initialize with API key
> -sts = OpenAIRealtime(api_key="your_openai_api_key", voice="alloy")
> +# Initialize with API key
> +sts = Realtime(api_key="your_openai_api_key", voice="alloy")
> ```
**New file** (hunk `@@ -0,0 +1,120 @@`): the plugin README.

# Qwen3-VL hosted on Baseten

Qwen3-VL is the latest open-source vision-language model (VLM) from Alibaba. This plugin lets developers run the Baseten-hosted model with Vision Agents. The model accepts text and video and responds with text, which is vocalised by the TTS service of your choice.

## Features

- **Video understanding**: Automatically buffers and forwards video frames to Baseten-hosted VLM models
- **Streaming responses**: Supports streaming text responses with real-time chunk events
- **Frame buffering**: Configurable frame rate and buffer duration for optimal performance
- **Event-driven**: Emits LLM events (chunks, completion, errors) for integration with other components

## Installation

```bash
uv add "vision-agents[openai]"
```
## Quick Start

```python
from vision_agents.core import Agent, User
from vision_agents.plugins import openai, getstream, deepgram, elevenlabs, vogent


async def create_agent(**kwargs) -> Agent:
    # Initialize the Baseten VLM. The API key and base URL can be passed
    # via the OPENAI_API_KEY and OPENAI_BASE_URL environment variables.
    llm = openai.ChatCompletionsVLM(model="qwen3vl")

    # Create an agent with video understanding capabilities
    agent = Agent(
        edge=getstream.Edge(),
        agent_user=User(name="Video Assistant", id="agent"),
        instructions="You're a helpful video AI assistant. Analyze the video frames and respond to user questions about what you see.",
        llm=llm,
        stt=deepgram.STT(),
        tts=elevenlabs.TTS(),
        turn_detection=vogent.TurnDetection(),
        processors=[],
    )
    return agent


async def join_call(agent: Agent, call_type: str, call_id: str, **kwargs) -> None:
    await agent.create_user()
    call = await agent.create_call(call_type, call_id)

    with await agent.join(call):
        # The agent will automatically process video frames and respond to user input
        await agent.finish()
```
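A minimal runner for this example might look like the following; the call type and ID are placeholders, not values defined by the plugin:

```python
import asyncio


async def main() -> None:
    agent = await create_agent()
    # "default" and "my-call-id" are placeholder identifiers.
    await join_call(agent, call_type="default", call_id="my-call-id")


asyncio.run(main())
```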
## Configuration

### Environment Variables

- **`OPENAI_API_KEY`**: Your Baseten API key (required)
- **`OPENAI_BASE_URL`**: The base URL for your Baseten API endpoint (required)
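Despite the `OPENAI_*` names, both variables should point at your Baseten deployment. For local development, one option is a `.env` file loaded via python-dotenv (already in the example's dependencies); the endpoint below is a placeholder:

```python
# .env (placeholder values):
#   OPENAI_API_KEY=your_baseten_api_key
#   OPENAI_BASE_URL=https://model-xxxxx.api.baseten.co/v1
from dotenv import load_dotenv

# Load the variables before ChatCompletionsVLM reads the environment.
load_dotenv()
```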
### Initialization Parameters

```python
openai.ChatCompletionsVLM(
    model: str,                            # Baseten model name (e.g., "qwen3vl")
    api_key: Optional[str] = None,         # API key (defaults to OPENAI_API_KEY env var)
    base_url: Optional[str] = None,        # Base URL (defaults to OPENAI_BASE_URL env var)
    fps: int = 1,                          # Frames per second to process (default: 1)
    frame_buffer_seconds: int = 10,        # Seconds of video to buffer (default: 10)
    client: Optional[AsyncOpenAI] = None,  # Custom OpenAI client (optional)
)
```

### Parameters

- **`model`**: The name of the Baseten-hosted model to use. Must be a vision-capable model.
- **`api_key`**: Your Baseten API key. If not provided, read from the `OPENAI_API_KEY` environment variable.
- **`base_url`**: The base URL for the Baseten API. If not provided, read from the `OPENAI_BASE_URL` environment variable.
- **`fps`**: Number of video frames per second to capture and send to the model. Lower values reduce API costs but may miss fast-moving content. Default is 1 fps.
- **`frame_buffer_seconds`**: How many seconds of video to buffer. Total buffer size = `fps * frame_buffer_seconds` frames. Default is 10 seconds.
- **`client`**: Optional pre-configured `AsyncOpenAI` client. If provided, `api_key` and `base_url` are ignored (see the sketch below).
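A sketch of passing a pre-configured client while tuning the buffer; the base URL is a placeholder, and with `fps=2` and `frame_buffer_seconds=5` the buffer holds 2 × 5 = 10 frames:

```python
from openai import AsyncOpenAI

from vision_agents.plugins import openai as openai_plugin

# Reuse one AsyncOpenAI client instead of relying on environment variables.
client = AsyncOpenAI(
    api_key="your_baseten_api_key",
    base_url="https://model-xxxxx.api.baseten.co/v1",  # placeholder endpoint
)

llm = openai_plugin.ChatCompletionsVLM(
    model="qwen3vl",
    client=client,           # api_key/base_url are ignored when client is given
    fps=2,                   # denser sampling for fast-moving scenes
    frame_buffer_seconds=5,  # 2 fps * 5 s = 10 buffered frames
)
```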
## How It Works

1. **Video Frame Buffering**: The plugin automatically subscribes to video tracks when the agent joins a call. It buffers frames at the specified FPS for the configured duration.
2. **Frame Processing**: When responding to user input, the plugin (see the sketch after this list):
   - Converts buffered video frames to JPEG format
   - Resizes frames to fit within 800x600 (maintaining aspect ratio)
   - Encodes frames as base64 data URLs
3. **API Request**: Sends the conversation history (including system instructions) along with all buffered frames to the Baseten model.
4. **Streaming Response**: Processes the streaming response and emits events for each chunk and completion.
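A sketch of steps 2 and 3, reusing the `frame_to_jpeg_bytes` helper shown earlier; the `image_url` message shape follows the OpenAI-compatible chat completions API:

```python
import base64

import av


def frame_to_data_url(frame: av.VideoFrame) -> str:
    # Step 2: JPEG-encode the frame, resized to fit within 800x600.
    jpeg = frame_to_jpeg_bytes(frame, target_width=800, target_height=600)
    # Step 3: wrap the JPEG bytes in a base64 data URL.
    return "data:image/jpeg;base64," + base64.b64encode(jpeg).decode("ascii")


# One user message carrying the question plus the buffered frames,
# with one image_url entry appended per frame (illustrative shape):
message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is happening in the video?"},
        # {"type": "image_url", "image_url": {"url": frame_to_data_url(frame)}},
    ],
}
```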
## Events

The plugin emits the following events:

- **`LLMResponseChunkEvent`**: Emitted for each text chunk in the streaming response (see the sketch below)
- **`LLMResponseCompletedEvent`**: Emitted when the response stream completes
- **`LLMErrorEvent`**: Emitted if an API request fails
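A sketch of listening for streamed chunks. The import path, the `llm.events.subscribe` registration, and the `delta` attribute are all assumptions about the core framework's event API; check the Vision Agents docs for the exact names:

```python
# Hypothetical import path for the event class.
from vision_agents.core.llm.events import LLMResponseChunkEvent


@llm.events.subscribe  # hypothetical registration decorator
async def on_chunk(event: LLMResponseChunkEvent) -> None:
    # Print streamed text as it arrives; `event.delta` is assumed.
    print(event.delta, end="", flush=True)
```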
## Requirements

- Python 3.10+
- `openai>=2.5.0`
- `vision-agents` (core framework)
- Baseten API key and base URL

## Notes

- **Frame Rate**: The default of 1 fps is optimized for VLM use cases. Higher FPS values increase API costs and latency.
- **Frame Size**: Frames are automatically resized to fit within 800x600 pixels while maintaining aspect ratio, which keeps the API payload small.
- **Buffer Duration**: The 10-second default buffer provides context for the model while keeping memory usage reasonable.
- **Tool Calling**: Tool/function calling support is not yet implemented (see TODOs in code).

## Troubleshooting

- **No video processing**: Ensure the agent has joined a call with video tracks available. The plugin automatically subscribes to video when tracks are added.
- **API errors**: Verify that `OPENAI_API_KEY` and `OPENAI_BASE_URL` are set correctly and that the model name is valid.
- **High latency**: Consider reducing `fps` or `frame_buffer_seconds` to decrease the number of frames sent per request.
**New file** (hunk `@@ -0,0 +1,21 @@`): `pyproject.toml` for the example.

```toml
[project]
name = "qwen3-vl-example"
version = "0.1.0"
description = "Example using Qwen3 VL hosted on Baseten with Vision Agents"
requires-python = ">=3.10"
dependencies = [
    "vision-agents",
    "vision-agents-plugins-openai",
    "vision-agents-plugins-getstream",
    "vision-agents-plugins-deepgram",
    "vision-agents-plugins-elevenlabs",
    "python-dotenv",
]

[tool.uv.sources]
vision-agents = { workspace = true }
vision-agents-plugins-openai = { workspace = true }
vision-agents-plugins-elevenlabs = { workspace = true }
vision-agents-plugins-getstream = { workspace = true }
vision-agents-plugins-deepgram = { workspace = true }
```