add glue extension #378

Merged · 2 commits · Nov 6, 2024
20 changes: 20 additions & 0 deletions agents/ten_packages/extension/glue_python_async/BUILD.gn
@@ -0,0 +1,20 @@
#
# This file is part of TEN Framework, an open source project.
# Licensed under the Apache License, Version 2.0.
# See the LICENSE file for more information.
#
import("//build/feature/ten_package.gni")

ten_package("glue_python_async") {
  package_kind = "extension"

  resources = [
    "__init__.py",
    "addon.py",
    "extension.py",
    "log.py",
    "manifest.json",
    "property.json",
    "tests",
  ]
}
37 changes: 37 additions & 0 deletions agents/ten_packages/extension/glue_python_async/README.md
@@ -0,0 +1,37 @@
# glue_python_async

This is a Python extension for the glue service. The schema of the glue service is provided in `schema.yml`.

An example OpenAI wrapper is also provided in `examples/openai_wrapper.py`.

## Features

The extension records conversation history, keeping up to `max_history` entries.

- `api_url` (required): the URL of the glue service.
- `token` (required): the Bearer token used for default authentication.

The extension supports flush, which closes the existing HTTP session; a sketch of how that might look follows.
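A minimal sketch of the flush path, assuming the TEN Python binding's `AsyncExtension` API and an `aiohttp` session (the real `extension.py` is part of this PR but not shown in this view; the class and attribute names below are illustrative, and exact TEN signatures may differ between versions):

```python
import aiohttp
from ten import AsyncExtension, AsyncTenEnv, Cmd, CmdResult, StatusCode


class GlueFlushSketch(AsyncExtension):  # illustrative only, not the PR's extension.py
    session: aiohttp.ClientSession | None = None

    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
        if cmd.get_name() == "flush":
            # Close the live HTTP session so any in-flight request is dropped.
            if self.session is not None:
                await self.session.close()
                self.session = None
            # Propagate the flush signal downstream.
            await ten_env.send_cmd(Cmd.create("flush"))
        ten_env.return_result(CmdResult.create(StatusCode.OK), cmd)
```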

## API

Refer to the `api` definition in [manifest.json](manifest.json) and the default values in [property.json](property.json).

- In:
  - `text_data` [data]: the ASR result
  - `flush` [cmd]: the flush signal
- Out:
  - `flush` [cmd]: the flush signal
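For context, an upstream extension typically feeds the `text_data` channel along these lines (a hedged sketch using the TEN Python binding; the helper name and the `is_final` property are assumptions, since the manifest is not shown in this view):

```python
from ten import Data, TenEnv


def send_asr_result(ten_env: TenEnv, text: str, is_final: bool) -> None:
    # Package an ASR result as a "text_data" data packet and send it downstream.
    data = Data.create("text_data")
    data.set_property_string("text", text)
    data.set_property_bool("is_final", is_final)  # assumed property name
    ten_env.send_data(data)
```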

## Examples

You can run the example with the following command; the wrapper service listens on port 8000 by default.

```shell
> export API_TOKEN="xxx" && export OPENAI_API_KEY="xxx" && python3 openai_wrapper.py

INFO: Started server process [162886]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
```
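For a quick check, a hypothetical client along these lines streams the SSE response (the model name and prompt are placeholders):

```python
import os

import requests  # assumption: any HTTP client with streaming support works

resp = requests.post(
    "http://localhost:8000/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['API_TOKEN']}"},
    json={
        "model": "gpt-4o-mini",  # placeholder model name
        "stream": True,
        "messages": [
            {"role": "user", "content": {"type": "text", "text": "Hello!"}}
        ],
    },
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())  # "data: <token>" chunks, then "data: [DONE]"
```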
9 changes: 9 additions & 0 deletions agents/ten_packages/extension/glue_python_async/__init__.py
@@ -0,0 +1,9 @@
#
# This file is part of TEN Framework, an open source project.
# Licensed under the Apache License, Version 2.0.
# See the LICENSE file for more information.
#
from . import addon
from .log import logger

logger.info("glue_python_async extension loaded")
20 changes: 20 additions & 0 deletions agents/ten_packages/extension/glue_python_async/addon.py
@@ -0,0 +1,20 @@
#
# This file is part of TEN Framework, an open source project.
# Licensed under the Apache License, Version 2.0.
# See the LICENSE file for more information.
#
from ten import (
    Addon,
    register_addon_as_extension,
    TenEnv,
)
from .extension import AsyncGlueExtension
from .log import logger


@register_addon_as_extension("glue_python_async")
class AsyncGlueExtensionAddon(Addon):

    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
        logger.info("AsyncGlueExtensionAddon on_create_instance")
        ten_env.on_create_instance_done(AsyncGlueExtension(name), context)
123 changes: 123 additions & 0 deletions agents/ten_packages/extension/glue_python_async/examples/openai_wrapper.py
@@ -0,0 +1,123 @@
import os
import openai
from openai import AsyncOpenAI
import traceback

from typing import List, Union
from pydantic import BaseModel, HttpUrl

from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import StreamingResponse
from fastapi import Depends, FastAPI, HTTPException, Request
import asyncio

app = FastAPI(
    title="Chat Completion API",
    description="API for streaming chat completions with support for text, image, and audio content",
    version="1.0.0",
)

# Set your OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

class TextContent(BaseModel):
    type: str = "text"
    text: str

class ImageContent(BaseModel):
    type: str = "image"
    image_url: HttpUrl

class AudioContent(BaseModel):
    type: str = "audio"
    audio_url: HttpUrl

class Message(BaseModel):
    role: str
    content: Union[TextContent, ImageContent, AudioContent, List[Union[TextContent, ImageContent, AudioContent]]]

class ChatCompletionRequest(BaseModel):
    messages: List[Message]
    model: str
    temperature: float = 1.0
    stream: bool = True

def format_openai_messages(messages):
    """Convert the pydantic Message models into the dict shape expected by the OpenAI chat API."""
    formatted_messages = []
    for msg in messages:
        if isinstance(msg.content, list):
            content = []
            for item in msg.content:
                if item.type == "text":
                    content.append({"type": "text", "text": item.text})
                elif item.type == "image":
                    content.append({"type": "image_url", "image_url": {"url": str(item.image_url)}})
                elif item.type == "audio":
                    content.append({"type": "audio_url", "audio_url": {"url": str(item.audio_url)}})
        else:
            if msg.content.type == "text":
                content = [{"type": "text", "text": msg.content.text}]
            elif msg.content.type == "image":
                content = [{"type": "image_url", "image_url": {"url": str(msg.content.image_url)}}]
            elif msg.content.type == "audio":
                content = [{"type": "audio_url", "audio_url": {"url": str(msg.content.audio_url)}}]

        formatted_messages.append({"role": msg.role, "content": content})
    return formatted_messages

security = HTTPBearer()

def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    # Compare the presented Bearer token against the API_TOKEN environment variable.
    token = credentials.credentials
    if token != os.getenv("API_TOKEN"):
        raise HTTPException(status_code=403, detail="Invalid or missing token")

@app.post("/chat/completions", dependencies=[Depends(verify_token)])
async def create_chat_completion(request: ChatCompletionRequest, req: Request):
    try:
        messages = format_openai_messages(request.messages)
        client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        response = await client.chat.completions.create(
            model=request.model,
            messages=messages,
            temperature=request.temperature,
            stream=request.stream,
        )

        async def generate():
            # Re-emit the model's token deltas as Server-Sent Events.
            try:
                async for chunk in response:
                    if chunk.choices[0].delta.content is not None:
                        yield f"data: {chunk.choices[0].delta.content}\n\n"
                yield "data: [DONE]\n\n"
            except asyncio.CancelledError:
                print("Request was cancelled")
                raise

        return StreamingResponse(generate(), media_type="text/event-stream")
    except asyncio.CancelledError:
        print("Request was cancelled")
        raise HTTPException(status_code=499, detail="Request was cancelled")
    except Exception as e:
        traceback_str = "".join(traceback.format_tb(e.__traceback__))
        error_message = f"{str(e)}\n{traceback_str}"
        print(error_message)
        raise HTTPException(status_code=500, detail=error_message)

if __name__ == "__main__":
    import uvicorn

    # Optional proxy support, disabled by default:
    # http_proxy = os.getenv("HTTP_PROXY")
    # https_proxy = os.getenv("HTTPS_PROXY")
    #
    # if http_proxy or https_proxy:
    #     proxies = {"http": http_proxy, "https": https_proxy}
    #     openai.proxy = proxies

    uvicorn.run(app, host="0.0.0.0", port=8000)
@@ -0,0 +1,3 @@
uvicorn
fastapi
openai