From 69a3b44511957b451ed20514c2d7b1803e71903d Mon Sep 17 00:00:00 2001 From: zhangqianze Date: Mon, 23 Dec 2024 16:54:57 +0800 Subject: [PATCH] fix: fix circular imports and breaks --- .../bingsearch_tool_python/extension.py | 2 +- .../extension/coze_python_async/extension.py | 11 +++++------ .../extension/deepgram_asr_python/extension.py | 2 +- .../extension/dify_python/extension.py | 7 +++---- .../extension/gemini_v2v_python/extension.py | 6 +++--- .../extension/glue_python_async/extension.py | 15 ++++++++++----- .../interrupt_detector_python/extension.py | 2 +- .../openai_chatgpt_python/extension.py | 2 +- .../extension/openai_v2v_python/extension.py | 13 ++++++++----- .../vision_analyze_tool_python/extension.py | 2 +- .../extension/vision_tool_python/extension.py | 7 ++----- .../weatherapi_tool_python/extension.py | 4 ++-- .../interface/ten_ai_base/__init__.py | 17 ++++++++++++++--- .../ten_ai_base/interface/ten_ai_base/llm.py | 7 +++---- 14 files changed, 55 insertions(+), 42 deletions(-) diff --git a/agents/ten_packages/extension/bingsearch_tool_python/extension.py b/agents/ten_packages/extension/bingsearch_tool_python/extension.py index ec706fc7..4cabe88a 100644 --- a/agents/ten_packages/extension/bingsearch_tool_python/extension.py +++ b/agents/ten_packages/extension/bingsearch_tool_python/extension.py @@ -14,7 +14,7 @@ ) from ten.async_ten_env import AsyncTenEnv from ten_ai_base.helper import get_properties_string -from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension +from ten_ai_base import AsyncLLMToolBaseExtension from ten_ai_base.types import LLMToolMetadata, LLMToolMetadataParameter, LLMToolResult from .log import logger diff --git a/agents/ten_packages/extension/coze_python_async/extension.py b/agents/ten_packages/extension/coze_python_async/extension.py index b5c739f8..9af8b767 100644 --- a/agents/ten_packages/extension/coze_python_async/extension.py +++ b/agents/ten_packages/extension/coze_python_async/extension.py @@ -24,14 +24,13 @@ Data, ) 
-from ten_ai_base import BaseConfig, ChatMemory -from ten_ai_base.llm import ( +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ChatMemory +from ten_ai_base import ( AsyncLLMBaseExtension, - LLMCallCompletionArgs, - LLMDataCompletionArgs, - LLMToolMetadata, ) -from ten_ai_base.types import LLMChatCompletionUserMessageParam + +from ten_ai_base.types import LLMChatCompletionUserMessageParam, LLMCallCompletionArgs, LLMDataCompletionArgs, LLMToolMetadata CMD_IN_FLUSH = "flush" CMD_IN_ON_USER_JOINED = "on_user_joined" diff --git a/agents/ten_packages/extension/deepgram_asr_python/extension.py b/agents/ten_packages/extension/deepgram_asr_python/extension.py index e3e5c6d7..712eb805 100644 --- a/agents/ten_packages/extension/deepgram_asr_python/extension.py +++ b/agents/ten_packages/extension/deepgram_asr_python/extension.py @@ -18,7 +18,7 @@ ) from dataclasses import dataclass -from ten_ai_base import BaseConfig +from ten_ai_base.config import BaseConfig DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text" DATA_OUT_TEXT_DATA_PROPERTY_IS_FINAL = "is_final" diff --git a/agents/ten_packages/extension/dify_python/extension.py b/agents/ten_packages/extension/dify_python/extension.py index b4bed528..f9ae52b7 100644 --- a/agents/ten_packages/extension/dify_python/extension.py +++ b/agents/ten_packages/extension/dify_python/extension.py @@ -12,12 +12,11 @@ import aiohttp from ten import AsyncTenEnv, AudioFrame, Cmd, CmdResult, Data, StatusCode, VideoFrame -from ten_ai_base import BaseConfig -from ten_ai_base.llm import ( +from ten_ai_base.config import BaseConfig +from ten_ai_base import ( AsyncLLMBaseExtension, - LLMDataCompletionArgs, ) -from ten_ai_base.types import LLMChatCompletionUserMessageParam +from ten_ai_base.types import LLMChatCompletionUserMessageParam, LLMDataCompletionArgs CMD_IN_FLUSH = "flush" CMD_IN_ON_USER_JOINED = "on_user_joined" diff --git a/agents/ten_packages/extension/gemini_v2v_python/extension.py 
b/agents/ten_packages/extension/gemini_v2v_python/extension.py index 6d833b61..ee681cc3 100644 --- a/agents/ten_packages/extension/gemini_v2v_python/extension.py +++ b/agents/ten_packages/extension/gemini_v2v_python/extension.py @@ -29,9 +29,9 @@ from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL from ten_ai_base.llm import AsyncLLMBaseExtension from dataclasses import dataclass -from ten_ai_base import ( - BaseConfig, - ChatMemory, +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ChatMemory +from ten_ai_base.usage import ( LLMUsage, LLMCompletionTokensDetails, LLMPromptTokensDetails, diff --git a/agents/ten_packages/extension/glue_python_async/extension.py b/agents/ten_packages/extension/glue_python_async/extension.py index 7617d248..6b5fb18c 100644 --- a/agents/ten_packages/extension/glue_python_async/extension.py +++ b/agents/ten_packages/extension/glue_python_async/extension.py @@ -25,21 +25,26 @@ Data, ) -from ten_ai_base import ( - BaseConfig, +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ( ChatMemory, + EVENT_MEMORY_APPENDED, +) +from ten_ai_base.usage import ( LLMUsage, LLMCompletionTokensDetails, LLMPromptTokensDetails, - EVENT_MEMORY_APPENDED, ) -from ten_ai_base.llm import ( +from ten_ai_base import ( AsyncLLMBaseExtension, +) +from ten_ai_base.types import ( + LLMChatCompletionUserMessageParam, + LLMToolResult, LLMCallCompletionArgs, LLMDataCompletionArgs, LLMToolMetadata, ) -from ten_ai_base.types import LLMChatCompletionUserMessageParam, LLMToolResult CMD_IN_FLUSH = "flush" CMD_IN_ON_USER_JOINED = "on_user_joined" diff --git a/agents/ten_packages/extension/interrupt_detector_python/extension.py b/agents/ten_packages/extension/interrupt_detector_python/extension.py index 3521dbca..a01cdc8a 100644 --- a/agents/ten_packages/extension/interrupt_detector_python/extension.py +++ b/agents/ten_packages/extension/interrupt_detector_python/extension.py @@ -34,7 +34,7 @@ def 
send_flush_cmd(self, ten: TenEnv) -> None: flush_cmd = Cmd.create(CMD_NAME_FLUSH) ten.send_cmd( flush_cmd, - lambda ten, result: ten.log_info("send_cmd done"), + lambda ten, result, _: ten.log_info("send_cmd done"), ) ten.log_info(f"sent cmd: {CMD_NAME_FLUSH}") diff --git a/agents/ten_packages/extension/openai_chatgpt_python/extension.py b/agents/ten_packages/extension/openai_chatgpt_python/extension.py index 0768e298..74b3cce6 100644 --- a/agents/ten_packages/extension/openai_chatgpt_python/extension.py +++ b/agents/ten_packages/extension/openai_chatgpt_python/extension.py @@ -17,7 +17,7 @@ get_property_bool, get_property_string, ) -from ten_ai_base.llm import AsyncLLMBaseExtension +from ten_ai_base import AsyncLLMBaseExtension from ten_ai_base.types import ( LLMCallCompletionArgs, LLMChatCompletionContentPartParam, diff --git a/agents/ten_packages/extension/openai_v2v_python/extension.py b/agents/ten_packages/extension/openai_v2v_python/extension.py index a85ac431..fc0fefa2 100644 --- a/agents/ten_packages/extension/openai_v2v_python/extension.py +++ b/agents/ten_packages/extension/openai_v2v_python/extension.py @@ -25,13 +25,15 @@ ) from ten.audio_frame import AudioFrameDataFmt from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL -from ten_ai_base.llm import AsyncLLMBaseExtension +from ten_ai_base import AsyncLLMBaseExtension from dataclasses import dataclass -from ten_ai_base import ( - BaseConfig, +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ( ChatMemory, EVENT_MEMORY_EXPIRED, EVENT_MEMORY_APPENDED, +) +from ten_ai_base.usage import ( LLMUsage, LLMCompletionTokensDetails, LLMPromptTokensDetails, @@ -350,7 +352,8 @@ def get_time_ms() -> int: f"On response done {msg_resp_id} {status} {message.response.usage}" ) if message.response.usage: - await self._update_usage(message.response.usage) + pass + # await self._update_usage(message.response.usage) case ResponseAudioTranscriptDelta(): self.ten_env.log_info( f"On 
response transcript delta {message.response_id} {message.output_index} {message.content_index} {message.delta}" @@ -586,7 +589,7 @@ def tool_dict(tool: LLMToolMetadata): async def on_tools_update(self, _: AsyncTenEnv, tool: LLMToolMetadata) -> None: """Called when a new tool is registered. Implement this method to process the new tool.""" self.ten_env.log_info(f"on tools update {tool}") - await self._update_session() + # await self._update_session() def _replace(self, prompt: str) -> str: result = prompt diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/extension.py b/agents/ten_packages/extension/vision_analyze_tool_python/extension.py index 3f9588a1..7b565706 100644 --- a/agents/ten_packages/extension/vision_analyze_tool_python/extension.py +++ b/agents/ten_packages/extension/vision_analyze_tool_python/extension.py @@ -17,7 +17,7 @@ from base64 import b64encode from ten_ai_base.const import CMD_CHAT_COMPLETION_CALL -from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension +from ten_ai_base import AsyncLLMToolBaseExtension from ten_ai_base.types import ( LLMChatCompletionUserMessageParam, LLMToolMetadata, diff --git a/agents/ten_packages/extension/vision_tool_python/extension.py b/agents/ten_packages/extension/vision_tool_python/extension.py index 13f645dd..f3cfc957 100644 --- a/agents/ten_packages/extension/vision_tool_python/extension.py +++ b/agents/ten_packages/extension/vision_tool_python/extension.py @@ -3,11 +3,8 @@ # Licensed under the Apache License, Version 2.0. # See the LICENSE file for more information. 
# -from ten_ai_base.llm_tool import ( - AsyncLLMToolBaseExtension, - LLMToolMetadata, - LLMToolResult, -) +from ten_ai_base import AsyncLLMToolBaseExtension +from ten_ai_base.types import LLMToolMetadata, LLMToolResult from ten import ( AudioFrame, VideoFrame, diff --git a/agents/ten_packages/extension/weatherapi_tool_python/extension.py b/agents/ten_packages/extension/weatherapi_tool_python/extension.py index bf985cc9..17365b61 100644 --- a/agents/ten_packages/extension/weatherapi_tool_python/extension.py +++ b/agents/ten_packages/extension/weatherapi_tool_python/extension.py @@ -15,8 +15,8 @@ from ten import Cmd from ten.async_ten_env import AsyncTenEnv -from ten_ai_base import BaseConfig -from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension +from ten_ai_base.config import BaseConfig +from ten_ai_base import AsyncLLMToolBaseExtension from ten_ai_base.types import LLMToolMetadata, LLMToolMetadataParameter, LLMToolResult CMD_TOOL_REGISTER = "tool_register" diff --git a/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/__init__.py b/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/__init__.py index 167e6afe..f31a766d 100644 --- a/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/__init__.py +++ b/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/__init__.py @@ -4,13 +4,19 @@ # See the LICENSE file for more information. 
# -from .types import LLMCallCompletionArgs, LLMDataCompletionArgs, LLMToolMetadata, LLMToolResult, LLMChatCompletionMessageParam +from .types import ( + LLMCallCompletionArgs, + LLMDataCompletionArgs, + LLMToolMetadata, + LLMToolResult, + LLMChatCompletionMessageParam, +) from .usage import LLMUsage, LLMCompletionTokensDetails, LLMPromptTokensDetails -from .llm import AsyncLLMBaseExtension -from .llm_tool import AsyncLLMToolBaseExtension from .chat_memory import ChatMemory, EVENT_MEMORY_APPENDED, EVENT_MEMORY_EXPIRED from .helper import AsyncQueue, AsyncEventEmitter from .config import BaseConfig +from .llm import AsyncLLMBaseExtension +from .llm_tool import AsyncLLMToolBaseExtension # Specify what should be imported when a user imports * from the # ten_ai_base package. @@ -26,4 +32,9 @@ "AsyncEventEmitter", "BaseConfig", "LLMChatCompletionMessageParam", + "LLMUsage", + "LLMCompletionTokensDetails", + "LLMPromptTokensDetails", + "EVENT_MEMORY_APPENDED", + "EVENT_MEMORY_EXPIRED", ] diff --git a/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/llm.py b/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/llm.py index eb8128e8..5ef942d3 100644 --- a/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/llm.py +++ b/agents/ten_packages/system/ten_ai_base/interface/ten_ai_base/llm.py @@ -5,6 +5,7 @@ # from abc import ABC, abstractmethod import asyncio +import traceback from ten import ( AsyncExtension, @@ -74,9 +75,7 @@ async def on_cmd(self, async_ten_env: AsyncTenEnv, cmd: Cmd) -> None: async_ten_env.log_debug(f"on_cmd name {cmd_name}") if cmd_name == CMD_TOOL_REGISTER: try: - tool_metadata_json = json.loads( - cmd.get_property_to_json(CMD_PROPERTY_TOOL) - ) + tool_metadata_json = cmd.get_property_to_json(CMD_PROPERTY_TOOL) async_ten_env.log_info(f"register tool: {tool_metadata_json}") tool_metadata = LLMToolMetadata.model_validate_json(tool_metadata_json) async with self.available_tools_lock: @@ -84,7 +83,7 @@ async def on_cmd(self, 
async_ten_env: AsyncTenEnv, cmd: Cmd) -> None: await self.on_tools_update(async_ten_env, tool_metadata) await async_ten_env.return_result(CmdResult.create(StatusCode.OK), cmd) except Exception as err: - async_ten_env.log_warn(f"on_cmd failed: {err}") + async_ten_env.log_warn(f"on_cmd failed: {traceback.format_exc()}") await async_ten_env.return_result( CmdResult.create(StatusCode.ERROR), cmd )