diff --git a/cookbook/providers/google/basic.py b/cookbook/providers/google/basic.py
index 63f2d98df..0ee12bb5d 100644
--- a/cookbook/providers/google/basic.py
+++ b/cookbook/providers/google/basic.py
@@ -1,7 +1,7 @@
 from phi.agent import Agent, RunResponse  # noqa
 from phi.model.google import Gemini
 
-agent = Agent(model=Gemini(id="gemini-1.5-flash"), markdown=True)
+agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), markdown=True)
 
 # Get the response in a variable
 # run: RunResponse = agent.run("Share a 2 sentence horror story")
diff --git a/cookbook/providers/google/basic_stream.py b/cookbook/providers/google/basic_stream.py
index 44ca0f5e4..692fa3ac5 100644
--- a/cookbook/providers/google/basic_stream.py
+++ b/cookbook/providers/google/basic_stream.py
@@ -2,7 +2,7 @@
 from phi.agent import Agent, RunResponse  # noqa
 from phi.model.google import Gemini
 
-agent = Agent(model=Gemini(id="gemini-1.5-flash"), markdown=True)
+agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), markdown=True)
 
 # Get the response in a variable
 # run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
diff --git a/cookbook/providers/google/data_analyst.py b/cookbook/providers/google/data_analyst.py
index d5802d7b7..de93434a7 100644
--- a/cookbook/providers/google/data_analyst.py
+++ b/cookbook/providers/google/data_analyst.py
@@ -11,7 +11,7 @@
 )
 
 agent = Agent(
-    model=Gemini(id="gemini-1.5-flash"),
+    model=Gemini(id="gemini-2.0-flash-exp"),
     tools=[duckdb_tools],
     markdown=True,
     show_tool_calls=True,
diff --git a/cookbook/providers/google/finance_agent.py b/cookbook/providers/google/finance_agent.py
index 4876f12ac..6a6f6ddf8 100644
--- a/cookbook/providers/google/finance_agent.py
+++ b/cookbook/providers/google/finance_agent.py
@@ -5,7 +5,7 @@
 from phi.tools.yfinance import YFinanceTools
 
 agent = Agent(
-    model=Gemini(id="gemini-1.5-flash"),
+    model=Gemini(id="gemini-2.0-flash-exp"),
     tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)],
     show_tool_calls=True,
     description="You are an investment analyst that researches stocks and helps users make informed decisions.",
diff --git a/cookbook/providers/google/knowledge.py b/cookbook/providers/google/knowledge.py
index bdf7299c7..37d4f3bb1 100644
--- a/cookbook/providers/google/knowledge.py
+++ b/cookbook/providers/google/knowledge.py
@@ -14,7 +14,7 @@
 knowledge_base.load(recreate=True)  # Comment out after first run
 
 agent = Agent(
-    model=Gemini(id="gemini-1.5-flash"),
+    model=Gemini(id="gemini-2.0-flash-exp"),
     knowledge_base=knowledge_base,
     use_tools=True,
     show_tool_calls=True,
diff --git a/cookbook/providers/google/storage.py b/cookbook/providers/google/storage.py
index 8fae33515..ed28aaf75 100644
--- a/cookbook/providers/google/storage.py
+++ b/cookbook/providers/google/storage.py
@@ -8,7 +8,7 @@
 db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
 
 agent = Agent(
-    model=Gemini(id="gemini-1.5-flash"),
+    model=Gemini(id="gemini-2.0-flash-exp"),
     storage=PgAgentStorage(table_name="agent_sessions", db_url=db_url),
     tools=[DuckDuckGo()],
     add_history_to_messages=True,
diff --git a/cookbook/providers/google/structured_output.py b/cookbook/providers/google/structured_output.py
index 377f45544..eecd5c4c6 100644
--- a/cookbook/providers/google/structured_output.py
+++ b/cookbook/providers/google/structured_output.py
@@ -17,7 +17,7 @@ class MovieScript(BaseModel):
 
 
 movie_agent = Agent(
-    model=Gemini(id="gemini-1.5-flash"),
+    model=Gemini(id="gemini-2.0-flash-exp"),
     description="You help people write movie scripts.",
     response_model=MovieScript,
 )
diff --git a/cookbook/providers/google/web_search.py b/cookbook/providers/google/web_search.py
index e3746c5aa..7c446b66d 100644
--- a/cookbook/providers/google/web_search.py
+++ b/cookbook/providers/google/web_search.py
@@ -4,5 +4,5 @@
 from phi.model.google import Gemini
 from phi.tools.duckduckgo import DuckDuckGo
 
-agent = Agent(model=Gemini(id="gemini-1.5-flash"), tools=[DuckDuckGo()], show_tool_calls=True, markdown=True)
+agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGo()], show_tool_calls=True, markdown=True)
 agent.print_response("Whats happening in France?", stream=True)
diff --git a/cookbook/providers/openai/image_agent.py b/cookbook/providers/openai/image_agent.py
new file mode 100644
index 000000000..1c1b05485
--- /dev/null
+++ b/cookbook/providers/openai/image_agent.py
@@ -0,0 +1,17 @@
+from phi.agent import Agent
+from phi.model.openai import OpenAIChat
+from phi.tools.duckduckgo import DuckDuckGo
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-4o"),
+    tools=[DuckDuckGo()],
+    markdown=True,
+)
+
+agent.print_response(
+    "Tell me about this image and give me the latest news about it.",
+    images=[
+        "https://upload.wikimedia.org/wikipedia/commons/b/bf/Krakow_-_Kosciol_Mariacki.jpg",
+    ],
+    stream=True,
+)
diff --git a/phi/agent/agent.py b/phi/agent/agent.py
index 21b456fde..f623f5a58 100644
--- a/phi/agent/agent.py
+++ b/phi/agent/agent.py
@@ -2645,7 +2645,7 @@ def _create_run_data(self) -> Dict[str, Any]:
             run_data.update(
                 {
                     "run_input": self.run_input,
-                    "run_response": self.run_response,
+                    "run_response": self.run_response.to_dict(),
                     "run_response_format": run_response_format,
                 }
             )
diff --git a/phi/agent/session.py b/phi/agent/session.py
index e2bc1be5f..ee50c6c3e 100644
--- a/phi/agent/session.py
+++ b/phi/agent/session.py
@@ -27,7 +27,18 @@ class AgentSession(BaseModel):
     model_config = ConfigDict(from_attributes=True)
 
     def monitoring_data(self) -> Dict[str, Any]:
-        return self.model_dump()
+        monitoring_data = self.model_dump(exclude={"memory"})
+        # Google Gemini adds a "parts" field to the messages, which is not serializable
+        # If there are runs in the memory, remove the "parts" from the messages
+        if self.memory is not None and "runs" in self.memory:
+            _runs = self.memory["runs"]
+            if len(_runs) > 0:
+                for _run in _runs:
+                    if "messages" in _run:
+                        for m in _run["messages"]:
+                            if isinstance(m, dict):
+                                m.pop("parts", None)
+        return monitoring_data
 
     def telemetry_data(self) -> Dict[str, Any]:
         return self.model_dump(include={"model", "created_at", "updated_at"})
diff --git a/phi/model/base.py b/phi/model/base.py
index df46e9919..a98dc72af 100644
--- a/phi/model/base.py
+++ b/phi/model/base.py
@@ -422,7 +422,7 @@ def _process_bytes_image(self, image: bytes) -> Dict[str, Any]:
         image_url = f"data:image/jpeg;base64,{base64_image}"
         return {"type": "image_url", "image_url": {"url": image_url}}
 
-    def _process_image(self, image: Union[str, Dict, bytes]) -> Optional[Dict[str, Any]]:
+    def process_image(self, image: Union[str, Dict, bytes]) -> Optional[Dict[str, Any]]:
         """Process an image based on the format."""
 
         if isinstance(image, dict):
@@ -468,7 +468,7 @@ def add_images_to_message(
         # Add images to the message content
         for image in images:
             try:
-                image_data = self._process_image(image)
+                image_data = self.process_image(image)
                 if image_data:
                     message_content_with_image.append(image_data)
             except Exception as e:
diff --git a/phi/model/google/gemini.py b/phi/model/google/gemini.py
index 4aeb08a1d..add7828d8 100644
--- a/phi/model/google/gemini.py
+++ b/phi/model/google/gemini.py
@@ -61,8 +61,7 @@ class Gemini(Model):
     Gemini model class for Google's Generative AI models.
 
     Attributes:
-
-        id (str): Model ID. Default is `gemini-1.5-flash`.
+        id (str): Model ID. Default is `gemini-2.0-flash-exp`.
         name (str): The name of this chat model instance. Default is `Gemini`.
         provider (str): Model provider. Default is `Google`.
         function_declarations (List[FunctionDeclaration]): List of function declarations.
@@ -73,7 +72,7 @@ class Gemini(Model):
         client (GenerativeModel): Generative model client.
     """
 
-    id: str = "gemini-1.5-flash"
+    id: str = "gemini-2.0-flash-exp"
     name: str = "Gemini"
     provider: str = "Google"
 
@@ -100,13 +99,13 @@ def get_client(self) -> GenerativeModel:
         if self.client:
             return self.client
 
-        _client_params: Dict[str, Any] = {}
+        client_params: Dict[str, Any] = {}
         # Set client parameters if they are provided
         if self.api_key:
-            _client_params["api_key"] = self.api_key
+            client_params["api_key"] = self.api_key
         if self.client_params:
-            _client_params.update(self.client_params)
-        genai.configure(**_client_params)
+            client_params.update(self.client_params)
+        genai.configure(**client_params)
         return genai.GenerativeModel(model_name=self.id, **self.request_kwargs)
 
     @property
@@ -117,18 +116,18 @@ def request_kwargs(self) -> Dict[str, Any]:
         Returns:
             Dict[str, Any]: The request keyword arguments.
         """
-        _request_params: Dict[str, Any] = {}
+        request_params: Dict[str, Any] = {}
         if self.generation_config:
-            _request_params["generation_config"] = self.generation_config
+            request_params["generation_config"] = self.generation_config
         if self.safety_settings:
-            _request_params["safety_settings"] = self.safety_settings
+            request_params["safety_settings"] = self.safety_settings
         if self.generative_model_kwargs:
-            _request_params.update(self.generative_model_kwargs)
+            request_params.update(self.generative_model_kwargs)
         if self.function_declarations:
-            _request_params["tools"] = [GeminiTool(function_declarations=self.function_declarations)]
-        return _request_params
+            request_params["tools"] = [GeminiTool(function_declarations=self.function_declarations)]
+        return request_params
 
-    def _format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
+    def format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
         """
         Converts a list of Message objects to the Gemini-compatible format.
@@ -139,11 +138,17 @@ def _format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
             List[Dict[str, Any]]: The formatted_messages list of messages.
         """
         formatted_messages: List = []
-        for msg in messages:
-            content = msg.content
-            role = "model" if msg.role == "system" else "user" if msg.role == "tool" else msg.role
-            if not content or msg.role == "tool":
-                parts = msg.parts  # type: ignore
+        for message in messages:
+            message_for_model = {}
+
+            # Add role to the message for the model
+            role = "model" if message.role == "system" else "user" if message.role == "tool" else message.role
+            message_for_model["role"] = role
+
+            # Add content to the message for the model
+            content = message.content
+            if not content or message.role == "tool":
+                parts = message.parts  # type: ignore
             else:
                 if isinstance(content, str):
                     parts = [content]
@@ -151,10 +156,11 @@ def _format_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
                     parts = content
                 else:
                     parts = [" "]
-            formatted_messages.append({"role": role, "parts": parts})
+            message_for_model["parts"] = parts
+            formatted_messages.append(message_for_model)
         return formatted_messages
 
-    def _format_functions(self, params: Dict[str, Any]) -> Dict[str, Any]:
+    def format_functions(self, params: Dict[str, Any]) -> Dict[str, Any]:
         """
         Converts function parameters to a Gemini-compatible format.
@@ -223,7 +229,7 @@ def add_tool(
                     function_declaration = FunctionDeclaration(
                         name=func.name,
                         description=func.description,
-                        parameters=self._format_functions(func.parameters),
+                        parameters=self.format_functions(func.parameters),
                     )
                     self.function_declarations.append(function_declaration)
                     logger.debug(f"Function {name} from {tool.name} added to model.")
@@ -236,7 +242,7 @@ def add_tool(
                 function_declaration = FunctionDeclaration(
                     name=tool.name,
                     description=tool.description,
-                    parameters=self._format_functions(tool.parameters),
+                    parameters=self.format_functions(tool.parameters),
                 )
                 self.function_declarations.append(function_declaration)
                 logger.debug(f"Function {tool.name} added to model.")
@@ -250,7 +256,7 @@ def add_tool(
                 function_declaration = FunctionDeclaration(
                     name=func.name,
                     description=func.description,
-                    parameters=self._format_functions(func.parameters),
+                    parameters=self.format_functions(func.parameters),
                 )
                 self.function_declarations.append(function_declaration)
                 logger.debug(f"Function '{func.name}' added to model.")
@@ -267,7 +273,7 @@ def invoke(self, messages: List[Message]):
         Returns:
             GenerateContentResponse: The response from the model.
         """
-        return self.get_client().generate_content(contents=self._format_messages(messages))
+        return self.get_client().generate_content(contents=self.format_messages(messages))
 
     def invoke_stream(self, messages: List[Message]):
         """
@@ -280,18 +286,11 @@ def invoke_stream(self, messages: List[Message]):
             Iterator[GenerateContentResponse]: The response from the model as a stream.
         """
         yield from self.get_client().generate_content(
-            contents=self._format_messages(messages),
+            contents=self.format_messages(messages),
             stream=True,
         )
 
-    def _log_messages(self, messages: List[Message]) -> None:
-        """
-        Log messages for debugging.
-        """
-        for m in messages:
-            m.log()
-
-    def _update_usage_metrics(
+    def update_usage_metrics(
         self,
         assistant_message: Message,
         usage: Optional[ResultGenerateContentResponse] = None,
@@ -325,9 +324,9 @@ def _update_usage_metrics(
             assistant_message.metrics["time_to_first_token"] = metrics.time_to_first_token
             self.metrics["time_to_first_token"] = metrics.time_to_first_token
 
-    def _create_assistant_message(self, response: GenerateContentResponse, metrics: Metrics) -> Message:
+    def create_assistant_message(self, response: GenerateContentResponse, metrics: Metrics) -> Message:
         """
-        Create an assistant message from the model response.
+        Create an assistant message from the response.
 
         Args:
             response (GenerateContentResponse): The model response.
@@ -375,11 +374,10 @@ def _create_assistant_message(self, response: GenerateContentResponse, metrics:
             assistant_message.tool_calls = message_data.response_tool_calls
 
         # -*- Update usage metrics
-        self._update_usage_metrics(assistant_message, message_data.response_usage, metrics)
-
+        self.update_usage_metrics(assistant_message, message_data.response_usage, metrics)
         return assistant_message
 
-    def _get_function_calls_to_run(
+    def get_function_calls_to_run(
         self,
         assistant_message: Message,
         messages: List[Message],
@@ -407,7 +405,7 @@ def _get_function_calls_to_run(
                 function_calls_to_run.append(_function_call)
         return function_calls_to_run
 
-    def _format_function_call_results(
+    def format_function_call_results(
        self,
         function_call_results: List[Message],
         messages: List[Message],
@@ -431,16 +429,9 @@ def _format_function_call_results(
                 )
                 combined_content.append(result.content)
                 combined_parts.append(function_response)
+            messages.append(Message(role="tool", content="\n".join(combined_content), parts=combined_parts))  # type: ignore
 
-            messages.append(
-                Message(
-                    role="tool",
-                    content="\n".join(combined_content),
-                    parts=combined_parts
-                )
-            )
-
-    def _handle_tool_calls(self, assistant_message: Message, messages: List[Message], model_response: ModelResponse):
+    def handle_tool_calls(self, assistant_message: Message, messages: List[Message], model_response: ModelResponse):
         """
         Handle tool calls in the assistant message.
@@ -454,7 +445,7 @@ def _handle_tool_calls(self, assistant_message: Message, messages: List[Message]
         """
         if assistant_message.tool_calls and self.run_tools:
             model_response.content = assistant_message.get_content_string() or ""
-            function_calls_to_run = self._get_function_calls_to_run(assistant_message, messages)
+            function_calls_to_run = self.get_function_calls_to_run(assistant_message, messages)
 
             if self.show_tool_calls:
                 if len(function_calls_to_run) == 1:
@@ -472,7 +463,7 @@ def _handle_tool_calls(self, assistant_message: Message, messages: List[Message]
             ):
                 pass
 
-            self._format_function_call_results(function_call_results, messages)
+            self.format_function_call_results(function_call_results, messages)
 
             return model_response
         return None
@@ -492,12 +483,14 @@ def response(self, messages: List[Message]) -> ModelResponse:
         model_response = ModelResponse()
         metrics = Metrics()
 
+        # -*- Generate response
         metrics.response_timer.start()
         response: GenerateContentResponse = self.invoke(messages=messages)
         metrics.response_timer.stop()
 
         # -*- Create assistant message
-        assistant_message = self._create_assistant_message(response=response, metrics=metrics)
+        assistant_message = self.create_assistant_message(response=response, metrics=metrics)
+
         # -*- Add assistant message to messages
         messages.append(assistant_message)
@@ -505,7 +498,12 @@ def response(self, messages: List[Message]) -> ModelResponse:
         assistant_message.log()
         metrics.log()
 
-        if self._handle_tool_calls(assistant_message, messages, model_response):
+        # -*- Update model response with assistant message content
+        if assistant_message.content is not None:
+            model_response.content = assistant_message.get_content_string()
+
+        # -*- Handle tool calls
+        if self.handle_tool_calls(assistant_message, messages, model_response) is not None:
             response_after_tool_calls = self.response(messages=messages)
             if response_after_tool_calls.content is not None:
                 if model_response.content is None:
@@ -513,18 +511,14 @@ def response(self, messages: List[Message]) -> ModelResponse:
                 model_response.content += response_after_tool_calls.content
             return model_response
 
-        if assistant_message.content is not None:
-            model_response.content = assistant_message.get_content_string()
-
         # -*- Remove parts from messages
         # for m in messages:
         #     if hasattr(m, "parts"):
         #         m.parts = None
-
         logger.debug("---------- Gemini Response End ----------")
         return model_response
 
-    def _handle_stream_tool_calls(self, assistant_message: Message, messages: List[Message]):
+    def handle_stream_tool_calls(self, assistant_message: Message, messages: List[Message]):
         """
         Parse and run function calls and append the results to messages.
@@ -536,7 +530,7 @@ def _handle_stream_tool_calls(self, assistant_message: Message, messages: List[M
             Iterator[ModelResponse]: Yields model responses during function execution.
         """
         if assistant_message.tool_calls and self.run_tools:
-            function_calls_to_run = self._get_function_calls_to_run(assistant_message, messages)
+            function_calls_to_run = self.get_function_calls_to_run(assistant_message, messages)
 
             if self.show_tool_calls:
                 if len(function_calls_to_run) == 1:
@@ -553,7 +547,7 @@ def _handle_stream_tool_calls(self, assistant_message: Message, messages: List[M
             ):
                 yield intermediate_model_response
 
-            self._format_function_call_results(function_call_results, messages)
+            self.format_function_call_results(function_call_results, messages)
 
     def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:
         """
@@ -606,7 +600,6 @@ def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:
                     }
                 )
                 message_data.response_usage = response.usage_metadata
-
         metrics.response_timer.stop()
 
         # -*- Create assistant message
@@ -621,7 +614,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:
             assistant_message.tool_calls = message_data.response_tool_calls
 
         # -*- Update usage metrics
-        self._update_usage_metrics(assistant_message, message_data.response_usage, metrics)
+        self.update_usage_metrics(assistant_message, message_data.response_usage, metrics)
 
         # -*- Add assistant message to messages
         messages.append(assistant_message)
@@ -631,12 +624,11 @@ def response_stream(self, messages: List[Message]) -> Iterator[ModelResponse]:
         metrics.log()
 
         if assistant_message.tool_calls is not None and len(assistant_message.tool_calls) > 0 and self.run_tools:
-            yield from self._handle_stream_tool_calls(assistant_message, messages)
+            yield from self.handle_stream_tool_calls(assistant_message, messages)
             yield from self.response_stream(messages=messages)
 
         # -*- Remove parts from messages
         # for m in messages:
         #     if hasattr(m, "parts"):
         #         m.parts = None
-
         logger.debug("---------- Gemini Response End ----------")
diff --git a/phi/model/groq/groq.py b/phi/model/groq/groq.py
index 6d7777c8a..1e8cbee54 100644
--- a/phi/model/groq/groq.py
+++ b/phi/model/groq/groq.py
@@ -247,7 +247,7 @@ def to_dict(self) -> Dict[str, Any]:
             model_dict["tool_choice"] = self.tool_choice
         return model_dict
 
-    def process_message(self, message: Message) -> Dict[str, Any]:
+    def format_message(self, message: Message) -> Dict[str, Any]:
         """
         Format a message into the format expected by OpenAI.
@@ -277,7 +277,7 @@ def invoke(self, messages: List[Message]) -> ChatCompletion:
         """
         return self.get_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             **self.request_kwargs,
         )
 
@@ -293,7 +293,7 @@ async def ainvoke(self, messages: List[Message]) -> ChatCompletion:
         """
         return await self.get_async_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             **self.request_kwargs,
         )
 
@@ -309,7 +309,7 @@ def invoke_stream(self, messages: List[Message]) -> Iterator[ChatCompletionChunk
         """
         yield from self.get_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             **self.request_kwargs,
         )
@@ -326,7 +326,7 @@ async def ainvoke_stream(self, messages: List[Message]) -> Any:
         """
         async_stream = await self.get_async_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             **self.request_kwargs,
         )
diff --git a/phi/model/ollama/chat.py b/phi/model/ollama/chat.py
index 153a6e481..cf6136298 100644
--- a/phi/model/ollama/chat.py
+++ b/phi/model/ollama/chat.py
@@ -157,7 +157,7 @@ def to_dict(self) -> Dict[str, Any]:
             model_dict["request_params"] = self.request_params
         return model_dict
 
-    def process_message(self, message: Message) -> Dict[str, Any]:
+    def format_message(self, message: Message) -> Dict[str, Any]:
         """
         Format a message into the format expected by Ollama.
@@ -171,8 +171,9 @@ def process_message(self, message: Message) -> Dict[str, Any]:
             "role": message.role,
             "content": message.content,
         }
-        if message.images is not None:
-            _message["images"] = message.images
+        if message.role == "user":
+            if message.images is not None:
+                _message["images"] = message.images
         return _message
 
     def invoke(self, messages: List[Message]) -> Mapping[str, Any]:
@@ -195,7 +196,7 @@ def invoke(self, messages: List[Message]) -> Mapping[str, Any]:
 
         return self.get_client().chat(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             **request_kwargs,
         )  # type: ignore
 
@@ -219,7 +220,7 @@ async def ainvoke(self, messages: List[Message]) -> Mapping[str, Any]:
 
         return await self.get_async_client().chat(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             **request_kwargs,
         )  # type: ignore
 
@@ -235,7 +236,7 @@ def invoke_stream(self, messages: List[Message]) -> Iterator[Mapping[str, Any]]:
         """
         yield from self.get_client().chat(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             **self.request_kwargs,
         )  # type: ignore
@@ -252,7 +253,7 @@ async def ainvoke_stream(self, messages: List[Message]) -> Any:
         """
         async_stream = await self.get_async_client().chat(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             **self.request_kwargs,
         )
diff --git a/phi/model/openai/chat.py b/phi/model/openai/chat.py
index cb5277597..ba916bf37 100644
--- a/phi/model/openai/chat.py
+++ b/phi/model/openai/chat.py
@@ -278,7 +278,7 @@ def to_dict(self) -> Dict[str, Any]:
             model_dict["tool_choice"] = self.tool_choice
         return model_dict
 
-    def process_message(self, message: Message) -> Dict[str, Any]:
+    def format_message(self, message: Message) -> Dict[str, Any]:
         """
         Format a message into the format expected by OpenAI.
@@ -311,7 +311,7 @@ def invoke(self, messages: List[Message]) -> Union[ChatCompletion, ParsedChatCom
         if isinstance(self.response_format, type) and issubclass(self.response_format, BaseModel):
             return self.get_client().beta.chat.completions.parse(
                 model=self.id,
-                messages=[self.process_message(m) for m in messages],  # type: ignore
+                messages=[self.format_message(m) for m in messages],  # type: ignore
                 **self.request_kwargs,
             )
         else:
@@ -321,7 +321,7 @@ def invoke(self, messages: List[Message]) -> Union[ChatCompletion, ParsedChatCom
 
             return self.get_client().chat.completions.create(
                 model=self.id,
-                messages=[self.process_message(m) for m in messages],  # type: ignore
+                messages=[self.format_message(m) for m in messages],  # type: ignore
                 **self.request_kwargs,
             )
 
@@ -340,7 +340,7 @@ async def ainvoke(self, messages: List[Message]) -> Union[ChatCompletion, Parsed
         if isinstance(self.response_format, type) and issubclass(self.response_format, BaseModel):
             return await self.get_async_client().beta.chat.completions.parse(
                 model=self.id,
-                messages=[self.process_message(m) for m in messages],  # type: ignore
+                messages=[self.format_message(m) for m in messages],  # type: ignore
                 **self.request_kwargs,
             )
         else:
@@ -350,7 +350,7 @@ async def ainvoke(self, messages: List[Message]) -> Union[ChatCompletion, Parsed
 
             return await self.get_async_client().chat.completions.create(
                 model=self.id,
-                messages=[self.process_message(m) for m in messages],  # type: ignore
+                messages=[self.format_message(m) for m in messages],  # type: ignore
                 **self.request_kwargs,
             )
 
@@ -366,7 +366,7 @@ def invoke_stream(self, messages: List[Message]) -> Iterator[ChatCompletionChunk
         """
         yield from self.get_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             stream_options={"include_usage": True},
             **self.request_kwargs,
@@ -384,7 +384,7 @@ async def ainvoke_stream(self, messages: List[Message]) -> Any:
         """
         async_stream = await self.get_async_client().chat.completions.create(
             model=self.id,
-            messages=[self.process_message(m) for m in messages],  # type: ignore
+            messages=[self.format_message(m) for m in messages],  # type: ignore
             stream=True,
             stream_options={"include_usage": True},
             **self.request_kwargs,
diff --git a/phi/run/response.py b/phi/run/response.py
index 32bd149e8..b486ea935 100644
--- a/phi/run/response.py
+++ b/phi/run/response.py
@@ -56,6 +56,15 @@ class RunResponse(BaseModel):
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
+    def to_dict(self) -> Dict[str, Any]:
+        _dict = self.model_dump(
+            exclude_none=True,
+            exclude={"messages"},
+        )
+        if self.messages is not None:
+            _dict["messages"] = [m.to_dict() for m in self.messages]
+        return _dict
+
     def get_content_as_string(self, **kwargs) -> str:
         import json
diff --git a/pyproject.toml b/pyproject.toml
index b3c1fa0a8..6bd10c075 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "phidata"
-version = "2.6.6"
+version = "2.6.7"
 description = "Build multi-modal Agents with memory, knowledge and tools."
 requires-python = ">=3.7"
 readme = "README.md"