Skip to content

Commit

Permalink
v2.6.7
Browse files Browse the repository at this point in the history
  • Loading branch information
ashpreetbedi committed Dec 11, 2024
1 parent 958c0fa commit 25d1fcf
Show file tree
Hide file tree
Showing 18 changed files with 126 additions and 96 deletions.
2 changes: 1 addition & 1 deletion cookbook/providers/google/basic.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from phi.agent import Agent, RunResponse # noqa
from phi.model.google import Gemini

agent = Agent(model=Gemini(id="gemini-1.5-flash"), markdown=True)
agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), markdown=True)

# Get the response in a variable
# run: RunResponse = agent.run("Share a 2 sentence horror story")
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/basic_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from phi.agent import Agent, RunResponse # noqa
from phi.model.google import Gemini

agent = Agent(model=Gemini(id="gemini-1.5-flash"), markdown=True)
agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), markdown=True)

# Get the response in a variable
# run_response: Iterator[RunResponse] = agent.run("Share a 2 sentence horror story", stream=True)
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/data_analyst.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
)

agent = Agent(
model=Gemini(id="gemini-1.5-flash"),
model=Gemini(id="gemini-2.0-flash-exp"),
tools=[duckdb_tools],
markdown=True,
show_tool_calls=True,
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/finance_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from phi.tools.yfinance import YFinanceTools

agent = Agent(
model=Gemini(id="gemini-1.5-flash"),
model=Gemini(id="gemini-2.0-flash-exp"),
tools=[YFinanceTools(stock_price=True, analyst_recommendations=True, stock_fundamentals=True)],
show_tool_calls=True,
description="You are an investment analyst that researches stocks and helps users make informed decisions.",
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/knowledge.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
knowledge_base.load(recreate=True) # Comment out after first run

agent = Agent(
model=Gemini(id="gemini-1.5-flash"),
model=Gemini(id="gemini-2.0-flash-exp"),
knowledge_base=knowledge_base,
use_tools=True,
show_tool_calls=True,
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

agent = Agent(
model=Gemini(id="gemini-1.5-flash"),
model=Gemini(id="gemini-2.0-flash-exp"),
storage=PgAgentStorage(table_name="agent_sessions", db_url=db_url),
tools=[DuckDuckGo()],
add_history_to_messages=True,
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/structured_output.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ class MovieScript(BaseModel):


movie_agent = Agent(
model=Gemini(id="gemini-1.5-flash"),
model=Gemini(id="gemini-2.0-flash-exp"),
description="You help people write movie scripts.",
response_model=MovieScript,
)
Expand Down
2 changes: 1 addition & 1 deletion cookbook/providers/google/web_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,5 @@
from phi.model.google import Gemini
from phi.tools.duckduckgo import DuckDuckGo

agent = Agent(model=Gemini(id="gemini-1.5-flash"), tools=[DuckDuckGo()], show_tool_calls=True, markdown=True)
agent = Agent(model=Gemini(id="gemini-2.0-flash-exp"), tools=[DuckDuckGo()], show_tool_calls=True, markdown=True)
agent.print_response("Whats happening in France?", stream=True)
17 changes: 17 additions & 0 deletions cookbook/providers/openai/image_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from phi.agent import Agent
from phi.model.openai import OpenAIChat
from phi.tools.duckduckgo import DuckDuckGo

# Agent backed by OpenAI's gpt-4o model, with DuckDuckGo web search available
# as a callable tool; markdown=True asks the agent to format replies as Markdown.
agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    tools=[DuckDuckGo()],
    markdown=True,
)

# Send a prompt together with an image (passed by URL via `images=[...]`) and
# print the reply; stream=True prints the response incrementally as it arrives.
# NOTE(review): assumes the chosen model accepts image inputs — gpt-4o does,
# per OpenAI's vision documentation.
agent.print_response(
    "Tell me about this image and give me the latest news about it.",
    images=[
        "https://upload.wikimedia.org/wikipedia/commons/b/bf/Krakow_-_Kosciol_Mariacki.jpg",
    ],
    stream=True,
)
2 changes: 1 addition & 1 deletion phi/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -2645,7 +2645,7 @@ def _create_run_data(self) -> Dict[str, Any]:
run_data.update(
{
"run_input": self.run_input,
"run_response": self.run_response,
"run_response": self.run_response.to_dict(),
"run_response_format": run_response_format,
}
)
Expand Down
13 changes: 12 additions & 1 deletion phi/agent/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,18 @@ class AgentSession(BaseModel):
model_config = ConfigDict(from_attributes=True)

def monitoring_data(self) -> Dict[str, Any]:
return self.model_dump()
monitoring_data = self.model_dump(exclude={"memory"})
# Google Gemini adds a "parts" field to the messages, which is not serializable
# If there are runs in the memory, remove the "parts" from the messages
if self.memory is not None and "runs" in self.memory:
_runs = self.memory["runs"]
if len(_runs) > 0:
for _run in _runs:
if "messages" in _run:
for m in _run["messages"]:
if isinstance(m, dict):
m.pop("parts", None)
return monitoring_data

def telemetry_data(self) -> Dict[str, Any]:
return self.model_dump(include={"model", "created_at", "updated_at"})
4 changes: 2 additions & 2 deletions phi/model/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ def _process_bytes_image(self, image: bytes) -> Dict[str, Any]:
image_url = f"data:image/jpeg;base64,{base64_image}"
return {"type": "image_url", "image_url": {"url": image_url}}

def _process_image(self, image: Union[str, Dict, bytes]) -> Optional[Dict[str, Any]]:
def process_image(self, image: Union[str, Dict, bytes]) -> Optional[Dict[str, Any]]:
"""Process an image based on the format."""

if isinstance(image, dict):
Expand Down Expand Up @@ -468,7 +468,7 @@ def add_images_to_message(
# Add images to the message content
for image in images:
try:
image_data = self._process_image(image)
image_data = self.process_image(image)
if image_data:
message_content_with_image.append(image_data)
except Exception as e:
Expand Down
Loading

0 comments on commit 25d1fcf

Please sign in to comment.