Skip to content

Commit

Permalink
Merge branch 'main' into u/xiaoyun/0206
Browse files Browse the repository at this point in the history
  • Loading branch information
LittleLittleCloud authored Feb 7, 2024
2 parents 683db81 + e0fa6ee commit 8deaee9
Show file tree
Hide file tree
Showing 12 changed files with 358 additions and 5 deletions.
3 changes: 2 additions & 1 deletion .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
"extensions": [
"ms-python.python",
"ms-toolsai.jupyter",
"visualstudioexptteam.vscodeintellicode"
"visualstudioexptteam.vscodeintellicode",
"GitHub.copilot"
],
"settings": {
"terminal.integrated.profiles.linux": {
Expand Down
7 changes: 5 additions & 2 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from collections import defaultdict
from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union
import warnings
from openai import BadRequestError

from ..oai.client import OpenAIWrapper, ModelClient
from ..cache.cache import Cache
Expand Down Expand Up @@ -832,8 +833,10 @@ def _summarize_chat(
if not isinstance(prompt, str):
raise ValueError("The summary_prompt must be a string.")
msg_list = agent._groupchat.messages if hasattr(agent, "_groupchat") else agent.chat_messages[self]

summary = self._llm_response_preparer(prompt, msg_list, llm_agent=agent, cache=cache)
try:
summary = self._llm_response_preparer(prompt, msg_list, llm_agent=agent, cache=cache)
except BadRequestError as e:
warnings.warn(f"Cannot extract summary using reflection_with_llm: {e}", UserWarning)
else:
warnings.warn("No summary_method provided or summary_method is not supported: ")
return summary
Expand Down
2 changes: 1 addition & 1 deletion notebook/agentchat_langchain.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"id": "ae1f50ec"
},
"source": [
"<a href=\"https://colab.research.google.com/github/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
"<a href=\"https://colab.research.google.com/github/microsoft/autogen/blob/main/notebook/agentchat_langchain.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
Expand Down
43 changes: 43 additions & 0 deletions samples/apps/auto-anny/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
<div align="center">
<img src="images/icon.png" alt="Repo Icon" width="100" height="100">
</div>

# AutoAnny

AutoAnny is a Discord bot built using AutoGen to help with AutoGen's Discord server.
In fact, Anny can help with any open-source GitHub project (set `ANNY_GH_REPO` below).

## Features

- **`/heyanny help`**: Lists commands.
- **`/heyanny ghstatus`**: Summarizes GitHub activity.
- **`/heyanny ghgrowth`**: Shows GitHub repo growth indicators.
- **`/heyanny ghunattended`**: Lists unattended issues and PRs.

## Installation

1. Clone the AutoGen repository and `cd samples/apps/auto-anny`
2. Install dependencies: `pip install -r requirements.txt`
3. Export your AutoGen config list, Discord token, and GitHub API token:
```
export OAI_CONFIG_LIST=your-autogen-config-list
export DISCORD_TOKEN=your-bot-token
export GH_TOKEN=your-gh-token
export ANNY_GH_REPO=microsoft/autogen # you may choose a different repo name
```
To get a Discord token, you will need to set up your Discord bot using these [instructions](https://discordpy.readthedocs.io/en/stable/discord.html).
4. Start the bot: `python bot.py`
Note: By default Anny will log data to `autoanny.log`.
## Roadmap
- Enable access control
- Enable a richer set of commands
- Enrich agents with tool use
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
36 changes: 36 additions & 0 deletions samples/apps/auto-anny/agent_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json


async def solve_task(task):
    """Run an AutoGen assistant/user-proxy pair on *task* and return a user-facing answer.

    The pair converses until termination, then the assistant is asked to
    condense its inner dialogue into a single response.  The result is
    truncated to 2000 characters (presumably Discord's message limit — confirm).
    """
    configs = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
    solver = AssistantAgent("assistant", llm_config={"config_list": configs})
    proxy = UserProxyAgent(
        "user_proxy",
        code_execution_config={"work_dir": "coding", "use_docker": False},
        human_input_mode="NEVER",
        is_termination_msg=lambda msg: "TERMINATE" in msg.get("content", ""),
    )

    # Inner dialogue: let the pair work through the task to completion.
    await proxy.a_initiate_chat(solver, message=task)

    # Ask the assistant to condense the conversation into one clean answer.
    summary_request = f"""Based on the results in above conversation, create a response for the user.
While computing the response, remember that this conversation was your inner mono-logue.
The user does not need to know every detail of the conversation.
All they want to see is the appropriate result for their task (repeated below) in
a manner that would be most useful. Response should be less than 1500 characters.
The task was: {task}
There is no need to use the word TERMINATE in this response.
"""
    await proxy.a_send(summary_request, solver, request_reply=False, silent=True)
    reply = await solver.a_generate_reply(solver.chat_messages[proxy], proxy)
    await solver.a_send(reply, proxy, request_reply=False, silent=True)

    # The last message in the assistant's log for this proxy is the condensed answer.
    return solver.chat_messages[proxy][-1]["content"][:2000]
153 changes: 153 additions & 0 deletions samples/apps/auto-anny/bot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
import os
import logging
import logging.handlers

import discord
from discord.ext import commands

from agent_utils import solve_task

# Bot-specific logger; discord's HTTP layer is also kept at INFO so API calls are recorded.
logger = logging.getLogger("anny")
logger.setLevel(logging.INFO)
logging.getLogger("discord.http").setLevel(logging.INFO)

# Rotating file handler so a long-running bot cannot grow one unbounded log file.
handler = logging.handlers.RotatingFileHandler(
    filename="autoanny.log",
    encoding="utf-8",
    maxBytes=32 * 1024 * 1024,  # 32 MiB
    backupCount=5,  # Rotate through 5 files
)
dt_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter("[{asctime}] [{levelname:<8}] {name}: {message}", dt_fmt, style="{")
handler.setFormatter(formatter)
logger.addHandler(handler)

# Fail fast at startup if any required configuration is missing.
required_env_vars = ["OAI_CONFIG_LIST", "DISCORD_TOKEN", "GH_TOKEN", "ANNY_GH_REPO"]
for var in required_env_vars:
    if var not in os.environ:
        raise ValueError(f"{var} environment variable is not set.")

# read token from environment variable
DISCORD_TOKEN = os.environ["DISCORD_TOKEN"]
REPO = os.environ["ANNY_GH_REPO"]  # e.g. "microsoft/autogen"

# Message-content and reaction intents are needed for command parsing and
# the reaction logging handlers below.
intents = discord.Intents.default()
intents.message_content = True
intents.reactions = True
bot = commands.Bot(command_prefix="/", intents=intents)


@bot.event
async def on_message(message):
    # Log every incoming message, then hand off to the command framework —
    # overriding on_message without calling process_commands would swallow commands.
    logger.info({"message": message.content, "author": message.author, "id": message.id})
    await bot.process_commands(message)


@bot.event
async def on_reaction_add(reaction, user):
    """Record every reaction added to a message the bot can see."""
    msg = reaction.message
    record = {
        "message": msg.content,
        "author": msg.author,
        "id": msg.id,
        "reaction": reaction.emoji,
        "reactor": user,
    }
    logger.info(record)


@bot.event
async def on_ready():
    # The configured formatter renders only "{name}: {message}", so fields passed
    # via `extra` never appear in the log output — put the user in the message
    # itself (lazy %-style args, per logging best practice).
    logger.info("Logged in as %s", bot.user)


@bot.command(description="Invoke Anny to solve a task.")
async def heyanny(ctx, task: str = None):
    """Entry point for the /heyanny command: dispatch *task* to its handler."""
    # No task (or an explicit "help") → show the help text and stop.
    if not task or task == "help":
        await ctx.send(help_msg())
        return

    handlers = {
        "ghstatus": ghstatus,
        "ghgrowth": ghgrowth,
        "ghunattended": ghunattended,
        "ghstudio": ghstudio,
    }
    handler = handlers.get(task)
    if handler is None:
        await ctx.send("Invalid command! Please type /heyanny help for the list of commands.")
        return

    # Acknowledge first: the agent run can take a while.
    await ctx.send("Working on it...")
    await ctx.send(await handler(ctx))


def help_msg():
    """Return the bot's help text listing every supported /heyanny subcommand.

    NOTE: keep this list in sync with the task_map/handlers in heyanny().
    """
    # Fixes: the ghunattended line was garbled ("the most issues and PRs from
    # today from today"), and ghstudio was missing even though heyanny() supports it.
    response = f"""
Hi this is Anny an AutoGen-powered Discord bot to help with `{REPO}`. I can help you with the following tasks:
- ghstatus: Find the most recent issues and PRs from today.
- ghgrowth: Find the number of stars, forks, and indicators of growth.
- ghunattended: Find the most recent issues and PRs from today that haven't received a response/comment.
- ghstudio: Summarize common issues and complaints related to AutoGen Studio.
You can invoke me by typing `/heyanny <task>`.
"""
    return response


async def ghstatus(ctx):
    """Summarize the repo's issues and PRs from the last 24 hours."""
    prompt = f"""
Find the most recent issues and PRs from `{REPO}` in last 24 hours.
Separate issues and PRs.
Final response should contains title, number, date/time, URLs of the issues and PRs.
Markdown formatted response will make it look nice.
Make sure date/time is in PST and readily readable.
You can access github token from the environment variable called GH_TOKEN.
"""
    return await solve_task(prompt)


async def ghgrowth(ctx):
    """Report stars, forks, and week-over-week growth indicators for the repo."""
    prompt = f"""
Find the number of stars, forks, and indicators of growth of `{REPO}`.
Compare the stars of `{REPO}` this week vs last week.
Make sure date/time is in PST and readily readable.
You can access github token from the environment variable called GH_TOKEN.
"""
    return await solve_task(prompt)


async def ghunattended(ctx):
    """List issues created in the last 24 hours that have no response yet."""
    prompt = f"""
Find the issues *created* in the last 24 hours from `{REPO}` that haven't
received a response/comment. Modified issues don't count.
Final response should contains title, number, date/time, URLs of the issues and PRs.
Make sure date/time is in PST and readily readable.
You can access github token from the environment variable called GH_TOKEN.
"""
    return await solve_task(prompt)


async def ghstudio(ctx):
    """Summarize common complaints about AutoGen Studio from the repo's issues/PRs."""
    # TODO: Generalize to feature name
    prompt = f"""
Find issues and PRs from `{REPO}` that are related to the AutoGen Studio.
The title or the body of the issue or PR should give you a hint whether its related.
Summarize the top 5 common complaints or issues. Cite the issue/PR number and URL.
Explain why you think this is a common issue in 2 sentences.
You can access github token from the environment variable called GH_TOKEN.
"""
    return await solve_task(prompt)


# Blocks until the process exits; log_handler=None because we installed our own
# rotating-file handler above instead of discord.py's default stderr handler.
bot.run(DISCORD_TOKEN, log_handler=None)
Binary file added samples/apps/auto-anny/images/icon.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 2 additions & 0 deletions samples/apps/auto-anny/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
discord.py
pyautogen
69 changes: 68 additions & 1 deletion test/agentchat/test_chats.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@
import pytest
from conftest import skip_openai
import autogen
from typing import Literal

from pydantic import BaseModel, Field
from typing_extensions import Annotated


@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
Expand Down Expand Up @@ -127,6 +131,7 @@ def test_chats():

financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Investigate the reasons.""",
"""Pros and cons of the companies I'm interested in. Keep it short.""",
]

Expand Down Expand Up @@ -197,6 +202,68 @@ def test_chats():
# print(blogpost.summary, insights_and_blogpost)


@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_chats_w_func():
    """End-to-end check of initiate_chat with a registered tool function and a
    reflection_with_llm summary (requires a live OpenAI config)."""
    # OAI_CONFIG_LIST / KEY_LOC are module-level constants defined elsewhere in this file.
    config_list = autogen.config_list_from_json(
        OAI_CONFIG_LIST,
        file_location=KEY_LOC,
    )

    llm_config = {
        "config_list": config_list,
        "timeout": 120,
    }

    chatbot = autogen.AssistantAgent(
        name="chatbot",
        system_message="For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.",
        llm_config=llm_config,
    )

    # create a UserProxyAgent instance named "user_proxy"
    user_proxy = autogen.UserProxyAgent(
        name="user_proxy",
        is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
        human_input_mode="NEVER",
        max_consecutive_auto_reply=10,
        code_execution_config={
            "last_n_messages": 1,
            "work_dir": "tasks",
            "use_docker": False,
        },
    )

    CurrencySymbol = Literal["USD", "EUR"]

    def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:
        # Fixed toy exchange rates; any pair other than USD/EUR is an error.
        if base_currency == quote_currency:
            return 1.0
        elif base_currency == "USD" and quote_currency == "EUR":
            return 1 / 1.1
        elif base_currency == "EUR" and quote_currency == "USD":
            return 1.1
        else:
            raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")

    # Registered for execution on the proxy and exposed to the LLM on the chatbot;
    # the Annotated metadata presumably becomes the tool's parameter descriptions
    # in the generated schema — verify against autogen's register_for_llm docs.
    @user_proxy.register_for_execution()
    @chatbot.register_for_llm(description="Currency exchange calculator.")
    def currency_calculator(
        base_amount: Annotated[float, "Amount of currency in base_currency"],
        base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD",
        quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR",
    ) -> str:
        quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
        return f"{quote_amount} {quote_currency}"

    res = user_proxy.initiate_chat(
        chatbot,
        message="How much is 123.45 USD in EUR?",
        summary_method="reflection_with_llm",
    )
    print(res.summary, res.cost, res.chat_history)


if __name__ == "__main__":
    # Run the func-registration test directly; test_chats() stays disabled.
    # (Removed a stale duplicated "# test_chats_group()" comment that
    # contradicted the live call on the line above it.)
    # test_chats()
    test_chats_group()
    test_chats_w_func()
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading

0 comments on commit 8deaee9

Please sign in to comment.