Skip to content

Commit

Permalink
chore: add more Qwen models, fix doc bug (#1178)
Browse files Browse the repository at this point in the history
  • Loading branch information
Wendong-Fan authored Nov 13, 2024
1 parent 333c9d9 commit 260407d
Show file tree
Hide file tree
Showing 7 changed files with 125 additions and 19 deletions.
12 changes: 12 additions & 0 deletions camel/types/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,10 @@ class ModelType(UnifiedModelType, Enum):
QWEN_MATH_PLUS = "qwen-math-plus"
QWEN_MATH_TURBO = "qwen-math-turbo"
QWEN_CODER_TURBO = "qwen-coder-turbo"
QWEN_2_5_CODER_32B = "qwen2.5-coder-32b-instruct"
QWEN_2_5_72B = "qwen2.5-72b-instruct"
QWEN_2_5_32B = "qwen2.5-32b-instruct"
QWEN_2_5_14B = "qwen2.5-14b-instruct"

# Yi models (01-ai)
YI_LIGHTNING = "yi-lightning"
Expand Down Expand Up @@ -273,6 +277,10 @@ def is_qwen(self) -> bool:
ModelType.QWEN_MATH_PLUS,
ModelType.QWEN_MATH_TURBO,
ModelType.QWEN_CODER_TURBO,
ModelType.QWEN_2_5_CODER_32B,
ModelType.QWEN_2_5_72B,
ModelType.QWEN_2_5_32B,
ModelType.QWEN_2_5_14B,
}

@property
Expand Down Expand Up @@ -344,6 +352,10 @@ def token_limit(self) -> int:
ModelType.MISTRAL_PIXTRAL_12B,
ModelType.MISTRAL_8B,
ModelType.MISTRAL_3B,
ModelType.QWEN_2_5_CODER_32B,
ModelType.QWEN_2_5_72B,
ModelType.QWEN_2_5_32B,
ModelType.QWEN_2_5_14B,
}:
return 128_000
elif self in {
Expand Down
4 changes: 2 additions & 2 deletions docs/cookbooks/agents_with_rag.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -320,17 +320,17 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "mHdEsIzSH_AD"
},
"source": [
"## 2. Auto RAG\n",
"In this section we will run the `AutoRetriever` with default settings. It uses `OpenAIEmbedding` as default embedding model and `Milvus` as default vector storage.\n",
"In this section we will run the `AutoRetriever` with default settings. It uses `OpenAIEmbedding` as default embedding model and `Qdrant` as default vector storage.\n",
"\n",
"What you need to do is:\n",
"- Set content input paths, which can be local paths or remote urls\n",
"- Set remote url and api key for Qdrant\n",
"- Give a query\n",
"\n",
"The Auto RAG pipeline would create collections for given content input paths, the collection name will be set automatically based on the content input path name; if the collection exists, it will do the retrieval directly."
Expand Down
4 changes: 4 additions & 0 deletions docs/key_modules/models.md
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,10 @@ The following table lists currently supported model platforms by CAMEL.
| Qwen | qwen-math-plus | N |
| Qwen | qwen-math-turbo | N |
| Qwen | qwen-coder-turbo | N |
| Qwen | qwen2.5-coder-32b-instruct | N |
| Qwen | qwen2.5-72b-instruct | N |
| Qwen | qwen2.5-32b-instruct | N |
| Qwen | qwen2.5-14b-instruct | N |
| ZhipuAI | glm-4v | Y |
| ZhipuAI | glm-4 | N |
| ZhipuAI | glm-3-turbo | N |
Expand Down
2 changes: 1 addition & 1 deletion docs/key_modules/workforce.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ has failed a certain number of times (3 by default).

Here is a diagram illustrating the workflow with a simple example.

![Workforce Example](https://lh3.googleusercontent.com/pw/AP1GczMFbGi7pInBgiXoPbS8lOfIuGijWoo3EeRlz0OWPe7im1FWYXnD1xnbQpEbD_p4DVHtpWhQQHicGaEc1RaoyaEqg9396oGNPQYi4XQ8U3SBRxQV53KSrqzcE9RBMkGv7PgMMxXWVEiWA5rVe6oE9C8=w1315-h1324-s-no?authuser=0)
![Workforce Example](https://i.postimg.cc/261dDM9W/work-force.png)

## Get Started

Expand Down
100 changes: 93 additions & 7 deletions examples/models/qwen_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@

model = ModelFactory.create(
model_platform=ModelPlatformType.QWEN,
model_type=ModelType.QWEN_MAX,
model_config_dict=QwenConfig(temperature=0.2, stream=False).as_dict(),
model_type=ModelType.QWEN_2_5_CODER_32B,
model_config_dict=QwenConfig(temperature=0.2).as_dict(),
)

# Define system message
Expand All @@ -29,17 +29,103 @@
# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)

user_msg = """Say hi to CAMEL AI, one open-source community
dedicated to the study of autonomous and communicative agents."""
user_msg = """give me python code to develop a trading bot"""

# Get response information
response = camel_agent.step(user_msg)
print(response.msgs[0].content)

'''
===============================================================================
Hello CAMEL AI community! It's great to see such dedication to the study of
autonomous and communicative agents. Your work is truly inspiring and
contributes significantly to the field of AI. Keep up the fantastic work!
Creating a trading bot involves several steps, including data acquisition,
strategy development, backtesting, and live trading. Below is a simplified
example of a trading bot using Python. This example will use the `ccxt`
library to interact with cryptocurrency exchanges and `pandas` for data
manipulation. The strategy used here is a simple moving average crossover
strategy.
First, you need to install the required libraries:
```bash
pip install ccxt pandas
```
Here's a basic example of a trading bot:
```python
import ccxt
import pandas as pd
import time
# Initialize the exchange
exchange = ccxt.binance({
'apiKey': 'YOUR_API_KEY',
'secret': 'YOUR_SECRET_KEY',
})
# Define the trading parameters
symbol = 'BTC/USDT'
timeframe = '1h'
short_window = 50
long_window = 200
amount_to_trade = 0.001 # Amount of BTC to trade
def fetch_ohlcv(symbol, timeframe):
ohlcv = exchange.fetch_ohlcv(symbol, timeframe)
df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low',
'close', 'volume'])
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
return df
def calculate_moving_averages(df, short_window, long_window):
df['short_mavg'] = df['close'].rolling(window=short_window, min_periods=1).
mean()
df['long_mavg'] = df['close'].rolling(window=long_window, min_periods=1).
mean()
return df
def get_signal(df):
if df['short_mavg'].iloc[-1] > df['long_mavg'].iloc[-1] and df
['short_mavg'].iloc[-2] <= df['long_mavg'].iloc[-2]:
return 'buy'
elif df['short_mavg'].iloc[-1] < df['long_mavg'].iloc[-1] and df
['short_mavg'].iloc[-2] >= df['long_mavg'].iloc[-2]:
return 'sell'
else:
return 'hold'
def execute_trade(signal, symbol, amount):
if signal == 'buy':
order = exchange.create_market_buy_order(symbol, amount)
print(f"Executed BUY order: {order}")
elif signal == 'sell':
order = exchange.create_market_sell_order(symbol, amount)
print(f"Executed SELL order: {order}")
def main():
while True:
try:
# Fetch OHLCV data
df = fetch_ohlcv(symbol, timeframe)
# Calculate moving averages
df = calculate_moving_averages(df, short_window, long_window)
# Get trading signal
signal = get_signal(df)
# Execute trade based on signal
execute_trade(signal, symbol, amount_to_trade)
# Wait for the next candle
time.sleep(60 * 60) # Sleep for 1 hour
except Exception as e:
print(f"An error occurred: {e}")
time.sleep(60) # Sleep for 1 minute before retrying
if __name__ == "__main__":
main()
```
===============================================================================
'''
18 changes: 9 additions & 9 deletions test/models/test_model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
'model_platform, model_type',
[
(ModelPlatformType.OPENAI, ModelType.GPT_3_5_TURBO),
(ModelPlatformType.OPENAI, ModelType.GPT_4_TURBO),
(ModelPlatformType.OPENAI, ModelType.GPT_4O_MINI),
],
)

Expand All @@ -50,11 +50,11 @@
),
(
ModelPlatformType.OPENAI,
ModelType.GPT_4,
ModelType.GPT_4O_MINI,
ChatGPTConfig().as_dict(),
None,
OpenAITokenCounter,
ModelType.GPT_4,
ModelType.GPT_4O_MINI,
),
# Test Stub model
# Stub model uses StubTokenCounter as default
Expand All @@ -70,9 +70,9 @@
ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
ModelType.STUB,
ChatGPTConfig().as_dict(),
OpenAITokenCounter(ModelType.GPT_4),
OpenAITokenCounter(ModelType.GPT_4O_MINI),
OpenAITokenCounter,
ModelType.GPT_4,
ModelType.GPT_4O_MINI,
),
# Test Anthropic model
# Anthropic model uses AnthropicTokenCounter as default
Expand All @@ -97,9 +97,9 @@
ModelPlatformType.GEMINI,
ModelType.GEMINI_1_5_FLASH,
GeminiConfig().as_dict(),
OpenAITokenCounter(ModelType.GPT_4),
OpenAITokenCounter(ModelType.GPT_4O_MINI),
OpenAITokenCounter,
ModelType.GPT_4,
ModelType.GPT_4O_MINI,
),
# Test Ollama model
(
Expand All @@ -114,9 +114,9 @@
ModelPlatformType.OLLAMA,
"gpt-3.5-turbo",
OllamaConfig().as_dict(),
OpenAITokenCounter(ModelType.GPT_4),
OpenAITokenCounter(ModelType.GPT_4O_MINI),
OpenAITokenCounter,
ModelType.GPT_4,
ModelType.GPT_4O_MINI,
),
],
)
Expand Down
4 changes: 4 additions & 0 deletions test/models/test_qwen_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@
ModelType.QWEN_MATH_PLUS,
ModelType.QWEN_MATH_TURBO,
ModelType.QWEN_CODER_TURBO,
ModelType.QWEN_2_5_CODER_32B,
ModelType.QWEN_2_5_72B,
ModelType.QWEN_2_5_32B,
ModelType.QWEN_2_5_14B,
],
)
def test_qwen_model(model_type: ModelType):
Expand Down

0 comments on commit 260407d

Please sign in to comment.