Commit 5ed3467

Merge pull request #2272 from kqlio67/main
Integrate New Providers and Enhance Existing Functionality
2 parents: 7e09ba3 + dd17d5a

51 files changed: +3170 −1664 lines

docs/async_client.md (+93 −50)
@@ -1,3 +1,4 @@
+
 # How to Use the G4F AsyncClient API

 The AsyncClient API is the asynchronous counterpart to the standard G4F Client API. It offers the same functionality as the synchronous API, but with the added benefit of improved performance due to its asynchronous nature.
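A note for readers of this diff: the pattern the updated examples converge on is a plain `Client` whose `chat.completions.async_create` is awaited inside an `asyncio` entry point. The performance benefit claimed above is easiest to see when several requests run concurrently; a minimal sketch under that assumption (the prompts and model choice are illustrative, not part of this commit):

```python
import asyncio

from g4f.client import Client

async def ask(client: Client, prompt: str) -> str:
    # Each request is awaitable, so many prompts can be in flight at once.
    response = await client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

async def main():
    client = Client()
    prompts = ["Say this is a test", "Name three colors", "What is 2 + 2?"]
    # Fan the prompts out concurrently instead of awaiting them one by one.
    answers = await asyncio.gather(*(ask(client, p) for p in prompts))
    for prompt, answer in zip(prompts, answers):
        print(f"{prompt!r} -> {answer!r}")

asyncio.run(main())
```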
@@ -25,7 +26,7 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
 client = AsyncClient(
     provider=OpenaiChat,
     image_provider=Gemini,
-    ...
+    # Add any other necessary parameters
 )
 ```

@@ -43,7 +44,7 @@ from g4f.client import AsyncClient
 client = AsyncClient(
     api_key="your_api_key_here",
     proxies="http://user:pass@host",
-    ...
+    # Add any other necessary parameters
 )
 ```

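A usage note on the snippet above: rather than hard-coding the key, it can be read from the environment. A minimal sketch, assuming the same `AsyncClient` constructor; the variable name `G4F_API_KEY` is an arbitrary choice for illustration:

```python
import os

from g4f.client import AsyncClient

client = AsyncClient(
    # Read the key from the environment so it never lands in source control;
    # G4F_API_KEY is a hypothetical variable name used only in this sketch.
    api_key=os.environ.get("G4F_API_KEY"),
    proxies="http://user:pass@host",
)
```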
@@ -57,28 +58,44 @@ client = AsyncClient(
 You can use the `ChatCompletions` endpoint to generate text completions. Here’s how you can do it:

 ```python
-response = await client.chat.completions.create(
-    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": "Say this is a test"}],
-    ...
-)
-print(response.choices[0].message.content)
+import asyncio
+
+from g4f.client import Client
+
+async def main():
+    client = Client()
+    response = await client.chat.completions.async_create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "say this is a test"}],
+        # Add any other necessary parameters
+    )
+    print(response.choices[0].message.content)
+
+asyncio.run(main())
 ```

 ### Streaming Completions

 The `AsyncClient` also supports streaming completions. This allows you to process the response incrementally as it is generated:

 ```python
-stream = client.chat.completions.create(
-    model="gpt-4",
-    messages=[{"role": "user", "content": "Say this is a test"}],
-    stream=True,
-    ...
-)
-async for chunk in stream:
-    if chunk.choices[0].delta.content:
-        print(chunk.choices[0].delta.content or "", end="")
+import asyncio
+
+from g4f.client import Client
+
+async def main():
+    client = Client()
+    stream = await client.chat.completions.async_create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "say this is a test"}],
+        stream=True,
+        # Add any other necessary parameters
+    )
+    async for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content or "", end="")
+
+asyncio.run(main())
 ```

 In this example:
@@ -89,48 +106,69 @@ In this example:
 The following code snippet demonstrates how to use a vision model to analyze an image and generate a description based on the content of the image. This example shows how to fetch an image, send it to the model, and then process the response.

 ```python
+import g4f
 import requests
+import asyncio
+
 from g4f.client import Client
-from g4f.Provider import Bing

-client = AsyncClient(
-    provider=Bing
-)
+image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
+# Or: image = open("docs/cat.jpeg", "rb")

-image = requests.get("https://my_website/image.jpg", stream=True).raw
-# Or: image = open("local_path/image.jpg", "rb")

-response = client.chat.completions.create(
-    "",
-    messages=[{"role": "user", "content": "what is in this picture?"}],
-    image=image
-)
-print(response.choices[0].message.content)
+async def main():
+    client = Client()
+    response = await client.chat.completions.async_create(
+        model=g4f.models.default,
+        provider=g4f.Provider.Bing,
+        messages=[{"role": "user", "content": "What are on this image?"}],
+        image=image
+        # Add any other necessary parameters
+    )
+    print(response.choices[0].message.content)
+
+asyncio.run(main())
 ```

 ### Image Generation:

 You can generate images using a specified prompt:

 ```python
-response = await client.images.generate(
-    model="dall-e-3",
-    prompt="a white siamese cat",
-    ...
-)
+import asyncio
+from g4f.client import Client
+
+async def main():
+    client = Client()
+    response = await client.images.async_generate(
+        prompt="a white siamese cat",
+        model="dall-e-3",
+        # Add any other necessary parameters
+    )
+    image_url = response.data[0].url
+    print(f"Generated image URL: {image_url}")

-image_url = response.data[0].url
+asyncio.run(main())
 ```

 #### Base64 as the response format

 ```python
-response = await client.images.generate(
-    prompt="a cool cat",
-    response_format="b64_json"
-)
+import asyncio
+from g4f.client import Client

-base64_text = response.data[0].b64_json
+async def main():
+    client = Client()
+    response = await client.images.async_generate(
+        prompt="a white siamese cat",
+        model="dall-e-3",
+        response_format="b64_json"
+        # Add any other necessary parameters
+    )
+    base64_text = response.data[0].b64_json
+    print(base64_text)
+
+asyncio.run(main())
 ```
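A follow-up to the `b64_json` example just above: the returned string is ordinary base64, so writing it to disk needs only the standard library. A sketch, assuming the payload is plain base64 with no `data:` URI prefix:

```python
import base64

def save_b64_image(base64_text: str, path: str = "cat.png") -> None:
    # Decode the base64 payload and write the raw image bytes to disk.
    with open(path, "wb") as f:
        f.write(base64.b64decode(base64_text))

# Usage, given the response from the example above:
# save_b64_image(response.data[0].b64_json)
```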

 ### Example usage with asyncio.gather
@@ -140,27 +178,32 @@ Start two tasks at the same time:
 ```python
 import asyncio

-from g4f.client import AsyncClient
-from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
+from g4f.client import Client

 async def main():
-    client = AsyncClient(
-        provider=OpenaiChat,
-        image_provider=Gemini,
-        # other parameters...
-    )
+    client = Client()

-    task1 = client.chat.completions.create(
+    task1 = client.chat.completions.async_create(
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": "Say this is a test"}],
     )
     task2 = client.images.generate(
         model="dall-e-3",
         prompt="a white siamese cat",
     )
+
     responses = await asyncio.gather(task1, task2)
+
+    chat_response, image_response = responses

-    print(responses)
+    print("Chat Response:")
+    print(chat_response.choices[0].message.content)
+
+    print("\nImage Response:")
+    image_url = image_response.data[0].url
+    print(image_url)

 asyncio.run(main())
-```
+```
+
+[Return to Home](/)
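One caveat on the `asyncio.gather` example this file now ends with: by default, the first exception raised by either task propagates and the other result is lost. A more defensive sketch, assuming the same `async_create` and `generate` calls shown above:

```python
import asyncio

from g4f.client import Client

async def main():
    client = Client()

    task1 = client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    task2 = client.images.generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )

    # return_exceptions=True keeps one failing provider from discarding the
    # other task's result; each slot is either a result or an exception.
    chat_response, image_response = await asyncio.gather(
        task1, task2, return_exceptions=True
    )

    if isinstance(chat_response, Exception):
        print(f"Chat request failed: {chat_response}")
    else:
        print(chat_response.choices[0].message.content)

    if isinstance(image_response, Exception):
        print(f"Image request failed: {image_response}")
    else:
        print(image_response.data[0].url)

asyncio.run(main())
```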

docs/client.md (+49 −9)
@@ -1,3 +1,4 @@
+
 ### G4F - Client API

 #### Introduction
@@ -33,7 +34,7 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
 client = Client(
     provider=OpenaiChat,
     image_provider=Gemini,
-    ...
+    # Add any other necessary parameters
 )
 ```

@@ -48,7 +49,7 @@ from g4f.client import Client
 client = Client(
     api_key="...",
     proxies="http://user:pass@host",
-    ...
+    # Add any other necessary parameters
 )
 ```

@@ -59,23 +60,30 @@ client = Client(
 You can use the `ChatCompletions` endpoint to generate text completions as follows:

 ```python
+from g4f.client import Client
+client = Client()
+
 response = client.chat.completions.create(
     model="gpt-3.5-turbo",
     messages=[{"role": "user", "content": "Say this is a test"}],
-    ...
+    # Add any other necessary parameters
 )
 print(response.choices[0].message.content)
 ```

 Also streaming are supported:

 ```python
+from g4f.client import Client
+
+client = Client()
+
 stream = client.chat.completions.create(
     model="gpt-4",
     messages=[{"role": "user", "content": "Say this is a test"}],
     stream=True,
-    ...
 )
+
 for chunk in stream:
     if chunk.choices[0].delta.content:
         print(chunk.choices[0].delta.content or "", end="")
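A usage note on the streaming loop above: if the complete reply is also needed after streaming, accumulate the deltas while printing them. A sketch that mirrors the snippet in this hunk:

```python
from g4f.client import Client

client = Client()
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
)

# Echo each delta as it arrives while keeping a copy for later use.
parts = []
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")
        parts.append(delta)

reply = "".join(parts)
```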
@@ -86,27 +94,35 @@ for chunk in stream:
 Generate images using a specified prompt:

 ```python
+from g4f.client import Client
+
+client = Client()
 response = client.images.generate(
     model="dall-e-3",
     prompt="a white siamese cat",
-    ...
+    # Add any other necessary parameters
 )

 image_url = response.data[0].url
+print(f"Generated image URL: {image_url}")
 ```

 **Creating Image Variations:**

 Create variations of an existing image:

 ```python
+from g4f.client import Client
+
+client = Client()
 response = client.images.create_variation(
     image=open("cat.jpg", "rb"),
     model="bing",
-    ...
+    # Add any other necessary parameters
 )

 image_url = response.data[0].url
+print(f"Generated image URL: {image_url}")
 ```
 Original / Variant:

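Both snippets above end with a URL; a short companion sketch for actually downloading the generated file (plain `requests` usage; the output filename is arbitrary):

```python
import requests

def download_image(image_url: str, path: str = "generated.png") -> None:
    # Stream the image to disk instead of holding it all in memory.
    resp = requests.get(image_url, stream=True, timeout=30)
    resp.raise_for_status()
    with open(path, "wb") as f:
        for block in resp.iter_content(chunk_size=8192):
            f.write(block)

# Usage, given the response from the examples above:
# download_image(response.data[0].url)
```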
@@ -120,6 +136,7 @@ from g4f.Provider import RetryProvider, Phind, FreeChatgpt, Liaobots

 import g4f.debug
 g4f.debug.logging = True
+g4f.debug.version_check = False

 client = Client(
     provider=RetryProvider([Phind, FreeChatgpt, Liaobots], shuffle=False)
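The hunk above ends mid-call, so for review context, here is a self-contained sketch of how this `RetryProvider` configuration is typically exercised; the chat request at the end is illustrative, not part of this diff, and the comments reflect the apparent intent of the two debug flags:

```python
from g4f.client import Client
from g4f.Provider import RetryProvider, Phind, FreeChatgpt, Liaobots

import g4f.debug
g4f.debug.logging = True         # log which provider handles each attempt
g4f.debug.version_check = False  # skip the startup version lookup

# shuffle=False keeps the providers in the listed order, so Phind is
# tried first and the others only serve as fallbacks on failure.
client = Client(
    provider=RetryProvider([Phind, FreeChatgpt, Liaobots], shuffle=False)
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test"}],
)
print(response.choices[0].message.content)
```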
@@ -154,22 +171,45 @@ response = client.chat.completions.create(
 )
 print(response.choices[0].message.content)
 ```
+
 ```
 User: What are on this image?
 ```
-![Waterfall](/docs/waterfall.jpeg)

+![Waterfall](/docs/waterfall.jpeg)
 ```
 Bot: There is a waterfall in the middle of a jungle. There is a rainbow over...
 ```

+### Example: Using a Vision Model
+The following code snippet demonstrates how to use a vision model to analyze an image and generate a description based on the content of the image. This example shows how to fetch an image, send it to the model, and then process the response.
+
+```python
+import g4f
+import requests
+from g4f.client import Client
+
+image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
+# Or: image = open("docs/cat.jpeg", "rb")
+
+client = Client()
+response = client.chat.completions.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "What are on this image?"}],
+    provider=g4f.Provider.Bing,
+    image=image,
+    # Add any other necessary parameters
+)
+print(response.choices[0].message.content)
+```
+
 #### Advanced example: A command-line program
 ```python
 import g4f
 from g4f.client import Client

 # Initialize the GPT client with the desired provider
-client = Client(provider=g4f.Provider.Bing)
+client = Client()

 # Initialize an empty conversation history
 messages = []
@@ -203,4 +243,4 @@ while True:
         print(f"An error occurred: {e}")
 ```

-[Return to Home](/)
+[Return to Home](/)
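The final two hunks bracket a command-line loop whose body is elided from this diff. Purely as an illustration of the pattern they imply (a growing `messages` history plus a try/except around each request), a hypothetical minimal loop might look like this; none of it should be read as the file's actual contents:

```python
import g4f
from g4f.client import Client

client = Client()
messages = []  # running conversation history

while True:
    user_input = input("You: ")
    if user_input.lower() in ("quit", "exit"):
        break
    messages.append({"role": "user", "content": user_input})
    try:
        response = client.chat.completions.create(
            model=g4f.models.default,
            messages=messages,
        )
        reply = response.choices[0].message.content
        print(f"Bot: {reply}")
        # Keep the assistant's reply so later turns have full context.
        messages.append({"role": "assistant", "content": reply})
    except Exception as e:
        print(f"An error occurred: {e}")
```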
