diff --git a/docs/async_client.md b/docs/async_client.md
index cc4c5806504..891a2356731 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -292,7 +292,7 @@ client = AsyncClient(provider=g4f.Provider.OpenaiChat)
response = await client.chat.completions.create(
model="gpt-4",
- provider=g4f.Provider.Bing,
+ provider=g4f.Provider.CopilotAccount,
messages=[
{
"role": "user",
diff --git a/docs/legacy.md b/docs/legacy.md
index d5cd5a36a2d..393e3c397b5 100644
--- a/docs/legacy.md
+++ b/docs/legacy.md
@@ -7,7 +7,7 @@ import g4f
g4f.debug.logging = True # Enable debug logging
g4f.debug.version_check = False # Disable automatic version checking
-print(g4f.Provider.Bing.params) # Print supported args for Bing
+print(g4f.Provider.Gemini.params) # Print supported args for Gemini
# Using an automatically selected provider for the given model
## Streamed completion
@@ -78,7 +78,7 @@ for message in response:
Image upload and generation are supported by three main providers:
-- **Bing & Other GPT-4 Providers:** Utilizes Microsoft's Image Creator.
+- **Microsoft Copilot & Other GPT-4 Providers:** Utilizes Microsoft's Image Creator.
- **Google Gemini:** Available for free accounts with IP addresses outside Europe.
- **OpenaiChat with GPT-4:** Accessible for users with a Plus subscription.
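A rough sketch of the upload path described above, patterned on the legacy `ChatCompletion.create` call; the `image` keyword and Gemini accepting it here are assumptions carried over from these docs, not guarantees for every provider:

```python
import g4f

# Vision-style request against one of the providers listed above;
# the image file object is assumed to be forwarded to the provider.
with open("image.jpg", "rb") as image_file:
    response = g4f.ChatCompletion.create(
        model=g4f.models.default,
        provider=g4f.Provider.Gemini,
        messages=[{"role": "user", "content": "Describe this image."}],
        image=image_file,
    )
print(response)
```
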
diff --git a/etc/unittest/client.py b/etc/unittest/client.py
index 97f9f6c8dc4..663d5f282eb 100644
--- a/etc/unittest/client.py
+++ b/etc/unittest/client.py
@@ -2,7 +2,10 @@
import unittest
-from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
+from g4f.errors import ModelNotFoundError
+from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk, get_model_and_provider
+from g4f.Provider.Copilot import Copilot
+from g4f.models import gpt_4o
from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
@@ -104,5 +107,35 @@ def test_stop(self):
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("How are you?", response.choices[0].message.content)
+ def test_model_not_found(self):
+ def run_exception():
+ client = Client()
+            client.chat.completions.create(DEFAULT_MESSAGES, "Hello")  # "Hello" is not a known model name
+ self.assertRaises(ModelNotFoundError, run_exception)
+
+ def test_best_provider(self):
+ not_default_model = "gpt-4o"
+ model, provider = get_model_and_provider(not_default_model, None, False)
+ self.assertTrue(hasattr(provider, "create_completion"))
+ self.assertEqual(model, not_default_model)
+
+ def test_default_model(self):
+ default_model = ""
+ model, provider = get_model_and_provider(default_model, None, False)
+ self.assertTrue(hasattr(provider, "create_completion"))
+ self.assertEqual(model, default_model)
+
+ def test_provider_as_model(self):
+ provider_as_model = Copilot.__name__
+ model, provider = get_model_and_provider(provider_as_model, None, False)
+ self.assertTrue(hasattr(provider, "create_completion"))
+ self.assertIsInstance(model, str)
+ self.assertEqual(model, Copilot.default_model)
+
+ def test_get_model(self):
+ model, provider = get_model_and_provider(gpt_4o.name, None, False)
+ self.assertTrue(hasattr(provider, "create_completion"))
+ self.assertEqual(model, gpt_4o.name)
+
if __name__ == '__main__':
unittest.main()
\ No newline at end of file
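The new tests pin down three resolution paths in `get_model_and_provider` (called positionally as `(model, provider, stream)` above). Outside the test harness, the same calls look like this:

```python
from g4f.client import get_model_and_provider

# 1. A known model name resolves to that model's best provider.
model, provider = get_model_and_provider("gpt-4o", None, False)

# 2. An empty model name falls back to the default model and its best provider.
model, provider = get_model_and_provider("", None, False)

# 3. A provider name passed as the model selects that provider and its
#    default_model (or "" when the provider defines none).
model, provider = get_model_and_provider("Copilot", None, False)

# Anything else raises ModelNotFoundError.
```
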
diff --git a/etc/unittest/main.py b/etc/unittest/main.py
index a3949216fff..0c71aea10bf 100644
--- a/etc/unittest/main.py
+++ b/etc/unittest/main.py
@@ -1,7 +1,9 @@
import unittest
import asyncio
+
import g4f
from g4f import ChatCompletion, get_last_provider
+from g4f.errors import VersionNotFoundError
from g4f.Provider import RetryProvider
from .mocks import ProviderMock
@@ -27,4 +29,11 @@ def test_get_last_provider_as_dict(self):
last_provider_dict = get_last_provider(True)
self.assertIsInstance(last_provider_dict, dict)
self.assertIn('name', last_provider_dict)
- self.assertEqual(ProviderMock.__name__, last_provider_dict['name'])
\ No newline at end of file
+ self.assertEqual(ProviderMock.__name__, last_provider_dict['name'])
+
+ def test_get_latest_version(self):
+ try:
+ self.assertIsInstance(g4f.version.utils.current_version, str)
+ except VersionNotFoundError:
+ pass
+ self.assertIsInstance(g4f.version.utils.latest_version, str)
\ No newline at end of file
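The same tolerance applies when querying versions by hand: `current_version` raises `VersionNotFoundError` when no package metadata is found, while `latest_version` is looked up on each access once the `g4f/version.py` change below lands. A short sketch:

```python
import g4f
from g4f.errors import VersionNotFoundError

try:
    print("installed:", g4f.version.utils.current_version)
except VersionNotFoundError:
    print("installed: unknown (no package metadata found)")

# Re-fetched on every access now that latest_version is a plain property.
print("latest:", g4f.version.utils.latest_version)
```
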
diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py
index 02bbbcc4166..ebfc7c2d9f7 100644
--- a/g4f/Provider/ChatGpt.py
+++ b/g4f/Provider/ChatGpt.py
@@ -205,6 +205,7 @@ def create_completion(
response = session.post('https://chatgpt.com/backend-anon/conversation',
headers=headers, json=json_data, stream=True)
+ response.raise_for_status()
replace = ''
for line in response.iter_lines():
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index 5721f37701e..3c431df20e7 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -23,7 +23,7 @@
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from ..providers.asyncio import get_running_loop
-from ..Provider.openai.har_file import NoValidHarFileError, get_headers
+from ..Provider.openai.har_file import NoValidHarFileError, get_headers, get_har_files
from ..requests import get_nodriver
from ..image import ImageResponse, to_bytes, is_accepted_format
from ..cookies import get_cookies_dir
@@ -188,16 +188,9 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
return access_token, cookies
def readHAR():
- harPath = []
- for root, _, files in os.walk(get_cookies_dir()):
- for file in files:
- if file.endswith(".har"):
- harPath.append(os.path.join(root, file))
- if not harPath:
- raise NoValidHarFileError("No .har file found")
api_key = None
cookies = None
- for path in harPath:
+ for path in get_har_files():
with open(path, 'rb') as file:
try:
harFile = json.loads(file.read())
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 37bdf0742c8..392d4d5da62 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -424,10 +424,10 @@ async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: C
for element in c.get("parts"):
if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
image = cls.get_generated_image(session, cls._headers, element)
- if image is not None:
- generated_images.append(image)
+ generated_images.append(image)
for image_response in await asyncio.gather(*generated_images):
- yield image_response
+ if image_response is not None:
+ yield image_response
if m.get("author", {}).get("role") == "assistant":
fields.message_id = v.get("message", {}).get("id")
return
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index e863b6acf77..819952cda14 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -45,7 +45,7 @@ def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
self.arkCookies = arkCookies
self.userAgent = userAgent
-def readHAR():
+def get_har_files():
harPath = []
for root, _, files in os.walk(get_cookies_dir()):
for file in files:
@@ -53,7 +53,11 @@ def readHAR():
harPath.append(os.path.join(root, file))
if not harPath:
raise NoValidHarFileError("No .har file found")
- for path in harPath:
+    harPath.sort(key=os.path.getmtime)
+ return harPath
+
+def readHAR():
+ for path in get_har_files():
with open(path, 'rb') as file:
try:
harFile = json.loads(file.read())
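`get_har_files()` now returns captures sorted oldest-first by modification time, so readers that overwrite earlier matches as they iterate (as the Copilot `readHAR` above appears to) end up keeping credentials from the newest .har capture. A standalone sketch of the same discovery logic — a hypothetical helper, not the module's actual code:

```python
import os
from glob import glob

def get_har_files(cookies_dir: str) -> list[str]:
    """Collect .har captures under cookies_dir, oldest first by mtime."""
    har_paths = glob(os.path.join(cookies_dir, "**", "*.har"), recursive=True)
    if not har_paths:
        raise FileNotFoundError("No .har file found")
    # Oldest first, so values parsed from the newest capture are applied last.
    har_paths.sort(key=os.path.getmtime)
    return har_paths
```
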
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index 86a810493b9..0b01fe0ff6a 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -8,7 +8,6 @@
import base64
from typing import Union, AsyncIterator, Iterator, Coroutine, Optional
-from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, copy_images, images_dir
from ..typing import Messages, Image, ImageType
from ..providers.types import ProviderType
@@ -58,7 +57,7 @@ def iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
- elif isinstance(chunk, SynthesizeData):
+ elif isinstance(chunk, SynthesizeData) or chunk is None:
continue
chunk = str(chunk)
@@ -121,7 +120,7 @@ async def async_iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
- elif isinstance(chunk, SynthesizeData):
+ elif isinstance(chunk, SynthesizeData) or chunk is None:
continue
chunk = str(chunk)
@@ -292,6 +291,7 @@ async def async_generate(
**kwargs
) -> ImagesResponse:
provider_handler = await self.get_provider_handler(model, provider, BingCreateImages)
+        provider_name = provider_handler.__name__ if hasattr(provider_handler, "__name__") else type(provider_handler).__name__
if proxy is None:
proxy = self.client.proxy
@@ -317,17 +317,17 @@ async def async_generate(
response = item
break
else:
- raise ValueError(f"Provider {getattr(provider_handler, '__name__')} does not support image generation")
+ raise ValueError(f"Provider {provider_name} does not support image generation")
if isinstance(response, ImageResponse):
return await self._process_image_response(
response,
response_format,
proxy,
model,
- getattr(provider_handler, "__name__", None)
+ provider_name
)
if response is None:
- raise NoImageResponseError(f"No image response from {getattr(provider_handler, '__name__')}")
+ raise NoImageResponseError(f"No image response from {provider_name}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")
def create_variation(
@@ -352,6 +352,7 @@ async def async_create_variation(
**kwargs
) -> ImagesResponse:
provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount)
+        provider_name = provider_handler.__name__ if hasattr(provider_handler, "__name__") else type(provider_handler).__name__
if proxy is None:
proxy = self.client.proxy
@@ -372,14 +373,14 @@ async def async_create_variation(
else:
response = provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
else:
- raise NoImageResponseError(f"Provider {provider} does not support image variation")
-
+ raise NoImageResponseError(f"Provider {provider_name} does not support image variation")
+
if isinstance(response, str):
response = ImageResponse([response])
if isinstance(response, ImageResponse):
+            return await self._process_image_response(response, response_format, proxy, model, provider_name)
+ return self._process_image_response(response, response_format, proxy, model, provider_name)
if response is None:
- raise NoImageResponseError(f"No image response from {getattr(provider, '__name__')}")
+ raise NoImageResponseError(f"No image response from {provider_name}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")
async def _process_image_response(
diff --git a/g4f/client/service.py b/g4f/client/service.py
index 44533ece9d3..7fd089d43a2 100644
--- a/g4f/client/service.py
+++ b/g4f/client/service.py
@@ -61,18 +61,26 @@ def get_model_and_provider(model : Union[Model, str],
if not provider:
if not model:
model = default
+ provider = model.best_provider
elif isinstance(model, str):
- raise ModelNotFoundError(f'Model not found: {model}')
- provider = model.best_provider
+ if model in ProviderUtils.convert:
+ provider = ProviderUtils.convert[model]
+ model = provider.default_model if hasattr(provider, "default_model") else ""
+ else:
+ raise ModelNotFoundError(f'Model not found: {model}')
+ else:
+ provider = model.best_provider
if not provider:
raise ProviderNotFoundError(f'No provider found for model: {model}')
+ provider_name = provider.__name__ if hasattr(provider, "__name__") else type(provider).__name__
+
if isinstance(model, Model):
model = model.name
if not ignore_working and not provider.working:
- raise ProviderNotWorkingError(f'{provider.__name__} is not working')
+ raise ProviderNotWorkingError(f"{provider_name} is not working")
if isinstance(provider, BaseRetryProvider):
if not ignore_working:
@@ -81,12 +89,12 @@ def get_model_and_provider(model : Union[Model, str],
provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
if not ignore_stream and not provider.supports_stream and stream:
- raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
+ raise StreamNotSupportedError(f'{provider_name} does not support "stream" argument')
if model:
- debug.log(f'Using {provider.__name__} provider and {model} model')
+ debug.log(f'Using {provider_name} provider and {model} model')
else:
- debug.log(f'Using {provider.__name__} provider')
+ debug.log(f'Using {provider_name} provider')
debug.last_provider = provider
debug.last_model = model
@@ -109,7 +117,7 @@ def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, st
if as_dict:
if last:
return {
- "name": last.__name__,
+ "name": last.__name__ if hasattr(last, "__name__") else type(last).__name__,
"url": last.url,
"model": debug.last_model,
"label": getattr(last, "label", None) if hasattr(last, "label") else None
diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py
index 7367ac75d42..cc137b08cf5 100644
--- a/g4f/client/stubs.py
+++ b/g4f/client/stubs.py
@@ -147,4 +147,4 @@ def model_construct(cls, data: List[Image], created: int = None, model: str = No
model=model,
provider=provider,
created=created
- )
+ )
\ No newline at end of file
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 977a8908a21..144de7c31a7 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -477,6 +477,7 @@ body.white .gradient{
right: 8px;
top: 8px;
z-index: 1000;
+ cursor: pointer;
}
.count_total {
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 0bf49ac31d5..a1975dd0118 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1327,6 +1327,8 @@ async function on_api() {
}
async function load_version() {
+ let new_version = document.querySelector(".new_version");
+ if (new_version) return;
const versions = await api("version");
document.title = 'g4f - ' + versions["version"];
let text = "version ~ "
@@ -1334,7 +1336,7 @@ async function load_version() {
let release_url = 'https://github.com/xtekky/gpt4free/releases/latest';
let title = `New version: ${versions["latest_version"]}`;
text += `${versions["version"]} 🆕`;
- const new_version = document.createElement("div");
+ new_version = document.createElement("div");
new_version.classList.add("new_version");
const link = `v${versions["latest_version"]}`;
new_version.innerHTML = `g4f ${link} 🆕`;
@@ -1344,6 +1346,7 @@ async function load_version() {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
+ setTimeout(load_version, 1000 * 60 * 60); // 1 hour
}
setTimeout(load_version, 100);
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index 2e576d1900b..ded13866308 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -123,6 +123,7 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
timeout=timeout,
impersonate="chrome"
)
+
def get_cookie_params_from_dict(cookies: Cookies, url: str = None, domain: str = None) -> list[CookieParam]:
[CookieParam.from_json({
"name": key,
@@ -172,12 +173,12 @@ def merge_cookies(cookies: Iterator[Morsel], response: Response) -> Cookies:
cookies = {}
for cookie in response.cookies.jar:
cookies[cookie.name] = cookie.value
-
+
async def get_nodriver(proxy: str = None, **kwargs)-> Browser:
if not has_nodriver:
raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
- debug.log(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Open nodriver with user_dir: {user_data_dir}")
return await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
diff --git a/g4f/version.py b/g4f/version.py
index 403ce370496..3cde30c7e9f 100644
--- a/g4f/version.py
+++ b/g4f/version.py
@@ -88,7 +88,7 @@ def current_version(self) -> str:
raise VersionNotFoundError("Version not found")
- @cached_property
+ @property
def latest_version(self) -> str:
"""
Retrieves the latest version of the 'g4f' package.
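The `@cached_property` → `@property` change above means `latest_version` is re-evaluated on every access instead of being computed once per instance, which is what lets the GUI's hourly `load_version` timer (see the `chat.v1.js` hunk earlier) surface a new release without a restart. The difference in one sketch:

```python
from functools import cached_property

class Cached:
    @cached_property
    def value(self):
        print("computed")  # runs once; the result is stored on the instance
        return 1

class Fresh:
    @property
    def value(self):
        print("computed")  # runs on every access
        return 1

c, f = Cached(), Fresh()
c.value; c.value  # prints "computed" once
f.value; f.value  # prints "computed" twice
```
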
diff --git a/requirements-slim.txt b/requirements-slim.txt
index a7e105db282..1ca3bd3e298 100644
--- a/requirements-slim.txt
+++ b/requirements-slim.txt
@@ -3,6 +3,7 @@ pycryptodome
curl_cffi>=0.6.2
aiohttp
certifi
+duckduckgo-search>=6.3.7
nest_asyncio
werkzeug
pillow