Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add duckduckgo-search to slim requirements, #2432

Merged
merged 3 commits into from
Nov 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/async_client.md
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ client = AsyncClient(provider=g4f.Provider.OpenaiChat)

response = await client.chat.completions.create(
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
model="gpt-4",
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
provider=g4f.Provider.Bing,
provider=g4f.Provider.CopilotAccount,
messages=[
{
"role": "user",
Expand Down
4 changes: 2 additions & 2 deletions docs/legacy.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import g4f

g4f.debug.logging = True # Enable debug logging
g4f.debug.version_check = False # Disable automatic version checking
print(g4f.Provider.Bing.params) # Print supported args for Bing
print(g4f.Provider.Gemini.params) # Print supported args for Gemini
hlohaus marked this conversation as resolved.
Show resolved Hide resolved

# Using an automatic provider for the given model
## Streamed completion
Expand Down Expand Up @@ -78,7 +78,7 @@ for message in response:

Image upload and generation are supported by three main providers:

- **Bing & Other GPT-4 Providers:** Utilizes Microsoft's Image Creator.
- **Microsoft Copilot & Other GPT-4 Providers:** Utilizes Microsoft's Image Creator.
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
- **Google Gemini:** Available for free accounts with IP addresses outside Europe.
- **OpenaiChat with GPT-4:** Accessible for users with a Plus subscription.

Expand Down
35 changes: 34 additions & 1 deletion etc/unittest/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@

import unittest

hlohaus marked this conversation as resolved.
Show resolved Hide resolved
from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
from g4f.errors import ModelNotFoundError
from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk, get_model_and_provider
from g4f.Provider.Copilot import Copilot
from g4f.models import gpt_4o
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock

DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
Expand Down Expand Up @@ -104,5 +107,35 @@ def test_stop(self):
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("How are you?", response.choices[0].message.content)

def test_model_not_found(self):
def run_exception():
client = Client()
client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
self.assertRaises(ModelNotFoundError, run_exception)

def test_best_provider(self):
    # A concrete (non-default) model name should resolve to a working
    # provider that exposes create_completion, without renaming the model.
    not_default_model = "gpt-4o"
    model, provider = get_model_and_provider(not_default_model, None, False)
    self.assertTrue(hasattr(provider, "create_completion"))
    self.assertEqual(model, not_default_model)

def test_default_model(self):
    # An empty model string selects the default provider; the empty model
    # name is passed through unchanged.
    default_model = ""
    model, provider = get_model_and_provider(default_model, None, False)
    self.assertTrue(hasattr(provider, "create_completion"))
    self.assertEqual(model, default_model)

def test_provider_as_model(self):
    # Passing a provider's class name as the "model" selects that provider
    # and substitutes its default_model as the effective model string.
    provider_as_model = Copilot.__name__
    model, provider = get_model_and_provider(provider_as_model, None, False)
    self.assertTrue(hasattr(provider, "create_completion"))
    self.assertIsInstance(model, str)
    self.assertEqual(model, Copilot.default_model)

def test_get_model(self):
model, provider = get_model_and_provider(gpt_4o.name, None, False)
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
self.assertTrue(hasattr(provider, "create_completion"))
self.assertEqual(model, gpt_4o.name)

if __name__ == '__main__':
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
unittest.main()
11 changes: 10 additions & 1 deletion etc/unittest/main.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import unittest
import asyncio

import g4f
from g4f import ChatCompletion, get_last_provider
from g4f.errors import VersionNotFoundError
from g4f.Provider import RetryProvider
from .mocks import ProviderMock

Expand All @@ -27,4 +29,11 @@ def test_get_last_provider_as_dict(self):
last_provider_dict = get_last_provider(True)
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
self.assertIsInstance(last_provider_dict, dict)
self.assertIn('name', last_provider_dict)
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
self.assertEqual(ProviderMock.__name__, last_provider_dict['name'])
self.assertEqual(ProviderMock.__name__, last_provider_dict['name'])

def test_get_latest_version(self):
    # current_version may legitimately be unavailable (e.g. package not
    # installed from a release), so VersionNotFoundError is tolerated here.
    try:
        self.assertIsInstance(g4f.version.utils.current_version, str)
    except VersionNotFoundError:
        pass
    # latest_version is fetched remotely and must always be a string.
    self.assertIsInstance(g4f.version.utils.latest_version, str)
1 change: 1 addition & 0 deletions g4f/Provider/ChatGpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,7 @@ def create_completion(

response = session.post('https://chatgpt.com/backend-anon/conversation',
headers=headers, json=json_data, stream=True)
response.raise_for_status()

replace = ''
for line in response.iter_lines():
Expand Down
11 changes: 2 additions & 9 deletions g4f/Provider/Copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from ..providers.asyncio import get_running_loop
from ..Provider.openai.har_file import NoValidHarFileError, get_headers
from ..Provider.openai.har_file import NoValidHarFileError, get_headers, get_har_files
from ..requests import get_nodriver
from ..image import ImageResponse, to_bytes, is_accepted_format
from ..cookies import get_cookies_dir
Expand Down Expand Up @@ -188,16 +188,9 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
return access_token, cookies

def readHAR():
harPath = []
for root, _, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
if not harPath:
raise NoValidHarFileError("No .har file found")
api_key = None
cookies = None
for path in harPath:
for path in get_har_files():
with open(path, 'rb') as file:
try:
harFile = json.loads(file.read())
Expand Down
6 changes: 3 additions & 3 deletions g4f/Provider/needs_auth/OpenaiChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,10 +424,10 @@ async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: C
for element in c.get("parts"):
if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
image = cls.get_generated_image(session, cls._headers, element)
if image is not None:
generated_images.append(image)
generated_images.append(image)
for image_response in await asyncio.gather(*generated_images):
yield image_response
if image_response is not None:
yield image_response
if m.get("author", {}).get("role") == "assistant":
fields.message_id = v.get("message", {}).get("id")
return
Expand Down
8 changes: 6 additions & 2 deletions g4f/Provider/openai/har_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,19 @@ def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
self.arkCookies = arkCookies
self.userAgent = userAgent

def readHAR():
def get_har_files():
    """Collect all ``.har`` files found under the cookies directory.

    Walks the tree rooted at ``get_cookies_dir()`` and gathers every file
    ending in ``.har``. The result is sorted by file modification time
    (oldest first), so callers that iterate the list let the most recently
    captured HAR file take precedence last.

    Returns:
        list[str]: absolute paths of the HAR files, oldest-modified first.

    Raises:
        NoValidHarFileError: if no ``.har`` file exists anywhere in the tree.
    """
    harPath = []
    for root, _, files in os.walk(get_cookies_dir()):
        for file in files:
            if file.endswith(".har"):
                harPath.append(os.path.join(root, file))
    if not harPath:
        raise NoValidHarFileError("No .har file found")
    # Pass the key function directly; wrapping it in a lambda adds nothing.
    harPath.sort(key=os.path.getmtime)
    return harPath

def readHAR():
for path in get_har_files():
with open(path, 'rb') as file:
try:
harFile = json.loads(file.read())
Expand Down
21 changes: 11 additions & 10 deletions g4f/client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
import base64
from typing import Union, AsyncIterator, Iterator, Coroutine, Optional

from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, copy_images, images_dir
from ..typing import Messages, Image, ImageType
from ..providers.types import ProviderType
Expand Down Expand Up @@ -58,7 +57,7 @@ def iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
elif isinstance(chunk, SynthesizeData):
elif isinstance(chunk, SynthesizeData) or chunk is None:
continue

chunk = str(chunk)
Expand Down Expand Up @@ -121,7 +120,7 @@ async def async_iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
elif isinstance(chunk, SynthesizeData):
elif isinstance(chunk, SynthesizeData) or chunk is None:
continue

chunk = str(chunk)
Expand Down Expand Up @@ -292,6 +291,7 @@ async def async_generate(
**kwargs
) -> ImagesResponse:
provider_handler = await self.get_provider_handler(model, provider, BingCreateImages)
provider_name = provider.__name__ if hasattr(provider, "__name__") else type(provider).__name__
if proxy is None:
proxy = self.client.proxy

Expand All @@ -317,17 +317,17 @@ async def async_generate(
response = item
break
else:
raise ValueError(f"Provider {getattr(provider_handler, '__name__')} does not support image generation")
raise ValueError(f"Provider {provider_name} does not support image generation")
if isinstance(response, ImageResponse):
return await self._process_image_response(
response,
response_format,
proxy,
model,
getattr(provider_handler, "__name__", None)
provider_name
)
if response is None:
raise NoImageResponseError(f"No image response from {getattr(provider_handler, '__name__')}")
raise NoImageResponseError(f"No image response from {provider_name}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")

def create_variation(
Expand All @@ -352,6 +352,7 @@ async def async_create_variation(
**kwargs
) -> ImagesResponse:
provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount)
provider_name = provider.__name__ if hasattr(provider, "__name__") else type(provider).__name__
if proxy is None:
proxy = self.client.proxy

Expand All @@ -372,14 +373,14 @@ async def async_create_variation(
else:
response = provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
else:
raise NoImageResponseError(f"Provider {provider} does not support image variation")
raise NoImageResponseError(f"Provider {provider_name} does not support image variation")

if isinstance(response, str):
response = ImageResponse([response])
if isinstance(response, ImageResponse):
return self._process_image_response(response, response_format, proxy, model, getattr(provider, "__name__", None))
return self._process_image_response(response, response_format, proxy, model, provider_name)
if response is None:
raise NoImageResponseError(f"No image response from {getattr(provider, '__name__')}")
raise NoImageResponseError(f"No image response from {provider_name}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")

async def _process_image_response(
Expand Down
22 changes: 15 additions & 7 deletions g4f/client/service.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,18 +61,26 @@ def get_model_and_provider(model : Union[Model, str],
if not provider:
if not model:
model = default
provider = model.best_provider
elif isinstance(model, str):
raise ModelNotFoundError(f'Model not found: {model}')
provider = model.best_provider
if model in ProviderUtils.convert:
provider = ProviderUtils.convert[model]
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
model = provider.default_model if hasattr(provider, "default_model") else ""
else:
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
raise ModelNotFoundError(f'Model not found: {model}')
else:
provider = model.best_provider
hlohaus marked this conversation as resolved.
Show resolved Hide resolved

if not provider:
raise ProviderNotFoundError(f'No provider found for model: {model}')

provider_name = provider.__name__ if hasattr(provider, "__name__") else type(provider).__name__

if isinstance(model, Model):
model = model.name

if not ignore_working and not provider.working:
raise ProviderNotWorkingError(f'{provider.__name__} is not working')
raise ProviderNotWorkingError(f"{provider_name} is not working")

if isinstance(provider, BaseRetryProvider):
if not ignore_working:
Expand All @@ -81,12 +89,12 @@ def get_model_and_provider(model : Union[Model, str],
provider.providers = [p for p in provider.providers if p.__name__ not in ignored]

if not ignore_stream and not provider.supports_stream and stream:
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
raise StreamNotSupportedError(f'{provider_name} does not support "stream" argument')

if model:
debug.log(f'Using {provider.__name__} provider and {model} model')
debug.log(f'Using {provider_name} provider and {model} model')
else:
debug.log(f'Using {provider.__name__} provider')
debug.log(f'Using {provider_name} provider')

debug.last_provider = provider
debug.last_model = model
Expand All @@ -109,7 +117,7 @@ def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, st
if as_dict:
if last:
return {
"name": last.__name__,
"name": last.__name__ if hasattr(last, "__name__") else type(last).__name__,
"url": last.url,
"model": debug.last_model,
"label": getattr(last, "label", None) if hasattr(last, "label") else None
Expand Down
2 changes: 1 addition & 1 deletion g4f/client/stubs.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,4 +147,4 @@ def model_construct(cls, data: List[Image], created: int = None, model: str = No
model=model,
provider=provider,
created=created
)
)
1 change: 1 addition & 0 deletions g4f/gui/client/static/css/style.css
Original file line number Diff line number Diff line change
Expand Up @@ -477,6 +477,7 @@ body.white .gradient{
right: 8px;
top: 8px;
z-index: 1000;
cursor: pointer;
}

.count_total {
Expand Down
5 changes: 4 additions & 1 deletion g4f/gui/client/static/js/chat.v1.js
Original file line number Diff line number Diff line change
Expand Up @@ -1327,14 +1327,16 @@ async function on_api() {
}

async function load_version() {
let new_version = document.querySelector(".new_version");
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
if (new_version) return;
const versions = await api("version");
document.title = 'g4f - ' + versions["version"];
let text = "version ~ "
if (versions["version"] != versions["latest_version"]) {
let release_url = 'https://github.com/xtekky/gpt4free/releases/latest';
let title = `New version: ${versions["latest_version"]}`;
text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]}</a> 🆕`;
const new_version = document.createElement("div");
new_version = document.createElement("div");
new_version.classList.add("new_version");
const link = `<a href="${release_url}" target="_blank" title="${title}">v${versions["latest_version"]}</a>`;
new_version.innerHTML = `g4f ${link}&nbsp;&nbsp;🆕`;
Expand All @@ -1344,6 +1346,7 @@ async function load_version() {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
setTimeout(load_version, 1000 * 60 * 60); // 1 hour
}
setTimeout(load_version, 100);

Expand Down
5 changes: 3 additions & 2 deletions g4f/requests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
timeout=timeout,
impersonate="chrome"
)

def get_cookie_params_from_dict(cookies: Cookies, url: str = None, domain: str = None) -> list[CookieParam]:
[CookieParam.from_json({
"name": key,
Expand Down Expand Up @@ -172,12 +173,12 @@ def merge_cookies(cookies: Iterator[Morsel], response: Response) -> Cookies:
cookies = {}
for cookie in response.cookies.jar:
cookies[cookie.name] = cookie.value

async def get_nodriver(proxy: str = None, **kwargs)-> Browser:
if not has_nodriver:
raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
debug.log(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
debug.log(f"Open nodriver with user_dir: {user_data_dir}")
return await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
Expand Down
2 changes: 1 addition & 1 deletion g4f/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def current_version(self) -> str:

raise VersionNotFoundError("Version not found")

@cached_property
@property
hlohaus marked this conversation as resolved.
Show resolved Hide resolved
def latest_version(self) -> str:
"""
Retrieves the latest version of the 'g4f' package.
Expand Down
1 change: 1 addition & 0 deletions requirements-slim.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ pycryptodome
curl_cffi>=0.6.2
aiohttp
certifi
duckduckgo-search>=6.3.7
nest_asyncio
werkzeug
pillow
Expand Down