diff --git a/README.md b/README.md
index 058a605b98e..6a1df499aef 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@
 > By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is not responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
 
 > [!Note]
-> Latest pypi version: [`0.1.8.2`](https://pypi.org/project/g4f/0.1.8.2)
+> Latest pypi version: [`0.1.8.7`](https://pypi.org/project/g4f/0.1.8.7)
 ```sh
 pip install -U g4f
 ```
@@ -243,6 +243,41 @@ for message in response:
     print(message)
 ```
 
+##### Using Browser
+
+Some providers use a browser to bypass the bot protection.
+They use the Selenium webdriver to control the browser.
+The browser settings and the login data are saved in a custom directory.
+If headless mode is enabled, the browser windows are loaded invisibly.
+For performance reasons, it is recommended to reuse browser instances
+and to close them yourself when you are done:
+
+```python
+import g4f
+from undetected_chromedriver import Chrome, ChromeOptions
+from g4f.Provider import (
+    Bard,
+    Poe,
+    AItianhuSpace,
+    MyShell,
+    Phind,
+    PerplexityAi,
+)
+
+options = ChromeOptions()
+options.add_argument("--incognito")
+browser = Chrome(options=options, headless=True)
+for idx in range(10):
+    response = g4f.ChatCompletion.create(
+        model=g4f.models.default,
+        provider=g4f.Provider.Phind,
+        messages=[{"role": "user", "content": "Suggest me a name."}],
+        browser=browser
+    )
+    print(f"{idx}:", response)
+browser.quit()
+```
+
 ##### Cookies Required
 
 Cookies are essential for the proper functioning of some service providers. It is imperative to maintain an active session, typically achieved by logging into your account.
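The commented `g4f.get_cookies` call in the next hunk is the automatic alternative to hard-coding cookie values; a minimal sketch of that approach (the `.bing.com` domain is only an assumed example):

```python
import g4f

# Pull cookies for a domain from the local browser profile instead of hard-coding them.
# ".bing.com" is an example domain, not a requirement of the API.
cookies = g4f.get_cookies(".bing.com")
print(list(cookies)[:5])  # inspect which cookie names were found
```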
@@ -253,18 +288,16 @@ When running the g4f package locally, the package automatically retrieves cookie
 
 ```python
 import g4f
 from g4f.Provider import (
-    Bard,
     Bing,
     HuggingChat,
     OpenAssistant,
-    OpenaiChat,
 )
 # Usage
 response = g4f.ChatCompletion.create(
     model=g4f.models.default,
     messages=[{"role": "user", "content": "Hello"}],
-    provider=Bard,
+    provider=Bing,
     #cookies=g4f.get_cookies(".google.com"),
     cookies={"cookie_name": "value", "cookie_name2": "value2"},
     auth=True
@@ -349,7 +382,7 @@ g4f api
 or
 
 ```sh
-python -m g4f.api
+python -m g4f.api.run
 ```
 
 ```python
diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py
index 310e325eec5..ff04f9619e9 100644
--- a/etc/tool/create_provider.py
+++ b/etc/tool/create_provider.py
@@ -38,9 +38,9 @@ def input_command():
 class ChatGpt(AsyncGeneratorProvider):
-    url = "https://chat-gpt.com"
+    url = "https://chat-gpt.com"
+    working = True
     supports_gpt_35_turbo = True
-    working = True
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 2fdcfcde217..fcf9a4fb8a4 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import json
-import browser_cookie3
 
 from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
@@ -10,7 +9,7 @@
 class AItianhu(AsyncGeneratorProvider):
     url = "https://www.aitianhu.com"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
 
     @classmethod
@@ -72,12 +71,11 @@ async def create_async_generator(
                     if "detail" not in line:
                         raise RuntimeError(f"Response: {line}")
-                    if content := line["detail"]["choices"][0]["delta"].get(
-                        "content"
-                    ):
+
+                    content = line["detail"]["choices"][0]["delta"].get("content")
+                    if content:
                         yield content
 
-
     @classmethod
     @property
     def params(cls):
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index d316fc6fd0d..fabe6b475e5 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -1,95 +1,116 @@
 from __future__ import annotations
 
-import random, json
-from .. import debug
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+import time
+import random
 
-domains = {
-    "gpt-3.5-turbo": "aitianhu.space",
-    "gpt-4": "aitianhu.website",
-}
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import WebDriver, WebDriverSession, format_prompt, get_random_string
+from .. import debug
 
-class AItianhuSpace(AsyncGeneratorProvider):
+class AItianhuSpace(BaseProvider):
     url = "https://chat3.aiyunos.top/"
     working = True
+    supports_stream = True
     supports_gpt_35_turbo = True
+    _domains = ["aitianhu.com", "aitianhu1.top"]
 
     @classmethod
-    async def create_async_generator(cls,
-                                     model: str,
-                                     messages: Messages,
-                                     proxy: str = None,
-                                     domain: str = None,
-                                     cookies: dict = None,
-                                     timeout: int = 10, **kwargs) -> AsyncResult:
-
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        domain: str = None,
+        proxy: str = None,
+        timeout: int = 120,
+        web_driver: WebDriver = None,
+        headless: bool = True,
+        **kwargs
+    ) -> CreateResult:
         if not model:
             model = "gpt-3.5-turbo"
-
-        elif model not in domains:
-            raise ValueError(f"Model are not supported: {model}")
-
         if not domain:
-            chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
-            rand = ''.join(random.choice(chars) for _ in range(6))
-            domain = f"{rand}.{domains[model]}"
-
+            rand = get_random_string(6)
+            domain = random.choice(cls._domains)
+            domain = f"{rand}.{domain}"
         if debug.logging:
             print(f"AItianhuSpace | using domain: {domain}")
+        url = f"https://{domain}"
+        prompt = format_prompt(messages)
 
-        if not cookies:
-            cookies = get_cookies('.aitianhu.space')
-            if not cookies:
-                raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
+        with WebDriverSession(web_driver, "", headless=headless, proxy=proxy) as driver:
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
 
-        url = f'https://{domain}'
-        async with StreamSession(proxies={"https": proxy},
-                                 cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:
-
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            headers = {
-                "Authority": url,
-                "Accept": "application/json, text/plain, */*",
-                "Origin": url,
-                "Referer": f"{url}/"
-            }
-            async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
-                response.raise_for_status()
-                async for line in response.iter_lines():
-                    if line == b"
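For context on the rewritten provider above, a hedged sketch of how it might be driven: it assumes `g4f.ChatCompletion.create` forwards extra keyword arguments (here `web_driver` and `headless`, named after the new `create_completion` parameters) through to the provider, in the same way the README example above passes a reused browser.

```python
import g4f
from undetected_chromedriver import Chrome, ChromeOptions

# Reuse a single browser instance for the request and close it explicitly afterwards.
options = ChromeOptions()
browser = Chrome(options=options, headless=True)
try:
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=g4f.Provider.AItianhuSpace,
        messages=[{"role": "user", "content": "Hello"}],
        web_driver=browser,  # assumed to reach create_completion's web_driver parameter
        headless=True,
    )
    print(response)
finally:
    browser.quit()
```

Passing an existing driver avoids launching a fresh browser per request, which is the same motivation the README section above gives for reusing instances.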