r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
- ):
+ )
+
+ if result:
user_id = result.group(1)
else:
raise RuntimeError("No user id found")
@@ -59,5 +62,7 @@ async def create_async_generator(
async for line in response.content:
if line.startswith(b"data: "):
line = json.loads(line[6:-1])
- if chunk := line["choices"][0]["delta"].get("content"):
+
+ chunk = line["choices"][0]["delta"].get("content")
+ if chunk:
yield chunk
\ No newline at end of file
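
A recurring pattern in these providers is parsing OpenAI-style server-sent events, as in the hunk above. A minimal sketch of the `data: ` line handling (function name is illustrative, not from the patch):

```python
import json
from typing import Optional

def parse_sse_line(line: bytes) -> Optional[str]:
    """Return the delta text carried by one OpenAI-style SSE line, or None."""
    if not line.startswith(b"data: "):
        return None
    payload = line[6:].strip()
    if not payload or payload == b"[DONE]":
        return None
    data = json.loads(payload)
    return data["choices"][0]["delta"].get("content")

assert parse_sse_line(b'data: {"choices":[{"delta":{"content":"Hi"}}]}\n') == "Hi"
```
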
diff --git a/g4f/Provider/ChatgptDemoAi.py b/g4f/Provider/ChatgptDemoAi.py
index 90d0c096f24..a8c98b65134 100644
--- a/g4f/Provider/ChatgptDemoAi.py
+++ b/g4f/Provider/ChatgptDemoAi.py
@@ -9,7 +9,7 @@
class ChatgptDemoAi(AsyncGeneratorProvider):
url = "https://chat.chatgptdemo.ai"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_message_history = True
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 48d6c396611..b9b2544762b 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -65,7 +65,8 @@ async def create_async(
raise RuntimeError("No post id found")
cls._post_id = result.group(1)
- if result := re.search(r'data-nonce="(.*?)"', response):
+ result = re.search(r'data-nonce="(.*?)"', response)
+ if result:
cls._nonce = result.group(1)
else:
raise RuntimeError("No nonce found")
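
This patch systematically replaces assignment expressions (the walrus operator, `:=`) with a plain assignment followed by an `if`, as in the hunk above. The two forms behave identically; a side-by-side sketch:

```python
import re

response = '<input data-nonce="abc123">'

# Before: assignment expression (requires Python 3.8+)
if result := re.search(r'data-nonce="(.*?)"', response):
    nonce = result.group(1)

# After: plain assignment, as rewritten throughout this patch
result = re.search(r'data-nonce="(.*?)"', response)
if result:
    nonce = result.group(1)
else:
    raise RuntimeError("No nonce found")
```
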
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 206e4a89f90..037e0a6ede9 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -45,10 +45,12 @@ async def create_async_generator(
async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
- if result := re.search(
+ result = re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
- ):
+ )
+
+ if result:
cls._user_id = result.group(1)
else:
raise RuntimeError("No user id found")
@@ -67,9 +69,10 @@ async def create_async_generator(
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
- if content := json.loads(line[6:])["choices"][0][
- "delta"
- ].get("content"):
+
+ content = json.loads(line[6:])["choices"][0]["delta"].get("content")
+ if content:
yield content
+
async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index c4e7209912e..c8b9375ac26 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -12,7 +12,7 @@
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
- working = True
+ working = False
@classmethod
async def create_async_generator(
@@ -35,15 +35,21 @@ async def create_async_generator(
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response = await response.text()
- if result := re.search(
+
+ result = re.search(
r'<meta name="csrf-token" content="(.*?)"',
response,
- ):
+ )
+
+ if result:
csrf_token = result.group(1)
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- if not model:
- model = "meta-llama/Llama-2-70b-chat-hf"
+ @staticmethod
+ def create_completion(model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs) -> CreateResult:
+
headers = {
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
- "Accept": "text/event-stream",
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
- "Accept-Encoding": "gzip, deflate, br",
- "Referer": f"{cls.url}/",
- "Content-Type": "application/json",
- "X-Deepinfra-Source": "web-page",
- "Origin": cls.url,
- "Connection": "keep-alive",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-site",
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
+ 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://deepinfra.com',
+ 'Pragma': 'no-cache',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'accept': 'text/event-stream',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
}
- async with ClientSession(headers=headers) as session:
- data = {
- "model": model,
- "messages": messages,
- "stream": True,
- }
- async with session.post(
- "https://api.deepinfra.com/v1/openai/chat/completions",
- json=data,
- proxy=proxy
- ) as response:
- response.raise_for_status()
- first = True
- async for line in response.content:
- if line.startswith(b"data: [DONE]"):
- break
- elif line.startswith(b"data: "):
- chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+
+ json_data = json.dumps({
+ 'model' : 'meta-llama/Llama-2-70b-chat-hf',
+ 'messages': messages,
+ 'stream' : True}, separators=(',', ':'))
+
+ response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions',
+ headers=headers, data=json_data, stream=True)
+
+ response.raise_for_status()
+ first = True
+
+ for line in response.iter_content(chunk_size=1024):
+ if line.startswith(b"data: [DONE]"):
+ break
+
+ elif line.startswith(b"data: "):
+ chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+
+ if chunk:
+ if first:
+ chunk = chunk.lstrip()
if chunk:
- if first:
- chunk = chunk.lstrip()
- if chunk:
- first = False
- yield chunk
\ No newline at end of file
+ first = False
+
+ yield chunk
\ No newline at end of file
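
One review note on the rewritten loop: `iter_content(chunk_size=1024)` yields fixed-size byte chunks, not lines, so a `data: ` event that straddles a chunk boundary will fail both `startswith` checks and be dropped. A hedged sketch of the line-based variant (endpoint and payload as in the patch, headers trimmed, helper name illustrative):

```python
import json
import requests

def stream_deepinfra(messages: list):
    response = requests.post(
        "https://api.deepinfra.com/v1/openai/chat/completions",
        json={
            "model": "meta-llama/Llama-2-70b-chat-hf",
            "messages": messages,
            "stream": True,
        },
        stream=True,
    )
    response.raise_for_status()
    for line in response.iter_lines():  # one complete SSE line per iteration
        if line.startswith(b"data: [DONE]"):
            break
        if line.startswith(b"data: "):
            content = json.loads(line[6:])["choices"][0]["delta"].get("content")
            if content:
                yield content
```
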
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 22c6c9aa435..15232c8df60 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -47,16 +47,6 @@ async def create_async_generator(
raise RuntimeError("Rate limit reached")
yield chunk
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = ""):
data = f"{timestamp}:{message}:{secret}"
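
The body of `generate_signature` is cut off above. A sketch of the likely implementation, assuming the usual SHA-256 hex digest over the joined fields (the digest choice is an assumption, not confirmed by this patch):

```python
import hashlib
import time

def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()  # assumed digest algorithm

print(generate_signature(int(time.time()), "hello"))
```
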
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
index 8c449745058..9ed9c09b981 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/GeekGpt.py
@@ -70,16 +70,4 @@ def create_completion(
raise RuntimeError(f'error | {e} :', json_data)
if content:
- yield content
-
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
+ yield content
\ No newline at end of file
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index ac3f7fe8946..538bb7b63a9 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -2,6 +2,7 @@
from aiohttp import ClientSession
import json
+import base64
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -23,9 +24,12 @@ async def create_async_generator(
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
- "Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "Accept-language": "en-US",
"Origin": cls.url,
"Referer": f"{cls.url}/",
+ "sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
@@ -33,50 +37,26 @@ async def create_async_generator(
async with ClientSession(
headers=headers
) as session:
- async with session.get(
- "https://gptgo.ai/action_get_token.php",
- params={
- "q": format_prompt(messages),
- "hlgpt": "default",
- "hl": "en"
- },
+ async with session.post(
+ "https://gptgo.ai/get_token.php",
+ data={"ask": format_prompt(messages)},
proxy=proxy
) as response:
response.raise_for_status()
- token = (await response.json(content_type=None))["token"]
+ token = await response.text()
+ token = base64.b64decode(token[10:-20]).decode()
async with session.get(
- "https://gptgo.ai/action_ai_gpt.php",
- params={
- "token": token,
- },
- proxy=proxy
- ) as response:
+ "https://api.gptgo.ai/web.php",
+ params={"array_chat": token},
+ proxy=proxy
+ ) as response:
response.raise_for_status()
- start = "data: "
async for line in response.content:
- line = line.decode()
- if line.startswith("data: "):
- if line.startswith("data: [DONE]"):
- break
- line = json.loads(line[len(start):-1])
- if line["choices"][0]["finish_reason"] == "stop":
- break
-
- content = line["choices"][0]["delta"].get("content")
- if content:
+ if line.startswith(b"data: [DONE]"):
+ break
+ if line.startswith(b"data: "):
+ line = json.loads(line[6:])
+ content = line["choices"][0]["delta"].get("content")
+ if content and content != "\n#GPTGO ":
yield content
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
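
The new token flow assumes the endpoint returns a base64 payload wrapped in fixed-length filler: the `token[10:-20]` slice strips a 10-character prefix and a 20-character suffix before decoding. A synthetic round-trip showing the shape (filler values invented for illustration):

```python
import base64

def unwrap_token(raw: str) -> str:
    # Mirrors the patch: drop 10 leading and 20 trailing filler characters,
    # then base64-decode what remains.
    return base64.b64decode(raw[10:-20]).decode()

payload = base64.b64encode(b"array_chat_value").decode()
wrapped = "x" * 10 + payload + "y" * 20
assert unwrap_token(wrapped) == "array_chat_value"
```
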
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/GptGod.py
index a10a391dba9..08d9269e511 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/GptGod.py
@@ -47,12 +47,15 @@ async def create_async_generator(
response.raise_for_status()
event = None
async for line in response.content:
- print(line)
+ # print(line)
if line.startswith(b'event: '):
event = line[7:-1]
+
elif event == b"data" and line.startswith(b"data: "):
- if data := json.loads(line[6:-1]):
+ data = json.loads(line[6:-1])
+ if data:
yield data
+
elif event == b"done":
break
\ No newline at end of file
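
GptGod uses the two-line SSE variant, where an `event:` line names the payload carried by the following `data:` line. A minimal sketch of that pairing (sample lines invented):

```python
import json

def parse_event_stream(lines):
    event = None
    for line in lines:
        if line.startswith(b"event: "):
            event = line[7:].strip()
        elif event == b"data" and line.startswith(b"data: "):
            data = json.loads(line[6:].strip())
            if data:
                yield data
        elif event == b"done":
            break

stream = [b"event: data\n", b'data: "Hel"\n', b'data: "lo"\n',
          b"event: done\n", b"data: [DONE]\n"]
print(list(parse_event_stream(stream)))  # ['Hel', 'lo']
```
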
diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py
new file mode 100644
index 00000000000..16d69f3c8a4
--- /dev/null
+++ b/g4f/Provider/GptTalkRu.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class GptTalkRu(AsyncGeneratorProvider):
+ url = "https://gpttalk.ru"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "en-US",
+ "Connection": "keep-alive",
+ "Content-Type": "application/json",
+ "Origin": "https://gpttalk.ru",
+ "Referer": "https://gpttalk.ru/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+ "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "modelType": 1,
+ "prompt": messages,
+ "responseType": "stream",
+ }
+ async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ yield chunk.decode()
\ No newline at end of file
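
A usage sketch for the new provider; in normal use `g4f.ChatCompletion` dispatches here, but the async generator can also be consumed directly:

```python
import asyncio
from g4f.Provider import GptTalkRu

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in GptTalkRu.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="")

asyncio.run(main())
```
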
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 109f7e2d359..807b44247b7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -97,17 +97,3 @@ async def create_async_generator(
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("auth", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index a1c8d33590c..5c9c4fe6f0d 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -4,7 +4,8 @@
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
class MyShell(BaseProvider):
url = "https://app.myshell.ai/chat"
@@ -20,10 +21,10 @@ def create_completion(
stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@@ -52,15 +53,16 @@ def create_completion(
"body": '{body}',
"method": "POST"
})
-window.reader = response.body.getReader();
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
"""
driver.execute_script(script.replace("{body}", json.dumps(data)))
script = """
-chunk = await window.reader.read();
-if (chunk['done']) return null;
-text = (new TextDecoder()).decode(chunk['value']);
+chunk = await window._reader.read();
+if (chunk['done']) {
+ return null;
+}
content = '';
-text.split('\\n').forEach((line, index) => {
+chunk['value'].split('\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
const data = JSON.parse(line.substring('data: '.length));
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
index 8abdf39b30b..8c2987face3 100644
--- a/g4f/Provider/Opchatgpts.py
+++ b/g4f/Provider/Opchatgpts.py
@@ -56,16 +56,4 @@ async def create_async_generator(
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
- break
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ break
\ No newline at end of file
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index c0b2412e2d9..03353a957f4 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -4,7 +4,8 @@
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
class PerplexityAi(BaseProvider):
url = "https://www.perplexity.ai"
@@ -20,12 +21,12 @@ def create_completion(
stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
virtual_display: bool = True,
copilot: bool = False,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", virtual_display=virtual_display, proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 32f6366525a..9d2acc751b3 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -1,103 +1,74 @@
from __future__ import annotations
-import time
-from urllib.parse import quote
+from datetime import datetime
-from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from ..requests import StreamSession
-class Phind(BaseProvider):
+class Phind(AsyncGeneratorProvider):
url = "https://www.phind.com"
working = True
supports_gpt_4 = True
supports_stream = True
+ supports_message_history = True
@classmethod
- def create_completion(
+ async def create_async_generator(
cls,
model: str,
messages: Messages,
- stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
- creative_mode: bool = None,
+ creative_mode: bool = False,
**kwargs
- ) -> CreateResult:
- with WebDriverSession(web_driver, "", proxy=proxy) as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
- prompt = quote(format_prompt(messages))
- driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
-
- # Register fetch hook
- driver.execute_script("""
-window._fetch = window.fetch;
-window.fetch = (url, options) => {
- // Call parent fetch method
- const result = window._fetch(url, options);
- if (url != "/api/infer/answer") {
- return result;
- }
- // Load response reader
- result.then((response) => {
- if (!response.body.locked) {
- window._reader = response.body.getReader();
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/search",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
}
- });
- // Return dummy response
- return new Promise((resolve, reject) => {
- resolve(new Response(new ReadableStream()))
- });
-}
-""")
-
- # Need to change settings
- if model.startswith("gpt-4") or creative_mode:
- wait = WebDriverWait(driver, timeout)
- # Open settings dropdown
- wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
- driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
- # Wait for dropdown toggle
- wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
- # Enable GPT-4
- if model.startswith("gpt-4"):
- driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
- # Enable creative mode
- if creative_mode or creative_mode == None:
- driver.find_element(By.ID, "Creative Mode").click()
- # Submit changes
- driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
- # Wait for page reload
- wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
-
- while True:
- chunk = driver.execute_script("""
-if(window._reader) {
- chunk = await window._reader.read();
- if (chunk['done']) return null;
- text = (new TextDecoder()).decode(chunk['value']);
- content = '';
- text.split('\\r\\n').forEach((line, index) => {
- if (line.startsWith('data: ')) {
- line = line.substring('data: '.length);
- if (!line.startsWith('<PHIND_METADATA>')) {
- if (line) content += line;
- else content += '\\n';
+ async with StreamSession(
+ impersonate="chrome110",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ prompt = messages[-1]["content"]
+ data = {
+ "question": prompt,
+ "questionHistory": [
+ message["content"] for message in messages[:-1] if message["role"] == "user"
+ ],
+ "answerHistory": [
+ message["content"] for message in messages if message["role"] == "assistant"
+ ],
+ "webResults": [],
+ "options": {
+ "date": datetime.now().strftime("%d.%m.%Y"),
+ "language": "en-US",
+ "detailed": True,
+ "anonUserId": "",
+ "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind Model",
+ "creativeMode": creative_mode,
+ "customLinks": []
+ },
+ "context": "",
+ "rewrittenQuestion": prompt
}
- }
- });
- return content.replace('\\n\\n', '\\n');
-} else {
- return ''
-}
-""")
- if chunk:
- yield chunk
- elif chunk != "":
- break
- else:
- time.sleep(0.1)
\ No newline at end of file
+ async with session.post(f"{cls.url}/api/infer/followup/answer", headers=headers, json=data) as response:
+ new_line = False
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk.startswith(b"<PHIND_METADATA>"):
+ pass
+ elif chunk:
+ yield chunk.decode()
+ elif new_line:
+ yield "\n"
+ new_line = False
+ else:
+ new_line = True
\ No newline at end of file
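
The `new_line` flag above handles what appears to be a Phind convention: a literal newline in the answer arrives as two consecutive empty `data:` events. A toy replay of that logic (event list invented):

```python
events = [b"data: Hello", b"data: ", b"data: ", b"data: world"]
new_line, out = False, ""
for line in events:
    chunk = line[6:]
    if chunk:
        out += chunk.decode()
    elif new_line:         # second empty event in a row: emit the newline
        out += "\n"
        new_line = False
    else:                  # first empty event: arm the flag
        new_line = True
print(repr(out))  # 'Hello\nworld'
```
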
diff --git a/g4f/Provider/TalkAi.py b/g4f/Provider/TalkAi.py
index 20ba65b583d..0edd9f6b7dc 100644
--- a/g4f/Provider/TalkAi.py
+++ b/g4f/Provider/TalkAi.py
@@ -4,7 +4,7 @@
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession
+from .webdriver import WebDriver, WebDriverSession
class TalkAi(BaseProvider):
url = "https://talkai.info"
@@ -19,10 +19,10 @@ def create_completion(
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", virtual_display=True, proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", virtual_display=True, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index a7bbc496635..3e2109255a8 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -6,10 +6,9 @@
from .base_provider import BaseProvider
from ..debug import logging
-
class Vercel(BaseProvider):
url = 'https://sdk.vercel.ai'
- working = True
+ working = False
supports_message_history = True
supports_gpt_35_turbo = True
supports_stream = True
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 136921390b7..11fe497faf0 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -55,21 +55,4 @@ async def create_async_generator(
yield content
else:
chat = await response.json()
- yield chat["choices"][0]["message"].get("content")
-
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("timeout", "int"),
- ("temperature", "float"),
- ("top_p", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ yield chat["choices"][0]["message"].get("content")
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index c214089f3d5..2b47b07120d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,5 +1,12 @@
from __future__ import annotations
+from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
+from .retry_provider import RetryProvider
+from .deprecated import *
+from .needs_auth import *
+from .unfinished import *
+from .selenium import *
+
from .AiAsk import AiAsk
from .Aichat import Aichat
from .AiChatOnline import AiChatOnline
@@ -26,6 +33,7 @@
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
+from .GptTalkRu import GptTalkRu
from .Hashnode import Hashnode
from .Koala import Koala
from .Liaobots import Liaobots
@@ -43,12 +51,6 @@
from .Yqcloud import Yqcloud
from .GeekGpt import GeekGpt
-from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
-from .retry_provider import RetryProvider
-from .deprecated import *
-from .needs_auth import *
-from .unfinished import *
-
import sys
__modules__: list = [
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 47ea6ff8bda..f3959634024 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,12 +1,17 @@
from __future__ import annotations
-from asyncio import AbstractEventLoop
+import sys
+from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
-from abc import ABC, abstractmethod
-
-from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages
+from abc import ABC, abstractmethod
+from inspect import signature, Parameter
+from .helper import get_event_loop, get_cookies, format_prompt
+from ..typing import CreateResult, AsyncResult, Messages
+if sys.version_info < (3, 10):
+ NoneType = type(None)
+else:
+ from types import NoneType
class BaseProvider(ABC):
url: str
@@ -52,17 +57,42 @@ def create_func() -> str:
executor,
create_func
)
-
+
@classmethod
@property
def params(cls) -> str:
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ if issubclass(cls, AsyncGeneratorProvider):
+ sig = signature(cls.create_async_generator)
+ elif issubclass(cls, AsyncProvider):
+ sig = signature(cls.create_async)
+ else:
+ sig = signature(cls.create_completion)
+
+ def get_type_name(annotation: type) -> str:
+ if hasattr(annotation, "__name__"):
+ annotation = annotation.__name__
+ elif isinstance(annotation, NoneType):
+ annotation = "None"
+ return str(annotation)
+
+ args = ""
+ for name, param in sig.parameters.items():
+ if name in ("self", "kwargs"):
+ continue
+ if name == "stream" and not cls.supports_stream:
+ continue
+ if args:
+ args += ", "
+ args += "\n"
+ args += " " + name
+ if name != "model" and param.annotation is not Parameter.empty:
+ args += f": {get_type_name(param.annotation)}"
+ if param.default == "":
+ args += ' = ""'
+ elif param.default is not Parameter.empty:
+ args += f" = {param.default}"
+
+ return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(BaseProvider):
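
The rewritten `params` property derives the argument list from the method's actual signature instead of a hard-coded table, which is why every per-provider `params` override below is deleted. The same idea in a standalone sketch (helper and sample function are illustrative):

```python
from inspect import Parameter, signature

def describe(func) -> str:
    parts = []
    for name, param in signature(func).parameters.items():
        if name in ("self", "cls", "kwargs"):
            continue
        part = name
        if param.annotation is not Parameter.empty:
            part += f": {getattr(param.annotation, '__name__', param.annotation)}"
        if param.default is not Parameter.empty:
            part += f" = {param.default!r}"
        parts.append(part)
    return f"({', '.join(parts)})"

def create_completion(model: str, messages: list, stream: bool, proxy: str = None):
    ...

print(describe(create_completion))
# (model: str, messages: list, stream: bool, proxy: str = None)
```
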
diff --git a/g4f/Provider/deprecated/Aibn.py b/g4f/Provider/deprecated/Aibn.py
index 60cef1e48c4..0bbfb436caf 100644
--- a/g4f/Provider/deprecated/Aibn.py
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -39,18 +39,6 @@ async def create_async_generator(
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
diff --git a/g4f/Provider/deprecated/Ails.py b/g4f/Provider/deprecated/Ails.py
index 5244fd7570c..e87ceb32dd8 100644
--- a/g4f/Provider/deprecated/Ails.py
+++ b/g4f/Provider/deprecated/Ails.py
@@ -77,19 +77,6 @@ async def create_async_generator(
yield token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
index 12fd387db86..8b5a9e05a4b 100644
--- a/g4f/Provider/deprecated/Aivvm.py
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -69,16 +69,4 @@ def create_completion(cls,
try:
yield chunk.decode("utf-8")
except UnicodeDecodeError:
- yield chunk.decode("unicode-escape")
-
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
+ yield chunk.decode("unicode-escape")
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
index c77c6a1c6be..c2d2de7ab42 100644
--- a/g4f/Provider/deprecated/ChatgptDuo.py
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -44,15 +44,4 @@ async def create_async(
@classmethod
def get_sources(cls):
- return cls._sources
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ return cls._sources
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
index 64ce1af9379..22f4468ab92 100644
--- a/g4f/Provider/deprecated/CodeLinkAva.py
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -46,18 +46,7 @@ async def create_async_generator(
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
- if content := line["choices"][0]["delta"].get("content"):
- yield content
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ content = line["choices"][0]["delta"].get("content")
+ if content:
+ yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
index 4ea7501f760..4458bac65af 100644
--- a/g4f/Provider/deprecated/DfeHub.py
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -60,18 +60,3 @@ def create_completion(
if b"content" in chunk:
data = json.loads(chunk.decode().split("data: ")[1])
yield (data["choices"][0]["delta"]["content"])
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("presence_penalty", "int"),
- ("frequency_penalty", "int"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
index bd49c09c418..3142f243a6d 100644
--- a/g4f/Provider/deprecated/EasyChat.py
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -87,21 +87,4 @@ def create_completion(
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
- yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("presence_penalty", "int"),
- ("frequency_penalty", "int"),
- ("top_p", "int"),
- ("active_server", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 5ba125a3191..9f510e50f19 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -65,16 +65,7 @@ def create_completion(
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- if token := line_json['choices'][0]['delta'].get('content'):
- yield token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ token = line_json['choices'][0]['delta'].get('content')
+ if token:
+ yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index 17b21b3740b..3af8c213366 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -69,20 +69,11 @@ def create_completion(
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- if token := line_json['choices'][0]['delta'].get(
+ token = line_json['choices'][0]['delta'].get(
'content'
- ):
+ )
+
+ if token:
yield token
except:
- continue
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ continue
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
index 0fbb5b875a7..a7f4695cc85 100644
--- a/g4f/Provider/deprecated/GetGpt.py
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -55,22 +55,6 @@ def create_completion(
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
yield (line_json['choices'][0]['delta']['content'])
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ('presence_penalty', 'int'),
- ('frequency_penalty', 'int'),
- ('top_p', 'int'),
- ('max_tokens', 'int'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
-
def _encrypt(e: str):
t = os.urandom(8).hex().encode('utf-8')
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
index cead17e1678..ba4ca507c08 100644
--- a/g4f/Provider/deprecated/H2o.py
+++ b/g4f/Provider/deprecated/H2o.py
@@ -86,22 +86,4 @@ async def create_async_generator(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("truncate", "int"),
- ("max_new_tokens", "int"),
- ("do_sample", "bool"),
- ("repetition_penalty", "float"),
- ("return_full_text", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index 5acfbfbf861..f885672d0d2 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -38,6 +38,7 @@ def create_completion(
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
print("error, retrying...")
+
Lockchat.create_completion(
model = model,
messages = messages,
@@ -47,17 +48,7 @@ def create_completion(
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
- if token := token["choices"][0]["delta"].get("content"):
- yield (token)
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ token = token["choices"][0]["delta"].get("content")
+
+ if token:
+ yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py
index 85731325f45..2487440d041 100644
--- a/g4f/Provider/deprecated/Myshell.py
+++ b/g4f/Provider/deprecated/Myshell.py
@@ -98,18 +98,6 @@ async def create_async_generator(
raise RuntimeError(f"Received unexpected message: {data_type}")
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
def generate_timestamp() -> str:
return str(
int(
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
index f4f4d823a6c..e24ac2d488f 100644
--- a/g4f/Provider/deprecated/V50.py
+++ b/g4f/Provider/deprecated/V50.py
@@ -58,17 +58,4 @@ def create_completion(
)
if "https://fk1.v50.ltd" not in response.text:
- yield response.text
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ yield response.text
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
index d6ba9336f18..8f466a52d78 100644
--- a/g4f/Provider/deprecated/Vitalentum.py
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -49,19 +49,7 @@ async def create_async_generator(
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
- if content := line["choices"][0]["delta"].get("content"):
- yield content
+ content = line["choices"][0]["delta"].get("content")
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ if content:
+ yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
index 079f0541e71..87877198e3c 100644
--- a/g4f/Provider/deprecated/Wuguokai.py
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -54,15 +54,4 @@ def create_completion(
if len(_split) > 1:
yield _split[1].strip()
else:
- yield _split[0].strip()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool")
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ yield _split[0].strip()
\ No newline at end of file
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 03e9ba94a76..2171f0b787b 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,7 +6,6 @@
import random
import string
import secrets
-import time
from os import path
from asyncio import AbstractEventLoop
from platformdirs import user_config_dir
@@ -21,26 +20,8 @@
firefox,
BrowserCookieError
)
-try:
- from selenium.webdriver.remote.webdriver import WebDriver
-except ImportError:
- class WebDriver():
- pass
-try:
- from undetected_chromedriver import Chrome, ChromeOptions
-except ImportError:
- class Chrome():
- def __init__():
- raise RuntimeError('Please install the "undetected_chromedriver" package')
- class ChromeOptions():
- def add_argument():
- pass
-try:
- from pyvirtualdisplay import Display
-except ImportError:
- pass
-from ..typing import Dict, Messages, Union, Tuple
+from ..typing import Dict, Messages
from .. import debug
# Change event loop policy on windows
@@ -135,74 +116,11 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
return f"{formatted}\nAssistant:"
-def get_browser(
- user_data_dir: str = None,
- headless: bool = False,
- proxy: str = None,
- options: ChromeOptions = None
-) -> Chrome:
- if user_data_dir == None:
- user_data_dir = user_config_dir("g4f")
- if proxy:
- if not options:
- options = ChromeOptions()
- options.add_argument(f'--proxy-server={proxy}')
- return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
-
-class WebDriverSession():
- def __init__(
- self,
- web_driver: WebDriver = None,
- user_data_dir: str = None,
- headless: bool = False,
- virtual_display: bool = False,
- proxy: str = None,
- options: ChromeOptions = None
- ):
- self.web_driver = web_driver
- self.user_data_dir = user_data_dir
- self.headless = headless
- self.virtual_display = virtual_display
- self.proxy = proxy
- self.options = options
-
- def reopen(
- self,
- user_data_dir: str = None,
- headless: bool = False,
- virtual_display: bool = False
- ) -> WebDriver:
- if user_data_dir == None:
- user_data_dir = self.user_data_dir
- self.default_driver.quit()
- if not virtual_display and self.virtual_display:
- self.virtual_display.stop()
- self.default_driver = get_browser(user_data_dir, headless, self.proxy)
- return self.default_driver
-
- def __enter__(self) -> WebDriver:
- if self.web_driver:
- return self.web_driver
- if self.virtual_display == True:
- self.virtual_display = Display(size=(1920,1080))
- self.virtual_display.start()
- self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
- return self.default_driver
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- if self.default_driver:
- self.default_driver.close()
- time.sleep(0.1)
- self.default_driver.quit()
- if self.virtual_display:
- self.virtual_display.stop()
-
def get_random_string(length: int = 10) -> str:
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
-
def get_random_hex() -> str:
return secrets.token_hex(16).zfill(32)
\ No newline at end of file
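
The browser helpers removed here move to `g4f/Provider/webdriver.py`, which the provider diffs above now import from. A usage sketch, assuming the relocated `WebDriverSession` keeps the constructor shown in the deleted code:

```python
from g4f.Provider.webdriver import WebDriverSession

# Passing an existing driver reuses it; otherwise the session starts its own
# browser (optionally headless or inside a virtual display) and quits it on exit.
with WebDriverSession(None, None, headless=True) as driver:
    driver.get("https://example.com")
    print(driver.title)
```
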
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 77c029b8517..2c1f6121382 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -4,7 +4,8 @@
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
class Bard(BaseProvider):
url = "https://bard.google.com"
@@ -18,13 +19,13 @@ def create_completion(
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
) -> CreateResult:
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -36,8 +37,8 @@ def create_completion(
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 68c6713befc..59e2da73529 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -59,17 +59,4 @@ async def create_async_generator(
break
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index de62636cb8b..e549b517ccd 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -87,15 +87,3 @@ async def create_async_generator(
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9fd90812295..8c9dd1e0530 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -6,7 +6,8 @@
from async_property import async_cached_property
from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_browser, get_event_loop
+from ..helper import get_event_loop
+from ..webdriver import get_browser
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
@@ -38,7 +39,10 @@ async def create(
**kwargs
) -> Response:
if prompt:
- messages.append({"role": "user", "content": prompt})
+ messages.append({
+ "role": "user",
+ "content": prompt
+ })
generator = cls.create_async_generator(
model,
messages,
@@ -49,12 +53,9 @@ async def create(
response_fields=True,
**kwargs
)
- fields: ResponseFields = await anext(generator)
- if "access_token" not in kwargs:
- kwargs["access_token"] = cls._access_token
return Response(
generator,
- fields,
+ await anext(generator),
action,
messages,
kwargs
@@ -87,7 +88,6 @@ async def create_async_generator(
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
- "Cookie": 'intercom-device-id-dgkjq2bp=0f047573-a750-46c8-be62-6d54b56e7bf0; ajs_user_id=user-iv3vxisaoNodwWpxmNpMfekH; ajs_anonymous_id=fd91be0b-0251-4222-ac1e-84b1071e9ec1; __Host-next-auth.csrf-token=d2b5f67d56f7dd6a0a42ae4becf2d1a6577b820a5edc88ab2018a59b9b506886%7Ce5c33eecc460988a137cbc72d90ee18f1b4e2f672104f368046df58e364376ac; _cfuvid=gt_mA.q6rue1.7d2.AR0KHpbVBS98i_ppfi.amj2._o-1700353424353-0-604800000; cf_clearance=GkHCfPSFU.NXGcHROoe4FantnqmnNcluhTNHz13Tk.M-1700353425-0-1-dfe77f81.816e9bc2.714615da-0.2.1700353425; __Secure-next-auth.callback-url=https%3A%2F%2Fchat.openai.com; intercom-session-dgkjq2bp=UWdrS1hHazk5VXN1c0V5Q1F0VXdCQmsyTU9pVjJMUkNpWnFnU3dKWmtIdGwxTC9wbjZuMk5hcEc0NWZDOGdndS0tSDNiaDNmMEdIL1RHU1dFWDBwOHFJUT09--f754361b91fddcd23a13b288dcb2bf8c7f509e91; _uasid="Z0FBQUFBQmxXVnV0a3dmVno4czRhcDc2ZVcwaUpSNUdZejlDR25YSk5NYTJQQkpyNmRvOGxjTHMyTlAxWmJhaURrMVhjLXZxQXdZeVpBbU1aczA5WUpHT2dwaS1MOWc4MnhyNWFnbGRzeGdJcGFKT0ZRdnBTMVJHcGV2MGNTSnVQY193c0hqUWIycHhQRVF4dENlZ3phcDdZeHgxdVhoalhrZmtZME9NbWhMQjdVR3Vzc3FRRk0ybjJjNWMwTWtIRjdPb19lUkFtRmV2MDVqd1kwWU11QTYtQkdZenEzVHhLMGplY1hZM3FlYUt1cVZaNWFTRldleEJETzJKQjk1VTJScy1GUnMxUVZWMnVxYklxMjdockVZbkZyd1R4U1RtMnA1ZzlSeXphdmVOVk9xeEdrRkVOSjhwTVd1QzFtQjhBcWdDaE92Q1VlM2pwcjFQTXRuLVJNRVlZSGpIdlZ0aGV3PT0="; _dd_s=rum=0&expire=1700356244884; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..3aK6Fbdy2_8f07bf.8eT2xgonrCnz7ySY6qXFsg3kzL6UQfXKAYaw3tyn-6_X9657zy47k9qGvmi9mF0QKozj5jau3_Ca62AQQ7FmeC6Y2F1urtzqrXqwTTsQ2LuzFPIQkx6KKb2DXc8zW2-oyEzJ_EY5yxfLB2RlRkSh3M7bYNZh4_ltEcfkj38s_kIPGMxv34udtPWGWET99MCjkdwQWXylJag4s0fETA0orsBAKnGCyqAUNJbb_D7BYtGSV-MQ925kZMG6Di_QmfO0HQWURDYjmdRNcuy1PT_xJ1DJko8sjL42i4j3RhkNDkhqCIqyYImz2eHFWHW7rYKxTkrBhlCPMS5hRdcCswD7JYPcSBiwnVRYgyOocFGXoFvQgIZ2FX9NiZ3SMEVM1VwIGSE-qH0H2nMa8_iBvsOgOWJgKjVAvzzyzZvRVDUUHzJrikSFPNONVDU3h-04c1kVL4qIu9DfeTPN7n8AvNmYwMbro0L9-IUAeXNo4-pwF0Kt-AtTsamqWvMqnK4O_YOyLnDDlvkmnOvDC2d5uinwlQIxr6APO6qFfGLlHiLZemKoekxEE1Fx70dl-Ouhk1VIzbF3OC6XNNxeBm9BUYUiHdL0wj2H9rHgX4cz6ZmS_3VTgpD6UJh-evu5KJ2gIvjYmVbyzEN0aPNDxfvBaOm-Ezpy4bUJ2bUrOwNn-0knWkDiTvjYmNhCyefPCtCF6rpKNay8PCw_yh79C4SdEP6Q4V7LI0Tvdi5uz7kLCiBC4AT9L0ao1WDX03mkUOpjvzHDvPLmj8chW3lTVm_kA0eYGQY4wT0jzleWlfV0Q8rB2oYECNLWksA3F1zlGfcl4lQjprvTXRePkvAbMpoJEsZD3Ylq7-foLDLk4-M2LYAFZDs282AY04sFjAjQBxTELFCCuDgTIgTXSIskY_XCxpVXDbdLlbCJY7XVK45ybwtfqwlKRp8Mo0B131uQAFc-migHaUaoGujxJJk21bP8F0OmhNYHBo4FQqE1rQm2JH5bNM7txKeh5KXdJgVUVbRSr7OIp_OF5-Bx_v9eRBGAIDkue26E2-O8Rnrp5zQ5TnvecQLDaUzWavCLPwsZ0_gsOLBxNOmauNYZtF8IElCsQSFDdhoiMxXsYUm4ZYKEAy3GWq8HGTAvBhNkh1hvnI7y-d8-DOaZf_D_D98-olZfm-LUkeosLNpPB9rxYMqViCiW3KrXE9Yx0wlFm5ePKaVvR7Ym_EPhSOhJBKFPCvdTdMZSNPUcW0ZJBVByq0A9sxD51lYq3gaFyqh94S4s_ox182AQ3szGzHkdgLcnQmJG9OYvKxAVcd43eg6_gODAYhx02GjbMw-7JTAhyXSeCrlMteHyOXl8hai-3LilC3PmMzi7Vbu49dhF1s4LcVlUowen5ira44rQQaB26mdaOUoQfodgt66M3RTWGPXyK1Nb72AzSXsCKyaQPbzeb6cN0fdGSdG4ktwvR04eFNEkquo_3aKu2GmUKTD0XcRx9dYrfXjgY-X1DDTVs1YND2gRhdx7FFEeBVjtbj2UqmG3Rvd4IcHGe7OnYWw2MHDcol68SsR1KckXWwWREz7YTGUnDB2M1kx_H4W2mjclytnlHOnYU3RflegRPeSTbdzUZJvGKXCCz45luHkQWN_4DExE76D-9YqbFIz-RY5yL4h-Zs-i2xjm2K-4xCMM9nQIOqhLMqixIZQ2ldDAidKoYtbs5ppzbcBLyrZM96bq9DwRBY3aacqWdlRd-TfX0wv5KO4fo0sSh5FsuhuN0zcEV_NNXgqIEM_p14EcPqgbrAvCBQ8os70TRBQLXiF0EniSofGjxwF8kQvUk3C6Wfc8cTTeN-E6GxCVTn91HBwA1iSEZlRLMVb8_BcRJNqwbgnb_07jR6-eo42u88CR3KQdAWwbQRdMxsURFwZ0ujHXVGG0Ll6qCFBcHXWyDO1x1yHdHnw8_8yF26pnA2iPzrFR-8glMgIA-639sLuGAxjO1_ZuvJ9CAB41Az9S_jaZwaWy215Hk4-BRYD-MKmHtonwo3rrxhE67WJgbbu14efsw5nT6ow961pffgwXov5VA1Rg7nv1E8RvQOx7umWW6o8R4W6L8f2COsmPTXfgwIjoJKkjhUqAQ8ceG7cM0ET-38yaC0ObU8EkXfdGGgxI28qTEZWczG66_iM4hw
7QEGCY5Cz2kbO6LETAiw9OsSigtBvDS7f0Ou0bZ41pdK7G3FmvdZAnjWPjObnDF4k4uWfn7mzt0fgj3FyqK20JezRDyGuAbUUhOvtZpc9sJpzxR34eXEZTouuALrHcGuNij4z6rx51FrQsaMtiup8QVrhtZbXtKLMYnWYSbkhuTeN2wY-xV1ZUsQlakIZszzGF7kuIG87KKWMpuPMvbXjz6Pp_gWJiIC6aQuk8xl5g0iBPycf_6Q-MtpuYxzNE2TpI1RyR9mHeXmteoRzrFiWp7yEC-QGNFyAJgxTqxM3CjHh1Jt6IddOsmn89rUo1dZM2Smijv_fbIv3avXLkIPX1KZjILeJCtpU0wAdsihDaRiRgDdx8fG__F8zuP0n7ziHas73cwrfg-Ujr6DhC0gTNxyd9dDA_oho9N7CQcy6EFmfNF2te7zpLony0859jtRv2t1TnpzAa1VvMK4u6mXuJ2XDo04_6GzLO3aPHinMdl1BcIAWnqAqWAu3euGFLTHOhXlfijut9N1OCifd_zWjhVtzlR39uFeCQBU5DyQArzQurdoMx8U1ETsnWgElxGSStRW-YQoPsAJ87eg9trqKspFpTVlAVN3t1GtoEAEhcwhe81SDssLmKGLc.7PqS6jRGTIfgTPlO7Ognvg; __cf_bm=VMWoAKEB45hQSwxXtnYXcurPaGZDJS4dMi6dIMFLwdw-1700355394-0-ATVsbq97iCaTaJbtYr8vtg1Zlbs3nLrJLKVBHYa2Jn7hhkGclqAy8Gbyn5ePEhDRqj93MsQmtayfYLqY5n4WiLY=; __cflb=0H28vVfF4aAyg2hkHFH9CkdHRXPsfCUf6VpYf2kz3RX'
}
async with StreamSession(
proxies={"https": proxy},
@@ -95,24 +95,22 @@ async def create_async_generator(
headers=headers,
timeout=timeout
) as session:
- data = {
- "action": action,
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": history_disabled and not auto_continue,
- }
- if action != "continue":
- data["messages"] = [{
- "id": str(uuid.uuid4()),
- "author": {"role": "user"},
- "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
- }]
- first = True
end_turn = EndTurn()
- while first or auto_continue and not end_turn.is_end:
- first = False
+ while not end_turn.is_end:
+ data = {
+ "action": action,
+ "arkose_token": await get_arkose_token(proxy, timeout),
+ "conversation_id": conversation_id,
+ "parent_message_id": parent_id,
+ "model": models[model],
+ "history_and_training_disabled": history_disabled and not auto_continue,
+ }
+ if action != "continue":
+ data["messages"] = [{
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+ }]
async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
try:
response.raise_for_status()
@@ -120,43 +118,38 @@ async def create_async_generator(
raise RuntimeError(f"Error {response.status_code}: {await response.text()}")
last_message = 0
async for line in response.iter_lines():
- if line.startswith(b"data: "):
- line = line[6:]
- if line == b"[DONE]":
- break
- try:
- line = json.loads(line)
- except:
- continue
- if "message" not in line:
- continue
- if "error" in line and line["error"]:
- raise RuntimeError(line["error"])
- if "message_type" not in line["message"]["metadata"]:
- continue
- if line["message"]["author"]["role"] != "assistant":
- continue
- if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
- conversation_id = line["conversation_id"]
- parent_id = line["message"]["id"]
- if response_fields:
- response_fields = False
- yield ResponseFields(conversation_id, parent_id, end_turn)
- new_message = line["message"]["content"]["parts"][0]
- yield new_message[last_message:]
- last_message = len(new_message)
- if "finish_details" in line["message"]["metadata"]:
- if line["message"]["metadata"]["finish_details"]["type"] == "max_tokens":
- end_turn.end()
-
- data = {
- "action": "continue",
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": False,
- }
+ if not line.startswith(b"data: "):
+ continue
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ except:
+ continue
+ if "message" not in line:
+ continue
+ if "error" in line and line["error"]:
+ raise RuntimeError(line["error"])
+ if "message_type" not in line["message"]["metadata"]:
+ continue
+ if line["message"]["author"]["role"] != "assistant":
+ continue
+ if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
+ conversation_id = line["conversation_id"]
+ parent_id = line["message"]["id"]
+ if response_fields:
+ response_fields = False
+ yield ResponseFields(conversation_id, parent_id, end_turn)
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[last_message:]
+ last_message = len(new_message)
+ if "finish_details" in line["message"]["metadata"]:
+ if line["message"]["metadata"]["finish_details"]["type"] == "stop":
+ end_turn.end()
+ if not auto_continue:
+ break
+ action = "continue"
await asyncio.sleep(5)
@classmethod
@@ -167,7 +160,7 @@ def browse() -> str:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
- driver = get_browser("~/openai", proxy=proxy)
+ driver = get_browser(proxy=proxy)
except ImportError:
return
try:
@@ -193,18 +186,6 @@ async def get_access_token(cls, proxy: str = None) -> str:
raise RuntimeError("Read access token failed")
return cls._access_token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("access_token", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
config = {
@@ -293,7 +274,7 @@ async def do_continue(self, **kwargs) -> Response:
async def variant(self, **kwargs) -> Response:
if self.action != "next":
- raise RuntimeError("Can't create variant with continue or variant request.")
+ raise RuntimeError("Can't create variant from continue or variant request.")
return await OpenaiChat.create(
**self._options,
messages=self._messages,
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 1c8c97d74db..99f6945b3bc 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -4,7 +4,8 @@
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
@@ -33,7 +34,7 @@ def create_completion(
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
@@ -44,7 +45,7 @@ def create_completion(
raise ValueError(f"Model is not supported: {model}")
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -80,8 +81,8 @@ class ProxiedWebSocket extends WebSocket {
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/{models[model]['name']}")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 4570fd9fad9..d7be98ac8ee 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -60,18 +60,3 @@ def create_completion(
token = completion_chunk['text']
if token != None:
yield token
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ("model", "str"),
- ("auth", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index cf33f0c6579..49ee174b8c0 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -4,7 +4,8 @@
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"theb-ai": "TheB.AI",
@@ -44,14 +45,14 @@ def create_completion(
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
virtual_display: bool = True,
**kwargs
) -> CreateResult:
if model in models:
model = models[model]
prompt = format_prompt(messages)
- web_session = WebDriverSession(web_driver, virtual_display=virtual_display, proxy=proxy)
+ web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
with web_session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -61,22 +62,16 @@ def create_completion(
# Register fetch hook
script = """
window._fetch = window.fetch;
-window.fetch = (url, options) => {
+window.fetch = async (url, options) => {
// Call parent fetch method
- const result = window._fetch(url, options);
+ const response = await window._fetch(url, options);
if (!url.startsWith("/api/conversation")) {
-        return result;
+        return response;
}
- // Load response reader
- result.then((response) => {
- if (!response.body.locked) {
- window._reader = response.body.getReader();
- }
- });
- // Return dummy response
- return new Promise((resolve, reject) => {
- resolve(new Response(new ReadableStream()))
- });
+ // Copy response
+ copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
}
window._last_message = "";
"""
@@ -97,7 +92,6 @@ def create_completion(
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
- time.sleep(200)
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
@@ -134,9 +128,8 @@ def create_completion(
if (chunk['done']) {
return null;
}
- text = (new TextDecoder()).decode(chunk['value']);
message = '';
- text.split('\\r\\n').forEach((line, index) => {
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
line = JSON.parse(line.substring('data: '.length));
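The rewritten hook clones the response and exposes a `TextDecoderStream` reader instead of swallowing the body behind a dummy stream, so the page keeps working while the reader script already receives decoded text. Calling the provider is unchanged; a minimal sketch, assuming `Theb` is re-exported from `g4f.Provider` and a browser login exists:

```python
import g4f
from g4f.Provider import Theb

# Tokens are scraped from the hooked fetch() response in the browser
for chunk in g4f.ChatCompletion.create(
    model="theb-ai",
    provider=Theb,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="", flush=True)
```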
diff --git a/g4f/Provider/selenium/Phind.py b/g4f/Provider/selenium/Phind.py
new file mode 100644
index 00000000000..b9a37f97166
--- /dev/null
+++ b/g4f/Provider/selenium/Phind.py
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+import time
+from urllib.parse import quote
+
+from ...typing import CreateResult, Messages
+from ..base_provider import BaseProvider
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
+
+class Phind(BaseProvider):
+ url = "https://www.phind.com"
+ working = True
+ supports_gpt_4 = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ creative_mode: bool = None,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+
+ # Register fetch hook
+ source = """
+window._fetch = window.fetch;
+window.fetch = async (url, options) => {
+ const response = await window._fetch(url, options);
+ if (url != "/api/infer/answer") {
+ return response;
+ }
+ copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
+}
+"""
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": source
+ })
+
+ prompt = quote(format_prompt(messages))
+ driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
+
+ # Need to change settings
+ wait = WebDriverWait(driver, timeout)
+ def open_dropdown():
+ # Open settings dropdown
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
+ driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
+ # Wait for dropdown toggle
+ wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
+ if model.startswith("gpt-4") or creative_mode:
+ # Enable GPT-4
+ if model.startswith("gpt-4"):
+ open_dropdown()
+ driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
+ # Enable creative mode
+            if creative_mode or creative_mode is None:
+ open_dropdown()
+ driver.find_element(By.ID, "Creative Mode").click()
+ # Submit changes
+ driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
+ # Wait for page reload
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
+
+ while True:
+ chunk = driver.execute_script("""
+if(window._reader) {
+ chunk = await window._reader.read();
+ if (chunk['done']) {
+ return null;
+ }
+ content = '';
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ line = line.substring('data: '.length);
+            if (!line.startsWith('<PHIND_METADATA>')) {
+ if (line) content += line;
+ else content += '\\n';
+ }
+ }
+ });
+ return content.replace('\\n\\n', '\\n');
+} else {
+ return ''
+}
+""")
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
\ No newline at end of file
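The new Phind provider applies the same hooked-fetch pattern as Theb. A minimal usage sketch, assuming `undetected-chromedriver` is installed and a display (or virtual display) is available:

```python
import g4f
from g4f.Provider.selenium import Phind

for chunk in g4f.ChatCompletion.create(
    model="gpt-4",  # flips the GPT-4 toggle in the Phind search UI
    provider=Phind,
    messages=[{"role": "user", "content": "What is an async generator?"}],
    stream=True,
):
    print(chunk, end="", flush=True)
```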
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
new file mode 100644
index 00000000000..80c48d14cdd
--- /dev/null
+++ b/g4f/Provider/selenium/__init__.py
@@ -0,0 +1 @@
+from .Phind import Phind
\ No newline at end of file
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
index 9d050093d6b..bc962623456 100644
--- a/g4f/Provider/unfinished/ChatAiGpt.py
+++ b/g4f/Provider/unfinished/ChatAiGpt.py
@@ -43,9 +43,12 @@ async def create_async_generator(
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
- if result := re.search(
+
+ result = re.search(
r'data-nonce=(.*?) data-post-id=([0-9]+)', response
- ):
+ )
+
+ if result:
cls._nonce, cls._post_id = result.group(1), result.group(2)
else:
raise RuntimeError("No nonce found")
diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py
index 970fb0bb67d..bf19631f4b5 100644
--- a/g4f/Provider/unfinished/MikuChat.py
+++ b/g4f/Provider/unfinished/MikuChat.py
@@ -48,7 +48,8 @@ async def create_async_generator(
async for line in response.iter_lines():
if line.startswith(b"data: "):
line = json.loads(line[6:])
- if chunk := line["choices"][0]["delta"].get("content"):
+ chunk = line["choices"][0]["delta"].get("content")
+ if chunk:
yield chunk
def k(e: str, t: int):
diff --git a/g4f/Provider/webdriver.py b/g4f/Provider/webdriver.py
new file mode 100644
index 00000000000..da3b13ed762
--- /dev/null
+++ b/g4f/Provider/webdriver.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import time
+from platformdirs import user_config_dir
+try:
+ from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+ class WebDriver():
+ pass
+try:
+ from undetected_chromedriver import Chrome, ChromeOptions
+except ImportError:
+    class Chrome():
+        def __init__(self, *args, **kwargs):
+            raise RuntimeError('Please install the "undetected_chromedriver" package')
+    class ChromeOptions():
+        def add_argument(self, argument: str):
+            pass
+try:
+ from pyvirtualdisplay import Display
+ has_pyvirtualdisplay = True
+except ImportError:
+ has_pyvirtualdisplay = False
+
+def get_browser(
+ user_data_dir: str = None,
+ headless: bool = False,
+ proxy: str = None,
+ options: ChromeOptions = None
+) -> Chrome:
+    if user_data_dir is None:
+ user_data_dir = user_config_dir("g4f")
+ if proxy:
+ if not options:
+ options = ChromeOptions()
+ options.add_argument(f'--proxy-server={proxy}')
+ return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+
+class WebDriverSession():
+ def __init__(
+ self,
+ webdriver: WebDriver = None,
+ user_data_dir: str = None,
+ headless: bool = False,
+ virtual_display: bool = False,
+ proxy: str = None,
+ options: ChromeOptions = None
+ ):
+ self.webdriver = webdriver
+ self.user_data_dir = user_data_dir
+ self.headless = headless
+ self.virtual_display = None
+ if has_pyvirtualdisplay and virtual_display:
+ self.virtual_display = Display(size=(1920,1080))
+ self.proxy = proxy
+ self.options = options
+ self.default_driver = None
+
+ def reopen(
+ self,
+ user_data_dir: str = None,
+ headless: bool = False,
+ virtual_display: bool = False
+ ) -> WebDriver:
+        if user_data_dir is None:
+ user_data_dir = self.user_data_dir
+ if self.default_driver:
+ self.default_driver.quit()
+ if not virtual_display and self.virtual_display:
+ self.virtual_display.stop()
+ self.virtual_display = None
+ self.default_driver = get_browser(user_data_dir, headless, self.proxy)
+ return self.default_driver
+
+ def __enter__(self) -> WebDriver:
+ if self.webdriver:
+ return self.webdriver
+ if self.virtual_display:
+ self.virtual_display.start()
+ self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
+ return self.default_driver
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.default_driver:
+ try:
+ self.default_driver.close()
+            except Exception:
+                # Ignore errors if the window is already closed
+                pass
+ time.sleep(0.1)
+ self.default_driver.quit()
+ if self.virtual_display:
+ self.virtual_display.stop()
\ No newline at end of file
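`WebDriverSession` either wraps a caller-supplied driver, which it leaves running on exit, or creates and disposes of its own. A minimal sketch of the context-manager use:

```python
from g4f.Provider.webdriver import WebDriverSession

# Creates a browser with the persistent g4f profile and quits it on exit;
# virtual_display=True additionally requires the pyvirtualdisplay package.
with WebDriverSession(headless=True) as driver:
    driver.get("https://example.com")
    print(driver.title)
```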
diff --git a/g4f/__init__.py b/g4f/__init__.py
index faef79238ed..b96ebf9c576 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,11 +1,11 @@
from __future__ import annotations
from requests import get
from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, RetryProvider
-from .typing import Messages, CreateResult, Union, List
+from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider
+from .typing import Messages, CreateResult, AsyncResult, Union, List
from . import debug
-version = '0.1.8.7'
+version = '0.1.9.0'
version_check = True
def check_pypi_version() -> None:
@@ -80,13 +80,15 @@ async def create_async(model : Union[Model, str],
messages : Messages,
provider : Union[type[BaseProvider], None] = None,
stream : bool = False,
- ignored : List[str] = None, **kwargs) -> str:
-
- if stream:
- raise ValueError('"create_async" does not support "stream" argument')
-
+ ignored : List[str] = None,
+ **kwargs) -> Union[AsyncResult, str]:
model, provider = get_model_and_provider(model, provider, False, ignored)
+ if stream:
+ if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+            # An async generator function's result is returned directly, not awaited
+            return provider.create_async_generator(model.name, messages, **kwargs)
+ raise ValueError(f'{provider.__name__} does not support "stream" argument')
+
return await provider.create_async(model.name, messages, **kwargs)
class Completion:
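With this change `create_async` accepts `stream=True` for providers built on `AsyncGeneratorProvider` and hands back the async generator. A minimal sketch:

```python
import asyncio
import g4f

async def main():
    stream = await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,  # async generator for AsyncGeneratorProvider subclasses
    )
    async for chunk in stream:
        print(chunk, end="", flush=True)

asyncio.run(main())
```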
diff --git a/g4f/models.py b/g4f/models.py
index 9f5056145ec..46eb49a0a06 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -72,7 +72,7 @@ def __all__() -> list[str]:
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider=RetryProvider([
- ChatgptX, GptGo, You,
+ GptGo, You,
GptForLove, ChatBase,
Chatgpt4Online,
ChatAnywhere,
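Removing `ChatgptX` from the default chain only changes automatic selection; callers can still pin their own retry chain. A minimal sketch:

```python
import g4f
from g4f.Provider import RetryProvider, GptGo, You

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=RetryProvider([GptGo, You]),  # tried until one succeeds
    messages=[{"role": "user", "content": "Hi"}],
)
print(response)
```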
diff --git a/ptest.py b/ptest.py
new file mode 100644
index 00000000000..38dd2aa9ea0
--- /dev/null
+++ b/ptest.py
@@ -0,0 +1,57 @@
+import requests, json
+
+
+headers = {
+ 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'Cache-Control': 'no-cache',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ 'Origin': 'https://deepinfra.com',
+ 'Pragma': 'no-cache',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'accept': 'text/event-stream',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+}
+
+json_data = json.dumps({
+ 'model': 'meta-llama/Llama-2-70b-chat-hf',
+ 'messages': [
+ {
+ 'role': 'user',
+ 'content': 'what is the meaning of life ?',
+ },
+ ],
+ 'stream': True}, separators=(',', ':'))
+
+response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions',
+ headers=headers, data=json_data, stream=True)
+
+response.raise_for_status()
+first = True
+
+# iter_lines() yields complete SSE lines; fixed-size chunks could split one
+for line in response.iter_lines():
+ if line.startswith(b"data: [DONE]"):
+ break
+
+ elif line.startswith(b"data: "):
+ chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+
+ if chunk:
+ if first:
+ chunk = chunk.lstrip()
+ if chunk:
+ first = False
+
+ print(chunk)
+
+# Note: json_data will not be serialized by requests
+# exactly as it was in the original request.
+#data = '{"model":"meta-llama/Llama-2-70b-chat-hf","messages":[{"role":"user","content":"what is the meaning of life ?"},{"role":"assistant","content":" The meaning of life is a question that has puzzled philosophers, religious leaders, scientists, and many others for centuries. There are many different perspectives on this question, and there is no one definitive answer. However, here are some possible approaches to understanding the meaning of life:\\n\\n1. Religious or spiritual perspective: Many people believe that the meaning of life is to fulfill a divine or spiritual purpose. According to this view, our lives have a higher purpose, which is to serve a deity or follow a set of moral principles. The meaning of life is then found in fulfilling this purpose, whether it be through prayer, service, or good works.\\n2. Personal fulfillment: Some people believe that the meaning of life is to find personal fulfillment and happiness. According to this view, we should pursue our passions and interests, cultivate strong relationships, and strive to live a life that is rich in experience and personal growth.\\n3. Social or cultural perspective: Others argue that the meaning of life is tied to our social and cultural contexts. From this perspective, the meaning of life is to contribute to the greater good of society, to uphold certain values or traditions, or to leave a lasting legacy.\\n4. Existentialism: Existentialists believe that the meaning of life is not predetermined or inherent, but rather something that each individual must create for themselves. According to this view, life has no inherent meaning, but we can give it meaning through our choices, actions, and experiences.\\n5. Scientific perspective: Some people believe that the meaning of life can be understood through scientific inquiry. From this perspective, the meaning of life might be seen as a product of evolution, a byproduct of the natural processes that gave rise to consciousness and intelligence.\\n6. Humanistic perspective: Humanists believe that the meaning of life is to promote human flourishing and well-being. According to this view, we should strive to create a world where individuals can live fulfilling lives, pursue their goals and dreams, and reach their full potential.\\n7. Nihilistic perspective: A minority perspective is that life has no inherent meaning and that any attempt to impose meaning on it is ultimately futile. This view is often associated with nihilism, which argues that traditional beliefs and values are baseless and that morality is an illusion.\\n8. Mystical perspective: Finally, some people believe that the meaning of life cannot be expressed in words or concepts, but can only be experienced directly through mystical practices such as meditation or psychedelic experiences.\\n\\nUltimately, the meaning of life may be something that each person must determine for themselves based on their own unique experiences, values, and beliefs. It could also be said that life has multiple meanings, and that these meanings can change throughout our lives as we grow, learn, and evolve as individuals."},{"role":"user","content":"welll well well"},{"role":"assistant","content":" I\'m glad you\'re feeling enthusiastic! 
\\"Well\\" can be used in various ways to express excitement or surprise, such as:\\n\\n* \\"Well, well, well!\\" - This phrase is often used to express surprise or intrigue, especially when something unexpected happens or when you\'re presented with new information.\\n* \\"Look at you, well done!\\" - Here, \\"well\\" is used to convey approval or admiration for someone\'s achievement or effort.\\n* \\"You think you\'re so smart, don\'t you?\\" - In this sentence, \\"well\\" is used to express sarcasm or skepticism towards someone who thinks they know everything.\\n\\nHowever, I\'d like to point out that using \\"well\\" too frequently can come across as insincere or filler words, especially in formal writing or professional settings. It\'s important to use language that accurately conveys your intended message without relying on unnecessary filler words. Is there anything else you\'d like me to help you with?"},{"role":"user","content":"nice"}],"stream":true}'
+#response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions', headers=headers, data=data)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index e09436dda50..96b5f3f5f92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
requests
pycryptodome
-curl_cffi
+curl_cffi>=0.5.10b4
aiohttp
certifi
browser_cookie3
@@ -23,6 +23,6 @@ flask
py-arkose-generator
asyncstdlib
async-property
-selenium
+undetected-chromedriver
asyncstdlib
-async_property
\ No newline at end of file
+async_property
diff --git a/setup.py b/setup.py
index da3cc4856d2..f3ba746631a 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
with open('requirements.txt') as f:
required = f.read().splitlines()
-VERSION = '0.1.8.7'
+VERSION = '0.1.9.0'
DESCRIPTION = (
'The official gpt4free repository | various collection of powerful language models'
)