Added new providers, updated existing ones, added new models, and updated model lists #2125

Merged · 11 commits · Aug 1, 2024
71 changes: 71 additions & 0 deletions g4f/Provider/Allyfy.py
@@ -0,0 +1,71 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class Allyfy(AsyncGeneratorProvider):
url = "https://chatbot.allyfy.chat"
api_endpoint = "/api/v1/message/stream/super/chat"
working = True
supports_gpt_35_turbo = True

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json;charset=utf-8",
"dnt": "1",
"origin": "https://www.allyfy.chat",
"priority": "u=1, i",
"referer": "https://www.allyfy.chat/",
"referrer": "https://www.allyfy.chat",
            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"messages": [{"content": prompt, "role": "user"}],
"content": prompt,
"baseInfo": {
"clientId": "q08kdrde1115003lyedfoir6af0yy531",
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
"localZone": 180,
"packageName": "com.cch.allyfy.webh",
}
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:
line = line.decode().strip()
if line.startswith("data:"):
data_content = line[5:]
if data_content == "[DONE]":
break
try:
json_data = json.loads(data_content)
if "content" in json_data:
full_response.append(json_data["content"])
except json.JSONDecodeError:
continue
yield "".join(full_response)
75 changes: 75 additions & 0 deletions g4f/Provider/ChatGot.py
@@ -0,0 +1,75 @@
from __future__ import annotations

import time
from hashlib import sha256

from aiohttp import BaseConnector, ClientSession

from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
default_model = 'gemini-pro'

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")


def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()
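
The "sign" field is a plain SHA-256 digest over the millisecond timestamp, the last user message, and an empty secret, so a request signature can be reproduced in isolation; a minimal sketch:

import time
from hashlib import sha256

timestamp = int(time.time() * 1e3)  # millisecond timestamp, as in create_async_generator
text = "Hello"
# With secret="" the signed message is f"{timestamp}:{text}:"
sign = sha256(f"{timestamp}:{text}:".encode()).hexdigest()
print(sign)  # 64-character hex digest sent as the "sign" field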
101 changes: 50 additions & 51 deletions g4f/Provider/Chatgpt4Online.py
@@ -1,72 +1,71 @@
from __future__ import annotations

-import re
import json
from aiohttp import ClientSession

-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt


class Chatgpt4Online(AsyncGeneratorProvider):
    url = "https://chatgpt4online.org"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-    working = True
-    _wpnonce = None
-    _context_id = None
+    api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+    working = True
+    supports_gpt_4 = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
-        webdriver: WebDriver = None,
        **kwargs
    ) -> AsyncResult:
-        args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
-        async with ClientSession(**args) as session:
-            if not cls._wpnonce:
-                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
-                    response.raise_for_status()
-                    response = await response.text()
-                    result = re.search(r'restNonce":"(.*?)"', response)
-                    if result:
-                        cls._wpnonce = result.group(1)
-                    else:
-                        raise RuntimeError("No nonce found")
-                    result = re.search(r'contextId":(.*?),', response)
-                    if result:
-                        cls._context_id = result.group(1)
-                    else:
-                        raise RuntimeError("No contextId found")
+        headers = {
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": cls.url,
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            "x-wp-nonce": "d9505e9877",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
            data = {
-                "botId":"default",
-                "customId":None,
-                "session":"N/A",
-                "chatId":get_random_string(11),
-                "contextId":cls._context_id,
-                "messages":messages[:-1],
-                "newMessage":messages[-1]["content"],
-                "newImageId":None,
-                "stream":True
+                "botId": "default",
+                "newMessage": prompt,
+                "stream": True,
            }
-            async with session.post(
-                f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
-                json=data,
-                proxy=proxy,
-                headers={"x-wp-nonce": cls._wpnonce}
-            ) as response:
+
+            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        line = json.loads(line[6:])
-                        if "type" not in line:
-                            raise RuntimeError(f"Response: {line}")
-                        elif line["type"] == "live":
-                            yield line["data"]
-                        elif line["type"] == "end":
-                            break
+                full_response = ""
+
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        try:
+                            # Extract the JSON object from the chunk
+                            for line in chunk.decode().splitlines():
+                                if line.startswith("data: "):
+                                    json_data = json.loads(line[6:])
+                                    if json_data["type"] == "live":
+                                        full_response += json_data["data"]
+                                    elif json_data["type"] == "end":
+                                        final_data = json.loads(json_data["data"])
+                                        full_response = final_data["reply"]
+                                        break
+                        except json.JSONDecodeError:
+                            continue
+
+                yield full_response
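
Note that the rewrite drops the webdriver-based nonce scrape in favor of a hardcoded "x-wp-nonce" header value, which will stop working if the site rotates the nonce. A sketch of re-fetching it at runtime, reusing the regex from the removed code (fetch_wp_nonce is a hypothetical helper, not part of the PR):

import re
import aiohttp

async def fetch_wp_nonce(base_url: str = "https://chatgpt4online.org") -> str:
    # Scrape restNonce from the chat page, as the removed code did,
    # in case the hardcoded "d9505e9877" value goes stale
    async with aiohttp.ClientSession() as session:
        async with session.get(f"{base_url}/chat/") as response:
            response.raise_for_status()
            html = await response.text()
    result = re.search(r'restNonce":"(.*?)"', html)
    if not result:
        raise RuntimeError("No nonce found")
    return result.group(1)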
107 changes: 107 additions & 0 deletions g4f/Provider/FreeNetfly.py
@@ -0,0 +1,107 @@
from __future__ import annotations

import json
import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-4',
]

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
data = {
"messages": messages,
"stream": True,
"model": model,
"temperature": 0.5,
"presence_penalty": 0,
"frequency_penalty": 0,
"top_p": 1
}

max_retries = 3
retry_delay = 1

for attempt in range(max_retries):
try:
async with ClientSession(headers=headers) as session:
timeout = ClientTimeout(total=60)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
response.raise_for_status()
async for chunk in cls._process_response(response):
yield chunk
return # If successful, exit the function
except (ClientError, asyncio.TimeoutError) as e:
if attempt == max_retries - 1:
raise # If all retries failed, raise the last exception
await asyncio.sleep(retry_delay)
retry_delay *= 2 # Exponential backoff

@classmethod
async def _process_response(cls, response) -> AsyncGenerator[str, None]:
buffer = ""
async for line in response.content:
buffer += line.decode('utf-8')
if buffer.endswith('\n\n'):
for subline in buffer.strip().split('\n'):
if subline.startswith('data: '):
if subline == 'data: [DONE]':
return
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except json.JSONDecodeError:
print(f"Failed to parse JSON: {subline}")
except KeyError:
print(f"Unexpected JSON structure: {data}")
buffer = ""

# Process any remaining data in the buffer
if buffer:
for subline in buffer.strip().split('\n'):
if subline.startswith('data: ') and subline != 'data: [DONE]':
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except (json.JSONDecodeError, KeyError):
pass
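
Unlike Allyfy above, this provider yields incremental OpenAI-style delta chunks rather than one final string, so callers can render output as it streams in. A minimal sketch under the same g4f import assumption:

import asyncio

from g4f.Provider.FreeNetfly import FreeNetfly

async def main():
    # Chunks arrive as small delta strings; print them as a live stream
    async for chunk in FreeNetfly.create_async_generator(
        model="gpt-4",
        messages=[{"role": "user", "content": "Say hi"}],
    ):
        print(chunk, end="", flush=True)
    print()

asyncio.run(main())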

4 changes: 2 additions & 2 deletions g4f/Provider/GeminiProChat.py
@@ -13,10 +13,10 @@


class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
-    default_model = ''
+    default_model = 'gemini-pro'

@classmethod
async def create_async_generator(