Skip to content

Commit

Permalink
Merge branch 'xtekky:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
Lin-jun-xiang authored Nov 20, 2023
2 parents 937ed18 + d4c8f3e commit f3d474d
Show file tree
Hide file tree
Showing 223 changed files with 1,617 additions and 29,936 deletions.
43 changes: 38 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
> By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is not responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
> [!Note]
> Latest pypi version: [`0.1.8.2`](https://pypi.org/project/g4f/0.1.8.2)
> Latest pypi version: [`0.1.8.7`](https://pypi.org/project/g4f/0.1.8.7)
```sh
pip install -U g4f
```
Expand Down Expand Up @@ -243,6 +243,41 @@ for message in response:
print(message)
```

##### Using Browser

Some providers use a browser to bypass bot protection.
They use the Selenium webdriver to control the browser.
The browser settings and the login data are saved in a custom directory.
If headless mode is enabled, the browser windows are loaded invisibly.
For performance reasons, it is recommended to reuse the browser instances
and close them yourself at the end:

```python
import g4f
from undetected_chromedriver import Chrome, ChromeOptions
from g4f.Provider import (
Bard,
Poe,
AItianhuSpace,
MyShell,
Phind,
PerplexityAi,
)

options = ChromeOptions()
options.add_argument("--incognito");
browser = Chrome(options=options, headless=True)
for idx in range(10):
response = g4f.ChatCompletion.create(
model=g4f.models.default,
provider=g4f.Provider.Phind,
messages=[{"role": "user", "content": "Suggest me a name."}],
browser=browser
)
print(f"{idx}:", response)
browser.quit()
```

##### Cookies Required

Cookies are essential for the proper functioning of some service providers. It is imperative to maintain an active session, typically achieved by logging into your account.
Expand All @@ -253,18 +288,16 @@ When running the g4f package locally, the package automatically retrieves cookie
import g4f

from g4f.Provider import (
Bard,
Bing,
HuggingChat,
OpenAssistant,
OpenaiChat,
)

# Usage
response = g4f.ChatCompletion.create(
model=g4f.models.default,
messages=[{"role": "user", "content": "Hello"}],
provider=Bard,
provider=Bing,
#cookies=g4f.get_cookies(".google.com"),
cookies={"cookie_name": "value", "cookie_name2": "value2"},
auth=True
Expand Down Expand Up @@ -349,7 +382,7 @@ g4f api
or

```sh
python -m g4f.api
python -m g4f.api.run
```

```python
Expand Down
4 changes: 2 additions & 2 deletions etc/tool/create_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@ def input_command():
class ChatGpt(AsyncGeneratorProvider):
url = "https://chat-gpt.com"
url = "https://chat-gpt.com"
working = True
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
Expand Down
10 changes: 4 additions & 6 deletions g4f/Provider/AItianhu.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from __future__ import annotations

import json
import browser_cookie3

from ..typing import AsyncResult, Messages
from ..requests import StreamSession
Expand All @@ -10,7 +9,7 @@

class AItianhu(AsyncGeneratorProvider):
url = "https://www.aitianhu.com"
working = True
working = False
supports_gpt_35_turbo = True

@classmethod
Expand Down Expand Up @@ -72,12 +71,11 @@ async def create_async_generator(

if "detail" not in line:
raise RuntimeError(f"Response: {line}")
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):

content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content


@classmethod
@property
def params(cls):
Expand Down
177 changes: 99 additions & 78 deletions g4f/Provider/AItianhuSpace.py
Original file line number Diff line number Diff line change
@@ -1,95 +1,116 @@
from __future__ import annotations

import random, json
from .. import debug
from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
import time
import random

domains = {
"gpt-3.5-turbo": "aitianhu.space",
"gpt-4": "aitianhu.website",
}
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import WebDriver, WebDriverSession, format_prompt, get_random_string
from .. import debug

class AItianhuSpace(AsyncGeneratorProvider):
class AItianhuSpace(BaseProvider):
url = "https://chat3.aiyunos.top/"
working = True
supports_stream = True
supports_gpt_35_turbo = True
_domains = ["aitianhu.com", "aitianhu1.top"]

@classmethod
async def create_async_generator(cls,
model: str,
messages: Messages,
proxy: str = None,
domain: str = None,
cookies: dict = None,
timeout: int = 10, **kwargs) -> AsyncResult:

def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
domain: str = None,
proxy: str = None,
timeout: int = 120,
web_driver: WebDriver = None,
headless: bool = True,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"

elif model not in domains:
raise ValueError(f"Model are not supported: {model}")

if not domain:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = f"{rand}.{domains[model]}"

rand = get_random_string(6)
domain = random.choice(cls._domains)
domain = f"{rand}.{domain}"
if debug.logging:
print(f"AItianhuSpace | using domain: {domain}")
url = f"https://{domain}"
prompt = format_prompt(messages)

if not cookies:
cookies = get_cookies('.aitianhu.space')
if not cookies:
raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
with WebDriverSession(web_driver, "", headless=headless, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

url = f'https://{domain}'
async with StreamSession(proxies={"https": proxy},
cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:

data = {
"prompt": format_prompt(messages),
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
**kwargs
}
headers = {
"Authority": url,
"Accept": "application/json, text/plain, */*",
"Origin": url,
"Referer": f"{url}/"
}
async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
response.raise_for_status()
async for line in response.iter_lines():
if line == b"<script>":
raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
if content := line["detail"]["choices"][0]["delta"].get(
"content"
):
yield content
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")
else:
raise RuntimeError(f"Response: {line}")

wait = WebDriverWait(driver, timeout)

@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("temperature", "float"),
("top_p", "int"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
# Bypass devtools detection
driver.get("https://blank.page/")
wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
driver.execute_script(f"""
document.getElementById('sheet').addEventListener('click', () => {{
window.open('{url}', '_blank');
}});
""")
driver.find_element(By.ID, "sheet").click()
time.sleep(10)

original_window = driver.current_window_handle
for window_handle in driver.window_handles:
if window_handle != original_window:
driver.close()
driver.switch_to.window(window_handle)
break

# Wait for page load
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))

# Register hook in XMLHttpRequest
script = """
const _http_request_open = XMLHttpRequest.prototype.open;
window._last_message = window._message = "";
window._loadend = false;
XMLHttpRequest.prototype.open = function(method, url) {
if (url == "/api/chat-process") {
this.addEventListener("progress", (event) => {
const lines = this.responseText.split("\\n");
try {
window._message = JSON.parse(lines[lines.length-1])["text"];
} catch(e) { }
});
this.addEventListener("loadend", (event) => {
window._loadend = true;
});
}
return _http_request_open.call(this, method, url);
}
"""
driver.execute_script(script)

# Submit prompt
driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el").send_keys(prompt)
driver.find_element(By.CSS_SELECTOR, "button.n-button.n-button--primary-type.n-button--medium-type").click()

# Read response
while True:
chunk = driver.execute_script("""
if (window._message && window._message != window._last_message) {
try {
return window._message.substring(window._last_message.length);
} finally {
window._last_message = window._message;
}
}
if (window._loadend) {
return null;
}
return "";
""")
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)
59 changes: 59 additions & 0 deletions g4f/Provider/AiChatOnline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string

class AiChatOnline(AsyncGeneratorProvider):
    """Provider backed by aichatonline.org, streaming responses over SSE."""
    url = "https://aichatonline.org"
    working = True
    supports_gpt_35_turbo = True
    supports_message_history = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks from the site's chat-submit endpoint.

        Posts the conversation as JSON and parses the ``data: `` SSE lines,
        yielding ``live`` events until an ``end`` event arrives.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chatgpt/chat/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "aichatonline.org",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers"
        }
        # Fresh random session/chat identifiers per request; the site keeps
        # no message history for us (supports_message_history = False).
        payload = {
            "botId": "default",
            "customId": None,
            "session": get_random_string(16),
            "chatId": get_random_string(),
            "contextId": 7,
            "messages": messages,
            "newMessage": messages[-1]["content"],
            "newImageId": None,
            "stream": True,
        }
        endpoint = f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit"
        async with ClientSession(headers=headers) as session:
            async with session.post(endpoint, json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    # Only SSE data lines carry events; skip keep-alives etc.
                    if not line.startswith(b"data: "):
                        continue
                    event = json.loads(line[6:])
                    if event["type"] == "live":
                        yield event["data"]
                    elif event["type"] == "end":
                        break
Loading

0 comments on commit f3d474d

Please sign in to comment.