Merge pull request xtekky#1240 from hlohaus/pi
Add auto_continue to OpenaiChat
xtekky authored Nov 12, 2023
2 parents 9d222e8 + 745ea57 commit df8f416
Showing 7 changed files with 192 additions and 186 deletions.
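
The headline change: the `OpenaiChat` provider gains an `auto_continue` option. When a response ends before the model signals `end_turn`, the provider now re-posts the conversation with an `action: "continue"` payload until the turn completes. A minimal usage sketch (hedged: it assumes extra keyword arguments to `ChatCompletion.create_async` are forwarded to the provider, as the diff below indicates, and the access token is a placeholder):

```python
import asyncio

import g4f
from g4f import models
from g4f.Provider import OpenaiChat

async def main():
    # access_token and auto_continue are passed through to
    # OpenaiChat.create_async_generator (see the provider diff below).
    response = await g4f.ChatCompletion.create_async(
        model=models.default,
        messages=[{"role": "user", "content": "Write a long story."}],
        provider=OpenaiChat,
        access_token="<your ChatGPT access token>",  # placeholder value
        auto_continue=True,  # keep continuing until the model reports end_turn
    )
    print(response)

asyncio.run(main())
```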
120 changes: 57 additions & 63 deletions README.md

Large diffs are not rendered by default.

10 changes: 8 additions & 2 deletions etc/testing/_providers.py
@@ -35,8 +35,14 @@ def main():
 def get_providers() -> list[type[BaseProvider]]:
     providers = dir(Provider)
     providers = [getattr(Provider, provider) for provider in providers if provider != "RetryProvider"]
-    providers = [provider for provider in providers if isinstance(provider, type)]
-    return [provider for provider in providers if issubclass(provider, BaseProvider)]
+    providers = [provider for provider in providers if isinstance(provider, type) and hasattr(provider, "url")]
+    return [
+        provider
+        for provider in providers
+        if issubclass(provider, BaseProvider)
+        and provider.__name__ not in dir(Provider.deprecated)
+        and provider.__name__ not in dir(Provider.unfinished)
+    ]
 
 
 def create_response(_provider: type[BaseProvider]) -> str:
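
The tightened filter is easiest to judge by what it keeps. A small probe (a sketch, assuming a checkout of g4f at this commit, where `Provider.deprecated` and `Provider.unfinished` exist as the diff implies):

```python
from g4f import Provider
from g4f.Provider.base_provider import BaseProvider

# Provider names excluded because they live in the deprecated/unfinished namespaces.
excluded = set(dir(Provider.deprecated)) | set(dir(Provider.unfinished))

kept = [
    name for name in dir(Provider)
    if name != "RetryProvider"
    and isinstance(getattr(Provider, name), type)
    and hasattr(getattr(Provider, name), "url")  # drop helpers with no url attribute
    and issubclass(getattr(Provider, name), BaseProvider)
    and name not in excluded
]
print("\n".join(sorted(kept)))
```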
129 changes: 55 additions & 74 deletions etc/tool/readme_table.py
@@ -3,58 +3,32 @@
 from pathlib import Path
 from urllib.parse import urlparse
 
-sys.path.append(str(Path(__file__).parent.parent))
+sys.path.append(str(Path(__file__).parent.parent.parent))
 
 import asyncio
 from g4f import models
-from g4f.Provider.base_provider import AsyncProvider, BaseProvider
-from g4f.Provider.retry_provider import RetryProvider
-from testing._providers import get_providers
-
-logging = False
-
-
-def print_imports():
-    print("##### Providers:")
-    print("```py")
-    print("from g4f.Provider import (")
-    for _provider in get_providers():
-        if _provider.working:
-            print(f"    {_provider.__name__},")
-
-    print(")")
-    print("# Usage:")
-    print("response = g4f.ChatCompletion.create(..., provider=ProviderName)")
-    print("```")
-    print()
-    print()
-
-def print_async():
-    print("##### Async support:")
-    print("```py")
-    print("_providers = [")
-    for _provider in get_providers():
-        if _provider.working and issubclass(_provider, AsyncProvider):
-            print(f"    g4f.Provider.{_provider.__name__},")
-    print("]")
-    print("```")
-    print()
-    print()
+from g4f import ChatCompletion
+from g4f.Provider.base_provider import BaseProvider
+from etc.testing._providers import get_providers
+
+from g4f import debug
+
+debug.logging = True
 
 
 async def test_async(provider: type[BaseProvider]):
     if not provider.working:
         return False
-    model = models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else models.default.name
     messages = [{"role": "user", "content": "Hello Assistant!"}]
     try:
-        if issubclass(provider, AsyncProvider):
-            response = await provider.create_async(model=model, messages=messages)
-        else:
-            response = provider.create_completion(model=model, messages=messages, stream=False)
+        response = await asyncio.wait_for(ChatCompletion.create_async(
+            model=models.default,
+            messages=messages,
+            provider=provider
+        ), 30)
         return bool(response)
     except Exception as e:
-        if logging:
+        if debug.logging:
             print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
         return False
 
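
The rewritten `test_async` drops the `AsyncProvider` special case: every provider is exercised through the unified `ChatCompletion.create_async` entry point, and `asyncio.wait_for` abandons any provider that takes longer than 30 seconds. The same pattern in isolation (a sketch against the g4f API as it stands in this commit):

```python
import asyncio

import g4f
from g4f import models

async def probe(provider) -> bool:
    # One request per provider; give up after 30 seconds so a hung
    # provider cannot stall the whole run.
    try:
        response = await asyncio.wait_for(
            g4f.ChatCompletion.create_async(
                model=models.default,
                messages=[{"role": "user", "content": "Hello Assistant!"}],
                provider=provider,
            ),
            30,
        )
        return bool(response)
    except Exception:
        return False
```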

@@ -68,44 +42,53 @@ async def test_async_list(providers: list[type[BaseProvider]]):
 
 
 def print_providers():
-    lines = [
-        "| Website| Provider| gpt-3.5 | gpt-4 | Streaming | Asynchron | Status | Auth |",
-        "| ------ | ------- | ------- | ----- | --------- | --------- | ------ | ---- |",
-    ]
-
     providers = get_providers()
     responses = asyncio.run(test_async_list(providers))
 
-    for is_working in (True, False):
-        for idx, _provider in enumerate(providers):
-            if is_working != _provider.working:
-                continue
-            if _provider == RetryProvider:
-                continue
-
-            netloc = urlparse(_provider.url).netloc
-            website = f"[{netloc}]({_provider.url})"
-
-            provider_name = f"`g4f.Provider.{_provider.__name__}`"
-
-            has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
-            has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
-            stream = "✔️" if _provider.supports_stream else "❌"
-            can_async = "✔️" if issubclass(_provider, AsyncProvider) else "❌"
-            if _provider.working:
-                status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
-                if responses[idx]:
-                    status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
-                else:
-                    status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
-            else:
-                status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
-            auth = "✔️" if _provider.needs_auth else "❌"
-
-            lines.append(
-                f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {can_async} | {status} | {auth} |"
-            )
-    print("\n".join(lines))
+    for type in ("GPT-4", "GPT-3.5", "Other"):
+        lines = [
+            "",
+            f"### {type}",
+            "",
+            "| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |",
+            "| ------ | ------- | ------- | ----- | ------ | ------ | ---- |",
+        ]
+        for is_working in (True, False):
+            for idx, _provider in enumerate(providers):
+                if is_working != _provider.working:
+                    continue
+                do_continue = False
+                if type == "GPT-4" and _provider.supports_gpt_4:
+                    do_continue = True
+                elif type == "GPT-3.5" and not _provider.supports_gpt_4 and _provider.supports_gpt_35_turbo:
+                    do_continue = True
+                elif type == "Other" and not _provider.supports_gpt_4 and not _provider.supports_gpt_35_turbo:
+                    do_continue = True
+                if not do_continue:
+                    continue
+                netloc = urlparse(_provider.url).netloc
+                website = f"[{netloc}]({_provider.url})"
+
+                provider_name = f"`g4f.Provider.{_provider.__name__}`"
+
+                has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
+                has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
+                stream = "✔️" if _provider.supports_stream else "❌"
+                if _provider.working:
+                    status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
+                    if responses[idx]:
+                        status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
+                    else:
+                        status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
+                else:
+                    status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
+                auth = "✔️" if _provider.needs_auth else "❌"
+
+                lines.append(
+                    f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
+                )
+        print("\n".join(lines))
 
 def print_models():
     base_provider_names = {
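
The `do_continue` chain above encodes a precedence rule: each provider lands in exactly one table, with GPT-4 support outranking GPT-3.5, and the old Asynchron column is gone. Distilled (a sketch, not code from the repository):

```python
def section(supports_gpt_4: bool, supports_gpt_35_turbo: bool) -> str:
    # GPT-4 wins even when GPT-3.5 is also supported.
    if supports_gpt_4:
        return "GPT-4"
    if supports_gpt_35_turbo:
        return "GPT-3.5"
    return "Other"

assert section(True, True) == "GPT-4"
assert section(False, True) == "GPT-3.5"
assert section(False, False) == "Other"
```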
@@ -151,8 +134,6 @@ def get_models():
 
 
 if __name__ == "__main__":
-    print_imports()
-    print_async()
     print_providers()
     print("\n", "-" * 50, "\n")
     print_models()
1 change: 1 addition & 0 deletions g4f/Provider/MyShell.py
@@ -10,6 +10,7 @@
 class MyShell(AsyncGeneratorProvider):
     url = "https://app.myshell.ai/chat"
     working = True
+    supports_gpt_35_turbo = True
 
     @classmethod
     async def create_async_generator(
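
A one-line metadata fix: with `supports_gpt_35_turbo` set, MyShell moves out of the "Other" group and into the GPT-3.5 table generated by `readme_table.py` above.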
114 changes: 69 additions & 45 deletions g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-import uuid, json, time, os
+import uuid, json, time, asyncio
 from py_arkose_generator.arkose import get_values_for_request
 
 from ..base_provider import AsyncGeneratorProvider
@@ -24,6 +24,7 @@ async def create_async_generator(
         proxy: str = None,
         timeout: int = 120,
         access_token: str = None,
+        auto_continue: bool = False,
         cookies: dict = None,
         **kwargs
     ) -> AsyncResult:
@@ -34,50 +35,73 @@ async def create_async_generator(
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
         }
-        async with StreamSession(
-            proxies=proxies,
-            headers=headers,
-            impersonate="chrome107",
-            timeout=timeout
-        ) as session:
-            messages = [
-                {
-                    "id": str(uuid.uuid4()),
-                    "author": {"role": "user"},
-                    "content": {"content_type": "text", "parts": [format_prompt(messages)]},
-                },
-            ]
-            data = {
-                "action": "next",
-                "arkose_token": await get_arkose_token(proxy),
-                "messages": messages,
-                "conversation_id": None,
-                "parent_message_id": str(uuid.uuid4()),
-                "model": "text-davinci-002-render-sha",
-                "history_and_training_disabled": True,
-            }
-            async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
-                response.raise_for_status()
-                last_message = ""
-                async for line in response.iter_lines():
-                    if line.startswith(b"data: "):
-                        line = line[6:]
-                        if line == b"[DONE]":
-                            break
-                        try:
-                            line = json.loads(line)
-                        except:
-                            continue
-                        if "message" not in line:
-                            continue
-                        if "error" in line and line["error"]:
-                            raise RuntimeError(line["error"])
-                        if "message_type" not in line["message"]["metadata"]:
-                            continue
-                        if line["message"]["metadata"]["message_type"] == "next":
-                            new_message = line["message"]["content"]["parts"][0]
-                            yield new_message[len(last_message):]
-                            last_message = new_message
+        messages = [
+            {
+                "id": str(uuid.uuid4()),
+                "author": {"role": "user"},
+                "content": {"content_type": "text", "parts": [format_prompt(messages)]},
+            },
+        ]
+        message_id = str(uuid.uuid4())
+        data = {
+            "action": "next",
+            "arkose_token": await get_arkose_token(proxy),
+            "messages": messages,
+            "conversation_id": None,
+            "parent_message_id": message_id,
+            "model": "text-davinci-002-render-sha",
+            "history_and_training_disabled": not auto_continue,
+        }
+        conversation_id = None
+        while not end_turn:
+            if not auto_continue:
+                end_turn = True
+            async with StreamSession(
+                proxies=proxies,
+                headers=headers,
+                impersonate="chrome107",
+                timeout=timeout
+            ) as session:
+                async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
+                    try:
+                        response.raise_for_status()
+                    except:
+                        raise RuntimeError(f"Response: {await response.text()}")
+                    last_message = ""
+                    async for line in response.iter_lines():
+                        if line.startswith(b"data: "):
+                            line = line[6:]
+                            if line == b"[DONE]":
+                                break
+                            try:
+                                line = json.loads(line)
+                            except:
+                                continue
+                            if "message" not in line:
+                                continue
+                            if "error" in line and line["error"]:
+                                raise RuntimeError(line["error"])
+                            end_turn = line["message"]["end_turn"]
+                            message_id = line["message"]["id"]
+                            if line["conversation_id"]:
+                                conversation_id = line["conversation_id"]
+                            if "message_type" not in line["message"]["metadata"]:
+                                continue
+                            if line["message"]["metadata"]["message_type"] in ("next", "continue"):
+                                new_message = line["message"]["content"]["parts"][0]
+                                yield new_message[len(last_message):]
+                                last_message = new_message
+            if end_turn:
+                return
+            data = {
+                "action": "continue",
+                "arkose_token": await get_arkose_token(proxy),
+                "conversation_id": conversation_id,
+                "parent_message_id": message_id,
+                "model": "text-davinci-002-render-sha",
+                "history_and_training_disabled": False,
+            }
+            await asyncio.sleep(5)
 
     @classmethod
     async def browse_access_token(cls) -> str:
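
This hunk is the heart of the commit: instead of a single POST, the provider now loops. Each pass posts to `/backend-api/conversation`, streams the SSE events, tracks `end_turn`, `message_id`, and `conversation_id`, and yields only the new suffix of each partial message; if the turn is unfinished and `auto_continue` is set, it re-posts with `action: "continue"` after a short pause. A condensed sketch of that control flow (the `post_stream` callable is a hypothetical stand-in for the `StreamSession` plumbing, and `end_turn` is initialized explicitly here, which the hunk above appears to rely on but does not show):

```python
import asyncio
import uuid
from typing import AsyncIterator, Callable

async def stream_chat(
    post_stream: Callable[[dict], AsyncIterator[dict]],  # yields decoded SSE events
    prompt: str,
    auto_continue: bool = False,
) -> AsyncIterator[str]:
    message_id = str(uuid.uuid4())
    conversation_id = None
    data = {
        "action": "next",
        "messages": [{
            "id": str(uuid.uuid4()),
            "author": {"role": "user"},
            "content": {"content_type": "text", "parts": [prompt]},
        }],
        "conversation_id": None,
        "parent_message_id": message_id,
    }
    end_turn = False  # must start falsy for the loop to run; assumed here
    while not end_turn:
        if not auto_continue:
            end_turn = True  # single-shot mode: stop after one request
        last_message = ""
        async for event in post_stream(data):
            if "message" not in event:
                continue
            end_turn = event["message"]["end_turn"]
            message_id = event["message"]["id"]
            conversation_id = event.get("conversation_id") or conversation_id
            new_message = event["message"]["content"]["parts"][0]
            yield new_message[len(last_message):]  # emit only the fresh suffix
            last_message = new_message
        if end_turn:
            return
        # The turn is unfinished: ask the model to pick up where it stopped.
        data = {
            "action": "continue",
            "conversation_id": conversation_id,
            "parent_message_id": message_id,
        }
        await asyncio.sleep(5)  # brief pause before continuing, as in the diff
```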
2 changes: 1 addition & 1 deletion g4f/models.py
@@ -82,7 +82,7 @@ def __all__() -> list[str]:
     best_provider = RetryProvider([Llama2, DeepInfra]))
 
 llama2_13b = Model(
-    name ="meta-llama/Llama-2-13b-chat-hf",
+    name = "meta-llama/Llama-2-13b-chat-hf",
     base_provider = 'huggingface',
     best_provider = RetryProvider([Llama2, DeepInfra]))
 
2 changes: 1 addition & 1 deletion requirements.txt
@@ -21,4 +21,4 @@ asgiref
 fastapi
 uvicorn
 flask
-git+https://github.com/hlohaus/py-arkose-token-generator.git
+py-arkose-generator
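
The Arkose dependency moves from a git checkout to the published PyPI package, matching the `from py_arkose_generator.arkose import get_values_for_request` import in OpenaiChat.py, so installing from requirements.txt no longer needs git.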
