From cd4207a1464f51be7b8b3db4223d6382cb4b15bd Mon Sep 17 00:00:00 2001
From: razrab
Date: Mon, 20 Nov 2023 09:22:20 +0300
Subject: [PATCH 01/21] Fix typo in GptGo

---
 g4f/Provider/GptGo.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index ac3f7fe8946..726c7a99af6 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -63,7 +63,7 @@ async def create_async_generator(
             if line["choices"][0]["finish_reason"] == "stop":
                 break
 
-            content = line["choices"][0]["delta"].get("content"):
+            content = line["choices"][0]["delta"].get("content")
             if content:
                 yield content
 
@@ -79,4 +79,4 @@ def params(cls):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

From 702837a33ab1b632ecb8903cda8d45a87a7e400a Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 20 Nov 2023 13:59:14 +0100
Subject: [PATCH 02/21] Add auto support params method

---
 g4f/Provider/AItianhu.py               | 21 +++----
 g4f/Provider/ChatBase.py               | 13 +------
 g4f/Provider/ChatForAi.py              | 10 ------
 g4f/Provider/FreeGpt.py                | 10 ------
 g4f/Provider/GeekGpt.py                | 14 +--------
 g4f/Provider/GptGo.py                  | 20 ++--------
 g4f/Provider/Liaobots.py               | 14 ---------
 g4f/Provider/Opchatgpts.py             | 14 +--------
 g4f/Provider/Ylokh.py                  | 19 +-----------
 g4f/Provider/base_provider.py          | 43 +++++++++++++++++++-----
 g4f/Provider/deprecated/Aibn.py        | 12 -------
 g4f/Provider/deprecated/Ails.py        | 13 --------
 g4f/Provider/deprecated/Aivvm.py       | 14 +--------
 g4f/Provider/deprecated/ChatgptDuo.py  | 13 +-------
 g4f/Provider/deprecated/CodeLinkAva.py | 15 +--------
 g4f/Provider/deprecated/DfeHub.py      | 15 ---------
 g4f/Provider/deprecated/EasyChat.py    | 19 +-----------
 g4f/Provider/deprecated/Equing.py      | 13 +-------
 g4f/Provider/deprecated/FastGpt.py     | 13 +-------
 g4f/Provider/deprecated/GetGpt.py      | 16 ----------
 g4f/Provider/deprecated/H2o.py         | 20 +-----------
 g4f/Provider/deprecated/Lockchat.py    | 14 +--------
 g4f/Provider/deprecated/Myshell.py     | 12 -------
 g4f/Provider/deprecated/V50.py         | 15 +--------
 g4f/Provider/deprecated/Vitalentum.py  | 16 +---------
 g4f/Provider/deprecated/Wuguokai.py    | 13 +-------
 26 files changed, 56 insertions(+), 355 deletions(-)

diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index fcf9a4fb8a4..05ee5a2030f 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -71,21 +71,8 @@ async def create_async_generator(
 
                     if "detail" not in line:
                         raise RuntimeError(f"Response: {line}")
-
-                    content = line["detail"]["choices"][0]["delta"].get("content")
-                    if content:
-                        yield content
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("temperature", "float"),
-            ("top_p", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                    if content := line["detail"]["choices"][0]["delta"].get(
+                        "content"
+                    ):
+                        yield content
\ No newline at end of file
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index ccc20244990..996ca39ac27 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -58,15 +58,4 @@ async def create_async_generator(
                 for incorrect_response in cls.list_incorrect_responses:
                     if incorrect_response in response_data:
                         raise RuntimeError("Incorrect response")
-                    yield stream.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                    yield stream.decode()
\ No newline at end of file
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 7a123f0f71a..afab034bf34 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -57,16 +57,6 @@ async def create_async_generator(
                     raise RuntimeError(f"Response: {chunk.decode()}")
                 yield chunk.decode()
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 def generate_signature(timestamp: int, message: str, id: str):
     buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 22c6c9aa435..15232c8df60 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -47,16 +47,6 @@ async def create_async_generator(
                     raise RuntimeError("Rate limit reached")
                 yield chunk
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 def generate_signature(timestamp: int, message: str, secret: str = ""):
     data = f"{timestamp}:{message}:{secret}"
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
index 8c449745058..9ed9c09b981 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/GeekGpt.py
@@ -70,16 +70,4 @@ def create_completion(
                 raise RuntimeError(f'error | {e} :', json_data)
 
             if content:
-                yield content
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ('model', 'str'),
-            ('messages', 'list[dict[str, str]]'),
-            ('stream', 'bool'),
-            ('temperature', 'float'),
-        ]
-        param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
+                yield content
\ No newline at end of file
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index ac3f7fe8946..be9979f2a05 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -62,21 +62,5 @@ async def create_async_generator(
                 line = json.loads(line[len(start):-1])
                 if line["choices"][0]["finish_reason"] == "stop":
                     break
-
-            content = line["choices"][0]["delta"].get("content"):
-            if content:
-                yield content
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                if content := line["choices"][0]["delta"].get("content"):
+                    yield content
\ No newline at end of file
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 109f7e2d359..807b44247b7 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -97,17 +97,3 @@ async def create_async_generator(
             async for stream in response.content.iter_any():
                 if stream:
                     yield stream.decode()
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("auth", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
index 8abdf39b30b..8c2987face3 100644
--- a/g4f/Provider/Opchatgpts.py
+++ b/g4f/Provider/Opchatgpts.py
@@ -56,16 +56,4 @@ async def create_async_generator(
                     if line["type"] == "live":
                         yield line["data"]
                     elif line["type"] == "end":
-                        break
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                        break
\ No newline at end of file
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 136921390b7..11fe497faf0 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -55,21 +55,4 @@ async def create_async_generator(
                         yield content
             else:
                 chat = await response.json()
-                yield chat["choices"][0]["message"].get("content")
-
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("timeout", "int"),
-            ("temperature", "float"),
-            ("top_p", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                yield chat["choices"][0]["message"].get("content")
\ No newline at end of file
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 47ea6ff8bda..564dd77eeb4 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -3,6 +3,8 @@
 from asyncio import AbstractEventLoop
 from concurrent.futures import ThreadPoolExecutor
 from abc import ABC, abstractmethod
+from inspect import signature, Parameter
+from types import NoneType
 
 from .helper import get_event_loop, get_cookies, format_prompt
 from ..typing import CreateResult, AsyncResult, Messages
@@ -52,17 +54,42 @@ def create_func() -> str:
             executor,
             create_func
         )
-
+    
     @classmethod
     @property
     def params(cls) -> str:
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        if issubclass(cls, AsyncGeneratorProvider):
+            sig = signature(cls.create_async_generator)
+        elif issubclass(cls, AsyncProvider):
+            sig = signature(cls.create_async)
+        else:
+            sig = signature(cls.create_completion)
+
+        def get_type_name(annotation: type) -> str:
+            if hasattr(annotation, "__name__"):
+                annotation = annotation.__name__
+            elif isinstance(annotation, NoneType):
+                annotation = "None"
+            return str(annotation)
+
+        args = "";
+        for name, param in sig.parameters.items():
+            if name in ("self", "kwargs"):
+                continue
+            if name == "stream" and not cls.supports_stream:
+                continue
+            if args:
+                args += ", "
+            args += "\n"
+            args += "    " + name
+            if name != "model" and param.annotation is not Parameter.empty:
+                args += f": {get_type_name(param.annotation)}"
+            if param.default == "":
+                args += ' = ""'
+            elif param.default is not Parameter.empty:
+                args += f" = {param.default}"
+
+        return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
 
 
 class AsyncProvider(BaseProvider):
diff --git a/g4f/Provider/deprecated/Aibn.py b/g4f/Provider/deprecated/Aibn.py
index 60cef1e48c4..0bbfb436caf 100644
--- a/g4f/Provider/deprecated/Aibn.py
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -39,18 +39,6 @@ async def create_async_generator(
             response.raise_for_status()
             async for chunk in response.iter_content():
                 yield chunk.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 
 def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
diff --git a/g4f/Provider/deprecated/Ails.py b/g4f/Provider/deprecated/Ails.py
index 5244fd7570c..e87ceb32dd8 100644
--- a/g4f/Provider/deprecated/Ails.py
+++ b/g4f/Provider/deprecated/Ails.py
@@ -77,19 +77,6 @@ async def create_async_generator(
 
                         yield token
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
 def _hash(json_data: dict[str, str]) -> SHA256:
     base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
index 12fd387db86..8b5a9e05a4b 100644
--- a/g4f/Provider/deprecated/Aivvm.py
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -69,16 +69,4 @@ def create_completion(cls,
             try:
                 yield chunk.decode("utf-8")
             except UnicodeDecodeError:
-                yield chunk.decode("unicode-escape")
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ('model', 'str'),
-            ('messages', 'list[dict[str, str]]'),
-            ('stream', 'bool'),
-            ('temperature', 'float'),
-        ]
-        param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
+                yield chunk.decode("unicode-escape")
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
index c77c6a1c6be..c2d2de7ab42 100644
--- a/g4f/Provider/deprecated/ChatgptDuo.py
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -44,15 +44,4 @@ async def create_async(
 
     @classmethod
     def get_sources(cls):
-        return cls._sources
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return cls._sources
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
index 64ce1af9379..a909ab970a2 100644
--- a/g4f/Provider/deprecated/CodeLinkAva.py
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -47,17 +47,4 @@ async def create_async_generator(
                         break
                     line = json.loads(line[6:-1])
                     if content := line["choices"][0]["delta"].get("content"):
-                        yield content
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                        yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
index 4ea7501f760..4458bac65af 100644
--- a/g4f/Provider/deprecated/DfeHub.py
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -60,18 +60,3 @@ def create_completion(
             if b"content" in chunk:
                 data = json.loads(chunk.decode().split("data: ")[1])
                 yield (data["choices"][0]["delta"]["content"])
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
index bd49c09c418..3142f243a6d 100644
--- a/g4f/Provider/deprecated/EasyChat.py
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -87,21 +87,4 @@ def create_completion(
                     splitData = chunk.decode().split("data:")
 
                     if len(splitData) > 1:
-                        yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int"),
-            ("active_server", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                        yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 5ba125a3191..076b5ac5527 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -66,15 +66,4 @@ def create_completion(
             if b'content' in line:
                 line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                 if token := line_json['choices'][0]['delta'].get('content'):
-                    yield token
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                    yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index 17b21b3740b..ef69e8925f9 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -74,15 +74,4 @@ def create_completion(
                     ):
                         yield token
                 except:
-                    continue
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                    continue
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
index 0fbb5b875a7..a7f4695cc85 100644
--- a/g4f/Provider/deprecated/GetGpt.py
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -55,22 +55,6 @@ def create_completion(
                 line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                 yield (line_json['choices'][0]['delta']['content'])
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ('model', 'str'),
-            ('messages', 'list[dict[str, str]]'),
-            ('stream', 'bool'),
-            ('temperature', 'float'),
-            ('presence_penalty', 'int'),
-            ('frequency_penalty', 'int'),
-            ('top_p', 'int'),
-            ('max_tokens', 'int'),
-        ]
-        param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
-
 
 def _encrypt(e: str):
     t = os.urandom(8).hex().encode('utf-8')
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
index cead17e1678..ba4ca507c08 100644
--- a/g4f/Provider/deprecated/H2o.py
+++ b/g4f/Provider/deprecated/H2o.py
@@ -86,22 +86,4 @@ async def create_async_generator(
                 f"{cls.url}/conversation/{conversationId}",
                 proxy=proxy,
             ) as response:
-                response.raise_for_status()
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("truncate", "int"),
-            ("max_new_tokens", "int"),
-            ("do_sample", "bool"),
-            ("repetition_penalty", "float"),
-            ("return_full_text", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index 5acfbfbf861..d93c9f8a220 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -48,16 +48,4 @@ def create_completion(
             if b"content" in token:
                 token = json.loads(token.decode("utf-8").split("data: ")[1])
                 if token := token["choices"][0]["delta"].get("content"):
-                    yield (token)
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                    yield (token)
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py
index 85731325f45..2487440d041 100644
--- a/g4f/Provider/deprecated/Myshell.py
+++ b/g4f/Provider/deprecated/Myshell.py
@@ -98,18 +98,6 @@ async def create_async_generator(
                     raise RuntimeError(f"Received unexpected message: {data_type}")
 
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
 def generate_timestamp() -> str:
     return str(
         int(
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
index f4f4d823a6c..e24ac2d488f 100644
--- a/g4f/Provider/deprecated/V50.py
+++ b/g4f/Provider/deprecated/V50.py
@@ -58,17 +58,4 @@ def create_completion(
         )
 
         if "https://fk1.v50.ltd" not in response.text:
-            yield response.text
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("top_p", "int"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+            yield response.text
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
index d6ba9336f18..13160d94f41 100644
--- a/g4f/Provider/deprecated/Vitalentum.py
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -50,18 +50,4 @@ async def create_async_generator(
                         break
                     line = json.loads(line[6:-1])
                     if content := line["choices"][0]["delta"].get("content"):
-                        yield content
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                        yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
index 079f0541e71..87877198e3c 100644
--- a/g4f/Provider/deprecated/Wuguokai.py
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -54,15 +54,4 @@ def create_completion(
                 if len(_split) > 1:
                     yield _split[1].strip()
                 else:
-                    yield _split[0].strip()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool")
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+                    yield _split[0].strip()
\ No newline at end of file
From 08e308348b2825f4dfe309158c25a1d55ac45271 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 20 Nov 2023 14:00:40 +0100
Subject: [PATCH 03/21] Add webdriver module

---
 g4f/Provider/AItianhuSpace.py            |   7 +-
 g4f/Provider/MyShell.py                  |  18 ++--
 g4f/Provider/PerplexityAi.py             |   7 +-
 g4f/Provider/Phind.py                    |  50 ++++-----
 g4f/Provider/TalkAi.py                   |   6 +-
 g4f/Provider/helper.py                   |  84 +--------------
 g4f/Provider/needs_auth/Bard.py          |  11 +-
 g4f/Provider/needs_auth/HuggingChat.py   |  15 +--
 g4f/Provider/needs_auth/OpenAssistant.py |  12 ---
 g4f/Provider/needs_auth/OpenaiChat.py    | 131 ++++++++++------------
 g4f/Provider/needs_auth/Poe.py           |  11 +-
 g4f/Provider/needs_auth/Raycast.py       |  15 ---
 g4f/Provider/needs_auth/Theb.py          |  29 ++---
 g4f/Provider/webdriver.py                |  92 ++++++++++++++++
 14 files changed, 219 insertions(+), 269 deletions(-)
 create mode 100644 g4f/Provider/webdriver.py

diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index fabe6b475e5..95386e8ef0d 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -5,7 +5,8 @@
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt, get_random_string
+from .helper import format_prompt, get_random_string
+from .webdriver import WebDriver, WebDriverSession
 from .. import debug
 
 class AItianhuSpace(BaseProvider):
@@ -24,7 +25,7 @@ def create_completion(
         domain: str = None,
         proxy: str = None,
         timeout: int = 120,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         headless: bool = True,
         **kwargs
     ) -> CreateResult:
@@ -39,7 +40,7 @@ def create_completion(
             url = f"https://{domain}"
         prompt = format_prompt(messages)
 
-        with WebDriverSession(web_driver, "", headless=headless, proxy=proxy) as driver:
+        with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index a1c8d33590c..5c9c4fe6f0d 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -4,7 +4,8 @@
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
 
 class MyShell(BaseProvider):
     url = "https://app.myshell.ai/chat"
@@ -20,10 +21,10 @@ def create_completion(
         stream: bool,
         proxy: str = None,
         timeout: int = 120,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> CreateResult:
-        with WebDriverSession(web_driver, "", proxy=proxy) as driver:
+        with WebDriverSession(webdriver, "", proxy=proxy) as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
@@ -52,15 +53,16 @@ def create_completion(
     "body": '{body}',
     "method": "POST"
 })
-window.reader = response.body.getReader();
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
 """
         driver.execute_script(script.replace("{body}", json.dumps(data)))
         script = """
-chunk = await window.reader.read();
-if (chunk['done']) return null;
-text = (new TextDecoder()).decode(chunk['value']);
+chunk = await window._reader.read();
+if (chunk['done']) {
+    return null;
+}
 content = '';
-text.split('\\n').forEach((line, index) => {
+chunk['value'].split('\\n').forEach((line, index) => {
     if (line.startsWith('data: ')) {
         try {
             const data = JSON.parse(line.substring('data: '.length));
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index c0b2412e2d9..03353a957f4 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -4,7 +4,8 @@
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
 
 class PerplexityAi(BaseProvider):
     url = "https://www.perplexity.ai"
@@ -20,12 +21,12 @@ def create_completion(
         stream: bool,
         proxy: str = None,
         timeout: int = 120,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         virtual_display: bool = True,
         copilot: bool = False,
         **kwargs
     ) -> CreateResult:
-        with WebDriverSession(web_driver, "", virtual_display=virtual_display, proxy=proxy) as driver:
+        with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 32f6366525a..82769ab0dbb 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -5,7 +5,8 @@
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
 
 class Phind(BaseProvider):
     url = "https://www.phind.com"
@@ -21,11 +22,11 @@ def create_completion(
         stream: bool,
         proxy: str = None,
         timeout: int = 120,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         creative_mode: bool = None,
         **kwargs
     ) -> CreateResult:
-        with WebDriverSession(web_driver, "", proxy=proxy) as driver:
+        with WebDriverSession(webdriver, "", proxy=proxy) as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
@@ -34,40 +35,38 @@ def create_completion(
             driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
 
             # Register fetch hook
-            driver.execute_script("""
+            source = """
 window._fetch = window.fetch;
-window.fetch = (url, options) => {
-    // Call parent fetch method
-    const result = window._fetch(url, options);
+window.fetch = async (url, options) => {
+    const response = await window._fetch(url, options);
     if (url != "/api/infer/answer") {
-        return result;
+        return response;
     }
-    // Load response reader
-    result.then((response) => {
-        if (!response.body.locked) {
-            window._reader = response.body.getReader();
-        }
-    });
-    // Return dummy response
-    return new Promise((resolve, reject) => {
-        resolve(new Response(new ReadableStream()))
-    });
+    copy = response.clone();
+    window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+    return copy;
 }
-""")
+"""
+            driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+                "source": source
+            })
 
             # Need to change settings
-            if model.startswith("gpt-4") or creative_mode:
-                wait = WebDriverWait(driver, timeout)
+            wait = WebDriverWait(driver, timeout)
+            def open_dropdown():
                 # Open settings dropdown
                 wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
                 driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
                 # Wait for dropdown toggle
                 wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
-                # Enable GPT-4
+            if model.startswith("gpt-4") or creative_mode:
+                # Enable GPT-4
                 if model.startswith("gpt-4"):
+                    open_dropdown()
                     driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
                 # Enable creative mode
                 if creative_mode or creative_mode == None:
+                    open_dropdown()
                     driver.find_element(By.ID, "Creative Mode").click()
                 # Submit changes
                 driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
@@ -78,10 +77,11 @@ def create_completion(
                 chunk = driver.execute_script("""
 if(window._reader) {
     chunk = await window._reader.read();
-    if (chunk['done']) return null;
-    text = (new TextDecoder()).decode(chunk['value']);
+    if (chunk['done']) {
+        return null;
+    }
     content = '';
-    text.split('\\r\\n').forEach((line, index) => {
+    chunk['value'].split('\\r\\n').forEach((line, index) => {
         if (line.startsWith('data: ')) {
             line = line.substring('data: '.length);
             if (!line.startsWith('')) {
diff --git a/g4f/Provider/TalkAi.py b/g4f/Provider/TalkAi.py
index 20ba65b583d..0edd9f6b7dc 100644
--- a/g4f/Provider/TalkAi.py
+++ b/g4f/Provider/TalkAi.py
@@ -4,7 +4,7 @@
 
 from ..typing import CreateResult, Messages
 from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession
+from .webdriver import WebDriver, WebDriverSession
 
 class TalkAi(BaseProvider):
     url = "https://talkai.info"
@@ -19,10 +19,10 @@ def create_completion(
         messages: Messages,
         stream: bool,
         proxy: str = None,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> CreateResult:
-        with WebDriverSession(web_driver, "", virtual_display=True, proxy=proxy) as driver:
+        with WebDriverSession(webdriver, "", virtual_display=True, proxy=proxy) as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 03e9ba94a76..2171f0b787b 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,7 +6,6 @@
 import random
 import string
 import secrets
-import time
 from os import path
 from asyncio import AbstractEventLoop
 from platformdirs import user_config_dir
@@ -21,26 +20,8 @@
     firefox,
     BrowserCookieError
 )
-try:
-    from selenium.webdriver.remote.webdriver import WebDriver
-except ImportError:
-    class WebDriver():
-        pass
-try:
-    from undetected_chromedriver import Chrome, ChromeOptions
-except ImportError:
-    class Chrome():
-        def __init__():
-            raise RuntimeError('Please install the "undetected_chromedriver" package')
-    class ChromeOptions():
-        def add_argument():
-            pass
-try:
-    from pyvirtualdisplay import Display
-except ImportError:
-    pass
 
-from ..typing import Dict, Messages, Union, Tuple
+from ..typing import Dict, Messages
 from .. import debug
 
 # Change event loop policy on windows
@@ -135,74 +116,11 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
     return f"{formatted}\nAssistant:"
 
 
-def get_browser(
-    user_data_dir: str = None,
-    headless: bool = False,
-    proxy: str = None,
-    options: ChromeOptions = None
-) -> Chrome:
-    if user_data_dir == None:
-        user_data_dir = user_config_dir("g4f")
-    if proxy:
-        if not options:
-            options = ChromeOptions()
-        options.add_argument(f'--proxy-server={proxy}')
-    return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
-
-class WebDriverSession():
-    def __init__(
-        self,
-        web_driver: WebDriver = None,
-        user_data_dir: str = None,
-        headless: bool = False,
-        virtual_display: bool = False,
-        proxy: str = None,
-        options: ChromeOptions = None
-    ):
-        self.web_driver = web_driver
-        self.user_data_dir = user_data_dir
-        self.headless = headless
-        self.virtual_display = virtual_display
-        self.proxy = proxy
-        self.options = options
-
-    def reopen(
-        self,
-        user_data_dir: str = None,
-        headless: bool = False,
-        virtual_display: bool = False
-    ) -> WebDriver:
-        if user_data_dir == None:
-            user_data_dir = self.user_data_dir
-        self.default_driver.quit()
-        if not virtual_display and self.virtual_display:
-            self.virtual_display.stop()
-        self.default_driver = get_browser(user_data_dir, headless, self.proxy)
-        return self.default_driver
-
-    def __enter__(self) -> WebDriver:
-        if self.web_driver:
-            return self.web_driver
-        if self.virtual_display == True:
-            self.virtual_display = Display(size=(1920,1080))
-            self.virtual_display.start()
-        self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
-        return self.default_driver
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.default_driver:
-            self.default_driver.close()
-            time.sleep(0.1)
-            self.default_driver.quit()
-        if self.virtual_display:
-            self.virtual_display.stop()
-
 def get_random_string(length: int = 10) -> str:
     return ''.join(
         random.choice(string.ascii_lowercase + string.digits)
         for _ in range(length)
     )
-
 def get_random_hex() -> str:
     return secrets.token_hex(16).zfill(32)
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 77c029b8517..2c1f6121382 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -4,7 +4,8 @@
 
 from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
 
 class Bard(BaseProvider):
     url = "https://bard.google.com"
@@ -18,13 +19,13 @@ def create_completion(
         messages: Messages,
         stream: bool,
         proxy: str = None,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         user_data_dir: str = None,
         headless: bool = True,
         **kwargs
     ) -> CreateResult:
         prompt = format_prompt(messages)
-        session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+        session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
         with session as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
@@ -36,8 +37,8 @@ def create_completion(
                 wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
             except:
                 # Reopen browser for login
-                if not web_driver:
-                    driver = session.reopen(headless=False)
+                if not webdriver:
+                    driver = session.reopen()
                     driver.get(f"{cls.url}/chat")
                     wait = WebDriverWait(driver, 240)
                     wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 68c6713befc..59e2da73529 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -59,17 +59,4 @@ async def create_async_generator(
                         break
 
             async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
-                response.raise_for_status()
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+                response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index de62636cb8b..e549b517ccd 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -87,15 +87,3 @@ async def create_async_generator(
             }
             async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
                 response.raise_for_status()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9fd90812295..8c9dd1e0530 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -6,7 +6,8 @@
 from async_property import async_cached_property
 
 from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_browser, get_event_loop
+from ..helper import get_event_loop
+from ..webdriver import get_browser
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 
@@ -38,7 +39,10 @@ async def create(
         **kwargs
     ) -> Response:
         if prompt:
-            messages.append({"role": "user", "content": prompt})
+            messages.append({
+                "role": "user",
+                "content": prompt
+            })
         generator = cls.create_async_generator(
             model,
             messages,
@@ -49,12 +53,9 @@ async def create(
             response_fields=True,
             **kwargs
         )
-        fields: ResponseFields = await anext(generator)
-        if "access_token" not in kwargs:
-            kwargs["access_token"] = cls._access_token
         return Response(
             generator,
-            fields,
+            await anext(generator),
             action,
             messages,
             kwargs
@@ -87,7 +88,6 @@ async def create_async_generator(
         headers = {
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
-            "Cookie": 'intercom-device-id-dgkjq2bp=0f047573-a750-46c8-be62-6d54b56e7bf0; ajs_user_id=user-iv3vxisaoNodwWpxmNpMfekH; ajs_anonymous_id=fd91be0b-0251-4222-ac1e-84b1071e9ec1; __Host-next-auth.csrf-token=d2b5f67d56f7dd6a0a42ae4becf2d1a6577b820a5edc88ab2018a59b9b506886%7Ce5c33eecc460988a137cbc72d90ee18f1b4e2f672104f368046df58e364376ac; _cfuvid=gt_mA.q6rue1.7d2.AR0KHpbVBS98i_ppfi.amj2._o-1700353424353-0-604800000; cf_clearance=GkHCfPSFU.NXGcHROoe4FantnqmnNcluhTNHz13Tk.M-1700353425-0-1-dfe77f81.816e9bc2.714615da-0.2.1700353425; __Secure-next-auth.callback-url=https%3A%2F%2Fchat.openai.com; intercom-session-dgkjq2bp=UWdrS1hHazk5VXN1c0V5Q1F0VXdCQmsyTU9pVjJMUkNpWnFnU3dKWmtIdGwxTC9wbjZuMk5hcEc0NWZDOGdndS0tSDNiaDNmMEdIL1RHU1dFWDBwOHFJUT09--f754361b91fddcd23a13b288dcb2bf8c7f509e91; _uasid="Z0FBQUFBQmxXVnV0a3dmVno4czRhcDc2ZVcwaUpSNUdZejlDR25YSk5NYTJQQkpyNmRvOGxjTHMyTlAxWmJhaURrMVhjLXZxQXdZeVpBbU1aczA5WUpHT2dwaS1MOWc4MnhyNWFnbGRzeGdJcGFKT0ZRdnBTMVJHcGV2MGNTSnVQY193c0hqUWIycHhQRVF4dENlZ3phcDdZeHgxdVhoalhrZmtZME9NbWhMQjdVR3Vzc3FRRk0ybjJjNWMwTWtIRjdPb19lUkFtRmV2MDVqd1kwWU11QTYtQkdZenEzVHhLMGplY1hZM3FlYUt1cVZaNWFTRldleEJETzJKQjk1VTJScy1GUnMxUVZWMnVxYklxMjdockVZbkZyd1R4U1RtMnA1ZzlSeXphdmVOVk9xeEdrRkVOSjhwTVd1QzFtQjhBcWdDaE92Q1VlM2pwcjFQTXRuLVJNRVlZSGpIdlZ0aGV3PT0="; _dd_s=rum=0&expire=1700356244884; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..3aK6Fbdy2_8f07bf.8eT2xgonrCnz7ySY6qXFsg3kzL6UQfXKAYaw3tyn-6_X9657zy47k9qGvmi9mF0QKozj5jau3_Ca62AQQ7FmeC6Y2F1urtzqrXqwTTsQ2LuzFPIQkx6KKb2DXc8zW2-oyEzJ_EY5yxfLB2RlRkSh3M7bYNZh4_ltEcfkj38s_kIPGMxv34udtPWGWET99MCjkdwQWXylJag4s0fETA0orsBAKnGCyqAUNJbb_D7BYtGSV-MQ925kZMG6Di_QmfO0HQWURDYjmdRNcuy1PT_xJ1DJko8sjL42i4j3RhkNDkhqCIqyYImz2eHFWHW7rYKxTkrBhlCPMS5hRdcCswD7JYPcSBiwnVRYgyOocFGXoFvQgIZ2FX9NiZ3SMEVM1VwIGSE-qH0H2nMa8_iBvsOgOWJgKjVAvzzyzZvRVDUUHzJrikSFPNONVDU3h-04c1kVL4qIu9DfeTPN7n8AvNmYwMbro0L9-IUAeXNo4-pwF0Kt-AtTsamqWvMqnK4O_YOyLnDDlvkmnOvDC2d5uinwlQIxr6APO6qFfGLlHiLZemKoekxEE1Fx70dl-Ouhk1VIzbF3OC6XNNxeBm9BUYUiHdL0wj2H9rHgX4cz6ZmS_3VTgpD6UJh-evu5KJ2gIvjYmVbyzEN0aPNDxfvBaOm-Ezpy4bUJ2bUrOwNn-0knWkDiTvjYmNhCyefPCtCF6rpKNay8PCw_yh79C4SdEP6Q4V7LI0Tvdi5uz7kLCiBC4AT9L0ao1WDX03mkUOpjvzHDvPLmj8chW3lTVm_kA0eYGQY4wT0jzleWlfV0Q8rB2oYECNLWksA3F1zlGfcl4lQjprvTXRePkvAbMpoJEsZD3Ylq7-foLDLk4-M2LYAFZDs282AY04sFjAjQBxTELFCCuDgTIgTXSIskY_XCxpVXDbdLlbCJY7XVK45ybwtfqwlKRp8Mo0B131uQAFc-migHaUaoGujxJJk21bP8F0OmhNYHBo4FQqE1rQm2JH5bNM7txKeh5KXdJgVUVbRSr7OIp_OF5-Bx_v9eRBGAIDkue26E2-O8Rnrp5zQ5TnvecQLDaUzWavCLPwsZ0_gsOLBxNOmauNYZtF8IElCsQSFDdhoiMxXsYUm4ZYKEAy3GWq8HGTAvBhNkh1hvnI7y-d8-DOaZf_D_D98-olZfm-LUkeosLNpPB9rxYMqViCiW3KrXE9Yx0wlFm5ePKaVvR7Ym_EPhSOhJBKFPCvdTdMZSNPUcW0ZJBVByq0A9sxD51lYq3gaFyqh94S4s_ox182AQ3szGzHkdgLcnQmJG9OYvKxAVcd43eg6_gODAYhx02GjbMw-7JTAhyXSeCrlMteHyOXl8hai-3LilC3PmMzi7Vbu49dhF1s4LcVlUowen5ira44rQQaB26mdaOUoQfodgt66M3RTWGPXyK1Nb72AzSXsCKyaQPbzeb6cN0fdGSdG4ktwvR04eFNEkquo_3aKu2GmUKTD0XcRx9dYrfXjgY-X1DDTVs1YND2gRhdx7FFEeBVjtbj2UqmG3Rvd4IcHGe7OnYWw2MHDcol68SsR1KckXWwWREz7YTGUnDB2M1kx_H4W2mjclytnlHOnYU3RflegRPeSTbdzUZJvGKXCCz45luHkQWN_4DExE76D-9YqbFIz-RY5yL4h-Zs-i2xjm2K-4xCMM9nQIOqhLMqixIZQ2ldDAidKoYtbs5ppzbcBLyrZM96bq9DwRBY3aacqWdlRd-TfX0wv5KO4fo0sSh5FsuhuN0zcEV_NNXgqIEM_p14EcPqgbrAvCBQ8os70TRBQLXiF0EniSofGjxwF8kQvUk3C6Wfc8cTTeN-E6GxCVTn91HBwA1iSEZlRLMVb8_BcRJNqwbgnb_07jR6-eo42u88CR3KQdAWwbQRdMxsURFwZ0ujHXVGG0Ll6qCFBcHXWyDO1x1yHdHnw8_8yF26pnA2iPzrFR-8glMgIA-639sLuGAxjO1_ZuvJ9CAB41Az9S_jaZwaWy215Hk4-BRYD-MKmHtonwo3rrxhE67WJgbbu14efsw5nT6ow961pffgwXov5VA1Rg7nv1E8RvQOx7umWW6o8R4W6L8f2COsmPTXfgwIjoJKkjhUqAQ8ceG7cM0ET-38yaC0ObU8EkXfdGGgxI28qTEZWczG66_iM4hw7QEGCY5Cz2kbO6LETAiw9OsSigtBvDS7f0Ou0bZ41pdK7G3FmvdZAnjWPjObnDF4k4uWfn7mzt0fgj3FyqK20JezRDyGuAbUUhOvtZpc9sJpzxR34eXEZTouuALrHcGuNij4z6rx51FrQsaMtiup8QVrhtZbXtKLMYnWYSbkhuTeN2wY-xV1ZUsQlakIZszzGF7kuIG87KKWMpuPMvbXjz6Pp_gWJiIC6aQuk8xl5g0iBPycf_6Q-MtpuYxzNE2TpI1RyR9mHeXmteoRzrFiWp7yEC-QGNFyAJgxTqxM3CjHh1Jt6IddOsmn89rUo1dZM2Smijv_fbIv3avXLkIPX1KZjILeJCtpU0wAdsihDaRiRgDdx8fG__F8zuP0n7ziHas73cwrfg-Ujr6DhC0gTNxyd9dDA_oho9N7CQcy6EFmfNF2te7zpLony0859jtRv2t1TnpzAa1VvMK4u6mXuJ2XDo04_6GzLO3aPHinMdl1BcIAWnqAqWAu3euGFLTHOhXlfijut9N1OCifd_zWjhVtzlR39uFeCQBU5DyQArzQurdoMx8U1ETsnWgElxGSStRW-YQoPsAJ87eg9trqKspFpTVlAVN3t1GtoEAEhcwhe81SDssLmKGLc.7PqS6jRGTIfgTPlO7Ognvg; __cf_bm=VMWoAKEB45hQSwxXtnYXcurPaGZDJS4dMi6dIMFLwdw-1700355394-0-ATVsbq97iCaTaJbtYr8vtg1Zlbs3nLrJLKVBHYa2Jn7hhkGclqAy8Gbyn5ePEhDRqj93MsQmtayfYLqY5n4WiLY=; __cflb=0H28vVfF4aAyg2hkHFH9CkdHRXPsfCUf6VpYf2kz3RX'
         }
         async with StreamSession(
             proxies={"https": proxy},
@@ -95,24 +95,22 @@ async def create_async_generator(
             headers=headers,
             timeout=timeout
         ) as session:
-            data = {
-                "action": action,
-                "arkose_token": await get_arkose_token(proxy, timeout),
-                "conversation_id": conversation_id,
-                "parent_message_id": parent_id,
-                "model": models[model],
-                "history_and_training_disabled": history_disabled and not auto_continue,
-            }
-            if action != "continue":
-                data["messages"] = [{
-                    "id": str(uuid.uuid4()),
-                    "author": {"role": "user"},
-                    "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
-                }]
-            first = True
             end_turn = EndTurn()
-            while first or auto_continue and not end_turn.is_end:
-                first = False
+            while not end_turn.is_end:
+                data = {
+                    "action": action,
+                    "arkose_token": await get_arkose_token(proxy, timeout),
+                    "conversation_id": conversation_id,
+                    "parent_message_id": parent_id,
+                    "model": models[model],
+                    "history_and_training_disabled": history_disabled and not auto_continue,
+                }
+                if action != "continue":
+                    data["messages"] = [{
+                        "id": str(uuid.uuid4()),
+                        "author": {"role": "user"},
+                        "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+                    }]
                 async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
                     try:
                         response.raise_for_status()
@@ -120,43 +118,38 @@ async def create_async_generator(
                         raise RuntimeError(f"Error {response.status_code}: {await response.text()}")
                     last_message = 0
                     async for line in response.iter_lines():
-                        if line.startswith(b"data: "):
-                            line = line[6:]
-                            if line == b"[DONE]":
-                                break
-                            try:
-                                line = json.loads(line)
-                            except:
-                                continue
-                            if "message" not in line:
-                                continue
-                            if "error" in line and line["error"]:
-                                raise RuntimeError(line["error"])
-                            if "message_type" not in line["message"]["metadata"]:
-                                continue
-                            if line["message"]["author"]["role"] != "assistant":
-                                continue
-                            if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
-                                conversation_id = line["conversation_id"]
-                                parent_id = line["message"]["id"]
-                                if response_fields:
-                                    response_fields = False
-                                    yield ResponseFields(conversation_id, parent_id, end_turn)
-                                new_message = line["message"]["content"]["parts"][0]
-                                yield new_message[last_message:]
-                                last_message = len(new_message)
-                            if "finish_details" in line["message"]["metadata"]:
-                                if line["message"]["metadata"]["finish_details"]["type"] == "max_tokens":
-                                    end_turn.end()
-
-                data = {
-                    "action": "continue",
-                    "arkose_token": await get_arkose_token(proxy, timeout),
-                    "conversation_id": conversation_id,
-                    "parent_message_id": parent_id,
-                    "model": models[model],
-                    "history_and_training_disabled": False,
-                }
+                        if not line.startswith(b"data: "):
+                            continue
+                        line = line[6:]
+                        if line == b"[DONE]":
+                            break
+                        try:
+                            line = json.loads(line)
+                        except:
+                            continue
+                        if "message" not in line:
+                            continue
+                        if "error" in line and line["error"]:
+                            raise RuntimeError(line["error"])
+                        if "message_type" not in line["message"]["metadata"]:
+                            continue
+                        if line["message"]["author"]["role"] != "assistant":
+                            continue
+                        if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
+                            conversation_id = line["conversation_id"]
+                            parent_id = line["message"]["id"]
+                            if response_fields:
+                                response_fields = False
+                                yield ResponseFields(conversation_id, parent_id, end_turn)
+                            new_message = line["message"]["content"]["parts"][0]
+                            yield new_message[last_message:]
+                            last_message = len(new_message)
+                        if "finish_details" in line["message"]["metadata"]:
+                            if line["message"]["metadata"]["finish_details"]["type"] == "stop":
+                                end_turn.end()
+                if not auto_continue:
+                    break
+                action = "continue"
                 await asyncio.sleep(5)
 
     @classmethod
@@ -167,7 +160,7 @@ def browse() -> str:
                 from selenium.webdriver.support.ui import WebDriverWait
                 from selenium.webdriver.support import expected_conditions as EC
 
-                driver = get_browser("~/openai", proxy=proxy)
+                driver = get_browser(proxy=proxy)
             except ImportError:
                 return
             try:
@@ -193,18 +186,6 @@ async def get_access_token(cls, proxy: str = None) -> str:
             raise RuntimeError("Read access token failed")
         return cls._access_token
 
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-            ("access_token", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
     config = {
@@ -293,7 +274,7 @@ async def do_continue(self, **kwargs) -> Response:
 
     async def variant(self, **kwargs) -> Response:
         if self.action != "next":
-            raise RuntimeError("Can't create variant with continue or variant request.")
+            raise RuntimeError("Can't create variant from continue or variant request.")
         return await OpenaiChat.create(
             **self._options,
             messages=self._messages,
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 1c8c97d74db..99f6945b3bc 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -4,7 +4,8 @@
 
 from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
 
 models = {
     "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
@@ -33,7 +34,7 @@ def create_completion(
         messages: Messages,
         stream: bool,
         proxy: str = None,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         user_data_dir: str = None,
         headless: bool = True,
         **kwargs
@@ -44,7 +45,7 @@ def create_completion(
             raise ValueError(f"Model are not supported: {model}")
         prompt = format_prompt(messages)
 
-        session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+        session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
         with session as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
@@ -80,8 +81,8 @@ class ProxiedWebSocket extends WebSocket {
                 wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
             except:
                 # Reopen browser for login
-                if not web_driver:
-                    driver = session.reopen(headless=False)
+                if not webdriver:
+                    driver = session.reopen()
                     driver.get(f"{cls.url}/{models[model]['name']}")
                     wait = WebDriverWait(driver, 240)
                     wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 4570fd9fad9..d7be98ac8ee 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -60,18 +60,3 @@ def create_completion(
             token = completion_chunk['text']
             if token != None:
                 yield token
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("top_p", "int"),
-            ("model", "str"),
-            ("auth", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index cf33f0c6579..49ee174b8c0 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -4,7 +4,8 @@
 
 from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
 
 models = {
     "theb-ai": "TheB.AI",
@@ -44,14 +45,14 @@ def create_completion(
         messages: Messages,
         stream: bool,
         proxy: str = None,
-        web_driver: WebDriver = None,
+        webdriver: WebDriver = None,
         virtual_display: bool = True,
         **kwargs
     ) -> CreateResult:
         if model in models:
             model = models[model]
         prompt = format_prompt(messages)
-        web_session = WebDriverSession(web_driver, virtual_display=virtual_display, proxy=proxy)
+        web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
         with web_session as driver:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
@@ -61,22 +62,16 @@ def create_completion(
             # Register fetch hook
             script = """
 window._fetch = window.fetch;
-window.fetch = (url, options) => {
+window.fetch = async (url, options) => {
     // Call parent fetch method
-    const result = window._fetch(url, options);
+    const response = await window._fetch(url, options);
     if (!url.startsWith("/api/conversation")) {
         return result;
     }
-    // Load response reader
-    result.then((response) => {
-        if (!response.body.locked) {
-            window._reader = response.body.getReader();
-        }
-    });
-    // Return dummy response
-    return new Promise((resolve, reject) => {
-        resolve(new Response(new ReadableStream()))
-    });
+    // Copy response
+    copy = response.clone();
+    window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+    return copy;
 }
 window._last_message = "";
 """
@@ -97,7 +92,6 @@ def create_completion(
 
             wait = WebDriverWait(driver, 240)
             wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
-            time.sleep(200)
 
             try:
                 driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
                 driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
@@ -134,9 +128,8 @@ def create_completion(
                 if (chunk['done']) {
                     return null;
                 }
-                text = (new TextDecoder()).decode(chunk['value']);
                 message = '';
-                text.split('\\r\\n').forEach((line, index) => {
+                chunk['value'].split('\\r\\n').forEach((line, index) => {
                     if (line.startsWith('data: ')) {
                         try {
                             line = JSON.parse(line.substring('data: '.length));
diff --git a/g4f/Provider/webdriver.py b/g4f/Provider/webdriver.py
new file mode 100644
index 00000000000..da3b13ed762
--- /dev/null
+++ b/g4f/Provider/webdriver.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import time
+from platformdirs import user_config_dir
+try:
+    from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+    class WebDriver():
+        pass
+try:
+    from undetected_chromedriver import Chrome, ChromeOptions
+except ImportError:
+    class Chrome():
+        def __init__():
+            raise RuntimeError('Please install the "undetected_chromedriver" package')
+    class ChromeOptions():
+        def add_argument():
+            pass
+try:
+    from pyvirtualdisplay import Display
+    has_pyvirtualdisplay = True
+except ImportError:
+    has_pyvirtualdisplay = False
+
+def get_browser(
+    user_data_dir: str = None,
+    headless: bool = False,
+    proxy: str = None,
+    options: ChromeOptions = None
+) -> Chrome:
+    if user_data_dir == None:
+        user_data_dir = user_config_dir("g4f")
+    if proxy:
+        if not options:
+            options = ChromeOptions()
+        options.add_argument(f'--proxy-server={proxy}')
+    return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+
+class WebDriverSession():
+    def __init__(
+        self,
+        webdriver: WebDriver = None,
+        user_data_dir: str = None,
+        headless: bool = False,
+        virtual_display: bool = False,
+        proxy: str = None,
+        options: ChromeOptions = None
+    ):
+        self.webdriver = webdriver
+        self.user_data_dir = user_data_dir
+        self.headless = headless
+        self.virtual_display = None
+        if has_pyvirtualdisplay and virtual_display:
+            self.virtual_display = Display(size=(1920,1080))
+        self.proxy = proxy
+        self.options = options
+        self.default_driver = None
+
+    def reopen(
+        self,
+        user_data_dir: str = None,
+        headless: bool = False,
+        virtual_display: bool = False
+    ) -> WebDriver:
+        if user_data_dir == None:
+            user_data_dir = self.user_data_dir
+        if self.default_driver:
+            self.default_driver.quit()
+        if not virtual_display and self.virtual_display:
+            self.virtual_display.stop()
+            self.virtual_display = None
+        self.default_driver = get_browser(user_data_dir, headless, self.proxy)
+        return self.default_driver
+
+    def __enter__(self) -> WebDriver:
+        if self.webdriver:
+            return self.webdriver
+        if self.virtual_display:
+            self.virtual_display.start()
+        self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
+        return self.default_driver
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.default_driver:
+            try:
+                self.default_driver.close()
+            except:
+                pass
+            time.sleep(0.1)
+            self.default_driver.quit()
+        if self.virtual_display:
+            self.virtual_display.stop()
\ No newline at end of file

From ba9d5ed6bacee835d597c76f528b790be33c8010 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 20 Nov 2023 14:02:12 +0100
Subject: [PATCH 04/21] Add translate readme module

Translate readme to German
---
 README-DE.md                 | 726 +++++++++++++++++++++++++++++++++++
 README.md                    |  25 +-
 etc/tool/translate_readme.py |  88 +++++
 3 files changed, 826 insertions(+), 13 deletions(-)
 create mode 100644 README-DE.md
 create mode 100644 etc/tool/translate_readme.py

diff --git a/README-DE.md b/README-DE.md
new file mode 100644
index 00000000000..a27fdccbdbc
--- /dev/null
+++ b/README-DE.md
@@ -0,0 +1,726 @@
+
+Open in EN
+
+
+![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
+
+Kauf mir einen Kaffee auf ko-fi.com
+
+> Durch die Nutzung dieses Repositories oder jeglichen damit verbundenen Code stimmen Sie dem [Rechtshinweis](LEGAL_NOTICE.md) zu. Der Autor ist nicht verantwortlich für Kopien, Forks, erneute Uploads durch andere Benutzer oder sonstige mit GPT4Free verbundene Aktivitäten. Dies ist das einzige Konto und Repository des Autors. Um Identitätsdiebstahl oder unverantwortliche Handlungen zu verhindern, halten Sie sich bitte an die GNU GPL-Lizenz, die dieses Repository verwendet.
+
+```sh
+pip install -U g4f
+```
+
+
+## 🆕 Was gibt es Neues
+
+- Tritt unserem Telegram-Kanal bei: [t.me/g4f_channel](https://telegram.me/g4f_channel)
+- Tritt unserer Discord-Gruppe bei: [discord.gg/XfybzPXPH5](https://discord.gg/XfybzPXPH5)
+- Erkunde die g4f-Dokumentation (unvollständig): [g4f.mintlify.app](https://g4f.mintlify.app) | Trage zur Dokumentation bei: [github.com/xtekky/gpt4free-docs](https://github.com/xtekky/gpt4free-docs)
+
+
+## 📚 Inhaltsverzeichnis
+
+- [🆕 Was ist neu](#-was-ist-neu)
+- [📚 Inhaltsverzeichnis](#-inhaltsverzeichnis)
+- [🛠️ Erste Schritte](#️-erste-schritte)
+  - [Voraussetzungen:](#voraussetzungen)
+  - [Projekt einrichten:](#projekt-einrichten)
+    - [Installation über PyPi](#installation-über-pypi)
+    - [oder](#oder)
+    - [Einrichten mit Docker:](#einrichten-mit-docker)
+- [💡 Verwendung](#-verwendung)
+  - [Das `g4f` Paket](#das-g4f-paket)
+    - [ChatCompletion](#chatcompletion)
+      - [Vervollständigung](#vervollständigung)
+      - [Anbieter](#anbieter)
+      - [Cookies erforderlich](#cookies-erforderlich)
+      - [Async-Unterstützung](#async-unterstützung)
+      - [Proxy- und Timeout-Unterstützung](#proxy-und-timeout-unterstützung)
+  - [Interference openai-proxy API (Verwendung mit openai Python-Paket)](#interference-openai-proxy-api-verwendung-mit-openai-python-paket)
+    - [API von PyPi-Paket ausführen](#api-von-pypi-paket-ausführen)
+    - [API von Repository ausführen](#api-von-repository-ausführen)
+- [🚀 Anbieter und Modelle](#-anbieter-und-modelle)
+  - [GPT-4](#gpt-4)
+  - [GPT-3.5](#gpt-35)
+  - [Andere](#andere)
+  - [Modelle](#modelle)
+- [🔗 Verwandte GPT4Free-Projekte](#-verwandte-gpt4free-projekte)
+- [🤝 Mitwirken](#-mitwirken)
+  - [Anbieter mit KI-Tool erstellen](#anbieter-mit-ki-tool-erstellen)
+  - [Anbieter erstellen](#anbieter-erstellen)
+- [🙌 Mitwirkende](#-mitwirkende)
+- [©️ Urheberrecht](#️-urheberrecht)
+- [⭐ Sternenhistorie](#-sternenhistorie)
+- [📄 Lizenz](#-lizenz)
+
+
+## 🛠️ Erste Schritte
+
+#### Voraussetzungen:
+
+1. [Python herunterladen und installieren](https://www.python.org/downloads/) (Version 3.10+ wird empfohlen).
+
+#### Projekt einrichten:
+
+##### Installation über pypi
+
+```
+pip install -U g4f
+```
+
+##### oder
+
+1. Klonen Sie das GitHub-Repository:
+
+```
+git clone https://github.com/xtekky/gpt4free.git
+```
+
+2. Navigieren Sie zum Projektverzeichnis:
+
+```
+cd gpt4free
+```
+
+3. (Empfohlen) Erstellen Sie eine Python-Virtual-Umgebung:
+Sie können der [Python-Offiziellen Dokumentation](https://docs.python.org/3/tutorial/venv.html) für virtuelle Umgebungen folgen.
+
+```
+python3 -m venv venv
+```
+
+4. Aktivieren Sie die virtuelle Umgebung:
+   - Unter Windows:
+   ```
+   .\venv\Scripts\activate
+   ```
+   - Unter macOS und Linux:
+   ```
+   source venv/bin/activate
+   ```
+5. Installieren Sie die erforderlichen Python-Pakete aus `requirements.txt`:
+
+```
+pip install -r requirements.txt
+```
+
+6. Erstellen Sie eine Datei `test.py` im Stammverzeichnis und beginnen Sie mit der Verwendung des Repositories. Weitere Anweisungen finden Sie unten
+
+```py
+import g4f
+
+...
+```
+
+##### Einrichten mit Docker:
+
+Wenn Docker installiert ist, können Sie das Projekt ohne manuelle Installation von Abhängigkeiten einfach einrichten und ausführen.
+
+1. Stellen Sie zunächst sicher, dass sowohl Docker als auch Docker Compose installiert sind.
+
+   - [Docker installieren](https://docs.docker.com/get-docker/)
+   - [Docker Compose installieren](https://docs.docker.com/compose/install/)
+
+2. Klonen Sie das GitHub-Repo:
+
+```bash
+git clone https://github.com/xtekky/gpt4free.git
+```
+
+3. Navigieren Sie zum Projektverzeichnis:
+
+```bash
+cd gpt4free
+```
+
+4. Erstellen Sie das Docker-Image:
+
+```bash
+docker-compose build
+```
+
+5. Starten Sie den Dienst mit Docker Compose:
+
+```bash
+docker-compose up
+```
+
+Ihr Server wird jetzt unter `http://localhost:1337` ausgeführt. Sie können mit der API interagieren oder Ihre Tests wie gewohnt ausführen.
+
+Um die Docker-Container zu stoppen, führen Sie einfach aus:
+
+```bash
+docker-compose down
+```
+
+> [!Note]
+> Wenn Sie Docker verwenden, werden alle Änderungen, die Sie an Ihren lokalen Dateien vornehmen, im Docker-Container durch die Volumenabbildung in der `docker-compose.yml`-Datei widergespiegelt. Wenn Sie jedoch Abhängigkeiten hinzufügen oder entfernen, müssen Sie das Docker-Image mit `docker-compose build` neu erstellen.
+
+
+## 💡 Verwendung
+
+### Das `g4f` Paket
+
+#### ChatCompletion
+
+```python
+import g4f
+
+g4f.debug.logging = True  # Aktiviere das Protokollieren
+g4f.check_version = False  # Deaktiviere die automatische Versionsüberprüfung
+print(g4f.version)  # Überprüfe die Version
+print(g4f.Provider.Ails.params)  # Unterstützte Argumente
+
+# Automatische Auswahl des Anbieters
+
+# Gestreamte Vervollständigung
+response = g4f.ChatCompletion.create(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Hallo"}],
+    stream=True,
+)
+
+for message in response:
+    print(message, flush=True, end='')
+
+# Normale Antwort
+response = g4f.ChatCompletion.create(
+    model=g4f.models.gpt_4,
+    messages=[{"role": "user", "content": "Hallo"}],
+)  # Alternative Modellkonfiguration
+
+print(response)
+```
+
+##### Vervollständigung
+
+```python
+import g4f
+
+erlaubte_modelle = [
+    'code-davinci-002',
+    'text-ada-001',
+    'text-babbage-001',
+    'text-curie-001',
+    'text-davinci-002',
+    'text-davinci-003'
+]
+
+response = g4f.Completion.create(
+    model='text-davinci-003',
+    prompt='sage, dass dies ein Test ist'
+)
+
+print(response)
+```
+
+##### Anbieter
+
+```python
+import g4f
+
+from g4f.Provider import (
+    AItianhu,
+    Aichat,
+    Bard,
+    Bing,
+    ChatBase,
+    ChatgptAi,
+    OpenaiChat,
+    Vercel,
+    You,
+    Yqcloud,
+)
+
+# Festlegen des Anbieters
+response = g4f.ChatCompletion.create(
+    model="gpt-3.5-turbo",
+    provider=g4f.Provider.Aichat,
+    messages=[{"role": "user", "content": "Hallo"}],
+    stream=True,
+)
+
+for message in response:
+    print(message)
+```
+
+##### Verwendung des Browsers
+
+Einige Anbieter verwenden einen Browser, um den Bot-Schutz zu umgehen.
+Sie verwenden den Selenium-Webtreiber, um den Browser zu steuern.
+Die Browsereinstellungen und die Anmeldedaten werden in einem benutzerdefinierten Verzeichnis gespeichert.
+Wenn der Headless-Modus aktiviert ist, werden die Browserfenster unsichtbar geladen.
+Aus Leistungsgründen wird empfohlen, die Browserinstanzen wiederzuverwenden
+und sie am Ende selbst zu schließen:
+
+```python
+import g4f
+from undetected_chromedriver import Chrome, ChromeOptions
+from g4f.Provider import (
+    Bard,
+    Poe,
+    AItianhuSpace,
+    MyShell,
+    Phind,
+    PerplexityAi,
+)
+
+options = ChromeOptions()
+options.add_argument("--incognito")
+browser = Chrome(options=options, headless=True)
+for idx in range(10):
+    response = g4f.ChatCompletion.create(
+        model=g4f.models.default,
+        provider=g4f.Provider.Phind,
+        messages=[{"role": "user", "content": "Schlage mir einen Namen vor."}],
+        browser=browser
+    )
+    print(f"{idx}:", response)
+browser.quit()
+```
+
+##### Erforderliche Cookies
+
+Cookies sind für die ordnungsgemäße Funktion einiger Dienstanbieter unerlässlich. Es ist unerlässlich, eine aktive Sitzung aufrechtzuerhalten, die in der Regel durch das Anmelden in Ihrem Konto erreicht wird.
+
+Wenn Sie das g4f-Paket lokal ausführen, ruft das Paket automatisch Cookies aus Ihrem Webbrowser ab, indem es die `get_cookies`-Funktion verwendet. Wenn Sie es jedoch nicht lokal ausführen, müssen Sie die Cookies manuell bereitstellen, indem Sie sie als Parameter unter Verwendung des `cookies`-Parameters übergeben.
+
+```python
+import g4f
+
+from g4f.Provider import (
+    Bing,
+    HuggingChat,
+    OpenAssistant,
+)
+
+# Verwendung
+response = g4f.ChatCompletion.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "Hallo"}],
+    provider=Bing,
+    #cookies=g4f.get_cookies(".google.com"),
+    cookies={"cookie_name": "value", "cookie_name2": "value2"},
+    auth=True
+)
+```
+
+##### Unterstützung für asynchrone Ausführung
+
+Um die Geschwindigkeit und Gesamtleistung zu verbessern, führen Sie Anbieter asynchron aus. Die Gesamtausführungszeit wird durch die Dauer der langsamsten Anbieterausführung bestimmt.
+
+```python
+import g4f
+import asyncio
+
+_providers = [
+    g4f.Provider.Aichat,
+    g4f.Provider.ChatBase,
+    g4f.Provider.Bing,
+    g4f.Provider.GptGo,
+    g4f.Provider.You,
+    g4f.Provider.Yqcloud,
+]
+
+async def run_provider(provider: g4f.Provider.BaseProvider):
+    try:
+        response = await g4f.ChatCompletion.create_async(
+            model=g4f.models.default,
+            messages=[{"role": "user", "content": "Hallo"}],
+            provider=provider,
+        )
+        print(f"{provider.__name__}:", response)
+    except Exception as e:
+        print(f"{provider.__name__}:", e)
+
+async def run_all():
+    calls = [
+        run_provider(provider) for provider in _providers
+    ]
+    await asyncio.gather(*calls)
+
+asyncio.run(run_all())
+```
+
+##### Unterstützung für Proxy und Timeout
+
+Alle Anbieter unterstützen das Angeben eines Proxy und das Erhöhen des Timeouts in den Erstellungsfunktionen.
+
+```python
+import g4f
+
+response = g4f.ChatCompletion.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "Hallo"}],
+    proxy="http://host:port",
+    # oder socks5://user:pass@host:port
+    timeout=120,  # in Sekunden
+)
+
+print(f"Ergebnis:", response)
+```
+
+### Interference openai-proxy API (Verwendung mit dem openai Python-Paket)
+
+#### Führen Sie die Interference API aus dem PyPi-Paket aus
+
+```python
+from g4f.api import run_api
+
+run_api()
+```
+
+#### Führen Sie die Interference API aus dem Repository aus
+
+Wenn Sie die Einbettungsfunktion verwenden möchten, benötigen Sie einen Hugging Face-Token. Sie können einen unter [Hugging Face Tokens](https://huggingface.co/settings/tokens) erhalten. Stellen Sie sicher, dass Ihre Rolle auf Schreiben eingestelltt ist.
Wenn Sie Ihren Token haben, verwenden Sie ihn einfach anstelle des OpenAI-API-Schlüssels. + +Server ausführen: + +```sh +g4f api +``` + +oder + +```sh +python -m g4f.api +``` + +```python +import openai + +# Setzen Sie Ihren Hugging Face-Token als API-Schlüssel, wenn Sie Einbettungen verwenden +# Wenn Sie keine Einbettungen verwenden, lassen Sie es leer +openai.api_key = "IHR_HUGGING_FACE_TOKEN" # Ersetzen Sie dies durch Ihren tatsächlichen Token + +# Setzen Sie die API-Basis-URL, falls erforderlich, z.B. für eine lokale Entwicklungsumgebung +openai.api_base = "http://localhost:1337/v1" + +def main(): + chat_completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "schreibe ein Gedicht über einen Baum"}], + stream=True, + ) + + if isinstance(chat_completion, dict): + # Nicht gestreamt + print(chat_completion.choices[0].message.content) + else: + # Gestreamt + for token in chat_completion: + content = token["choices"][0]["delta"].get("content") + if content is not None: + print(content, end="", flush=True) + +if __name__ == "__main__": + main() +``` + +## 🚀 Anbieter und Modelle + +### GPT-4 + +| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth | +| ------ | ------- | ------- | ----- | ------ | ------ | ---- | +| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [chat.geekgpt.org](https://chat.geekgpt.org) | `g4f.Provider.GeekGpt` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ✔️ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [www.phind.com](https://www.phind.com) | `g4f.Provider.Phind` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | + +### GPT-3.5 + +| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth | +| ------ | ------- | ------- | ----- | ------ | ------ | ---- | +| [www.aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [e.aiask.me](https://e.aiask.me) | `g4f.Provider.AiAsk` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chat-gpt.org](https://chat-gpt.org/chat) | `g4f.Provider.Aichat` | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [www.chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chat-shared2.zhile.io](https://chat-shared2.zhile.io) | 
`g4f.Provider.FakeGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [freegpts1.aifree.site](https://freegpts1.aifree.site/) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [gptalk.net](https://gptalk.net) | `g4f.Provider.GPTalk` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [hashnode.com](https://hashnode.com) | `g4f.Provider.Hashnode` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [noowai.com](https://noowai.com) | `g4f.Provider.NoowAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | +| [theb.ai](https://theb.ai) | `g4f.Provider.Theb` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | +| [sdk.vercel.ai](https://sdk.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [chat9.yqcloud.top](https://chat9.yqcloud.top/) | `g4f.Provider.Yqcloud` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [chat.acytoo.com](https://chat.acytoo.com) | `g4f.Provider.Acytoo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [aibn.cc](https://aibn.cc) | `g4f.Provider.Aibn` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [ai.ls](https://ai.ls) | `g4f.Provider.Ails` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chat.chatgptdemo.net](https://chat.chatgptdemo.net) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chatgptduo.com](https://chatgptduo.com) | `g4f.Provider.ChatgptDuo` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [cromicle.top](https://cromicle.top) | `g4f.Provider.Cromicle` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [opchatgpts.net](https://opchatgpts.net) | `g4f.Provider.Opchatgpts` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | +| [chat.ylokh.xyz](https://chat.ylokh.xyz) | `g4f.Provider.Ylokh` | ✔️ | ❌ | ✔️ | 
![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | + +### Andere + +| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth | +| ------ | ------- | ------- | ----- | ------ | ------ | ---- | +| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | +| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | +| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ | +| [www.llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama2` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | +| [open-assistant.io](https://open-assistant.io/chat) | `g4f.Provider.OpenAssistant` | ❌ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ | + +### Modelle + +| Model | Base Provider | Provider | Website | +| --------------------------------------- | ------------- | ------------------- | ------------------------------------------- | +| palm | Google | g4f.Provider.Bard | [bard.google.com](https://bard.google.com/) | +| h2ogpt-gm-oasst1-en-2048-falcon-7b-v3 | Hugging Face | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) | +| h2ogpt-gm-oasst1-en-2048-falcon-40b-v1 | Hugging Face | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) | +| h2ogpt-gm-oasst1-en-2048-open-llama-13b | Hugging Face | g4f.Provider.H2o | [www.h2o.ai](https://www.h2o.ai/) | +| claude-instant-v1 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| claude-v1 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| claude-v2 | Anthropic | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| command-light-nightly | Cohere | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| command-nightly | Cohere | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| gpt-neox-20b | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| oasst-sft-1-pythia-12b | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| oasst-sft-4-pythia-12b-epoch-3.5 | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| santacoder | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| bloom | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| flan-t5-xxl | Hugging Face | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| code-davinci-002 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| gpt-3.5-turbo-16k | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| gpt-3.5-turbo-16k-0613 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| gpt-4-0613 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| text-ada-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| text-babbage-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| text-curie-001 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| text-davinci-002 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| text-davinci-003 | OpenAI | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/) | +| llama13b-v2-chat | Replicate | 
g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/)     |
+| llama7b-v2-chat                          | Replicate     | g4f.Provider.Vercel | [sdk.vercel.ai](https://sdk.vercel.ai/)     |
+
+
+## 🔗 Verwandte GPT4Free-Projekte
+
+| 🎁 Projects | ⭐ Stars | 📚 Forks | 🛎 Issues | 📬 Pull requests |
+| ----------- | ------- | -------- | --------- | ---------------- |
+| gpt4free | Stars | Forks | Issues | Pull Requests |
+| gpt4free-ts | Stars | Forks | Issues | Pull Requests |
+| Free AI API's & Potential Providers List | Stars | Forks | Issues | Pull Requests |
+| ChatGPT-Clone | Stars | Forks | Issues | Pull Requests |
+| ChatGpt Discord Bot | Stars | Forks | Issues | Pull Requests |
+| Nyx-Bot (Discord) | Stars | Forks | Issues | Pull Requests |
+| LangChain gpt4free | Stars | Forks | Issues | Pull Requests |
+| ChatGpt Telegram Bot | Stars | Forks | Issues | Pull Requests |
+| ChatGpt Line Bot | Stars | Forks | Issues | Pull Requests |
+| Action Translate Readme | Stars | Forks | Issues | Pull Requests |
+| Langchain Document GPT | Stars | Forks | Issues | Pull Requests |
+
+
+## 🤝 Mitwirken
+
+#### Erstellen Sie einen Anbieter mit AI-Tool
+
+Rufen Sie im Terminal das Skript `create_provider.py` auf:
+```bash
+python etc/tool/create_provider.py
+```
+1. Geben Sie Ihren Namen für den neuen Anbieter ein.
+2. Kopieren Sie den `cURL`-Befehl aus den Entwicklertools Ihres Browsers und fügen Sie ihn ein.
+3. Lassen Sie die KI den Anbieter für Sie erstellen.
+4. Passen Sie den Anbieter nach Ihren Bedürfnissen an.
+
+#### Anbieter erstellen
+
+1. Überprüfen Sie die aktuelle [Liste potenzieller Anbieter](https://github.com/zukixa/cool-ai-stuff#ai-chat-websites) oder finden Sie Ihre eigene Anbieterquelle!
+2. Erstellen Sie eine neue Datei in [g4f/Provider](./g4f/Provider) mit dem Namen des Anbieters.
+3. Implementieren Sie eine Klasse, die von [BaseProvider](./g4f/Provider/base_provider.py) erbt.
+
+```py
+from __future__ import annotations
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+class HogeService(AsyncGeneratorProvider):
+    url = "https://chat-gpt.com"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        yield ""
+```
+
+4. Hier können Sie die Einstellungen anpassen, zum Beispiel, wenn die Website Streaming unterstützt, setzen Sie `supports_stream` auf `True`...
+5. Schreiben Sie Code, um den Anbieter in `create_async_generator` anzufordern und die Antwort mit `yield` zurückzugeben, selbst wenn es sich um eine einmalige Antwort handelt. Zögern Sie nicht, sich bei anderen Anbietern inspirieren zu lassen.
+6. Fügen Sie den Namen des Anbieters in [`g4f/Provider/__init__.py`](./g4f/Provider/__init__.py) hinzu.
+
+```py
+from .HogeService import HogeService
+
+__all__ = [
+  HogeService,
+]
+```
+
+7. Sie sind fertig! Testen Sie den Anbieter, indem Sie ihn aufrufen:
+
+```py
+import g4f
+
+response = g4f.ChatCompletion.create(model='gpt-3.5-turbo', provider=g4f.Provider.PROVIDERNAME,
+                                    messages=[{"role": "user", "content": "test"}], stream=g4f.Provider.PROVIDERNAME.supports_stream)
+
+for message in response:
+    print(message, flush=True, end='')
+```
+
+
+## 🙌 Mitwirkende
+
+Eine Liste der Mitwirkenden ist [hier](https://github.com/xtekky/gpt4free/graphs/contributors) verfügbar.
+Die Datei [`Vercel.py`](https://github.com/xtekky/gpt4free/blob/main/g4f/Provider/Vercel.py) enthält Code von [vercel-llm-api](https://github.com/ading2210/vercel-llm-api) von [@ading2210](https://github.com/ading2210), der unter der [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt) lizenziert ist.
+Top 1 Mitwirkender: [@hlohaus](https://github.com/hlohaus)
+
+## ©️ Urheberrecht
+
+This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
+
+```
+xtekky/gpt4free: Copyright (C) 2023 xtekky
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+```
+
+## ⭐ Sternenverlauf
+
+Star History Chart
+
+## 📄 Lizenz
+
+Dieses Projekt steht unter der GNU_GPL_v3.0-Lizenz.
+
+(🔼 Zurück nach oben)
\ No newline at end of file
diff --git a/README.md b/README.md
index 6a1df499aef..7d2aa860152 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+
+  Öffnen in DE
+
+
 ![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
 
 Buy Me a Coffee at ko-fi.com
 
@@ -245,12 +249,7 @@ for message in response:
 
 ##### Using Browser
 
-Some providers using a browser to bypass the bot protection.
-They using the selenium webdriver to control the browser.
-The browser settings and the login data are saved in a custom directory.
-If the headless mode is enabled, the browser windows are loaded invisibly.
-For performance reasons, it is recommended to reuse the browser instances
-and close them yourself at the end:
+Some providers use a browser to bypass the bot protection. They use the Selenium webdriver to control the browser. The browser settings and the login data are saved in a custom directory. If the headless mode is enabled, the browser windows are loaded invisibly. For performance reasons, it is recommended to reuse the browser instances and close them yourself at the end:
 
 ```python
 import g4f
@@ -266,16 +265,16 @@ from g4f.Provider import (
 
 options = ChromeOptions()
 options.add_argument("--incognito");
-browser = Chrome(options=options, headless=True)
+webdriver = Chrome(options=options, headless=True)
 for idx in range(10):
     response = g4f.ChatCompletion.create(
         model=g4f.models.default,
         provider=g4f.Provider.Phind,
         messages=[{"role": "user", "content": "Suggest me a name."}],
-        browser=browser
+        webdriver=webdriver
     )
     print(f"{idx}:", response)
-browser.quit()
+webdriver.quit()
 ```
 
 ##### Cookies Required
 
@@ -605,7 +604,7 @@ if __name__ == "__main__":
 
 #### Create Provider with AI Tool
 
-Call in your terminal the "create_provider" script:
+Call in your terminal the `create_provider.py` script:
 ```bash
 python etc/tool/create_provider.py
 ```
@@ -628,8 +627,8 @@ from .base_provider import AsyncGeneratorProvider
 
 class HogeService(AsyncGeneratorProvider):
     url = "https://chat-gpt.com"
-    supports_gpt_35_turbo = True
     working = True
+    supports_gpt_35_turbo = True
 
     @classmethod
     async def create_async_generator(
@@ -644,7 +643,7 @@ class HogeService(AsyncGeneratorProvider):
 
 4. Here, you can adjust the settings, for example, if the website does support streaming, set `supports_stream` to `True`...
 5. Write code to request the provider in `create_async_generator` and `yield` the response, _even if_ it's a one-time response, do not hesitate to look at other providers for inspiration
-6. Add the Provider Name in [g4f/Provider/**init**.py](./g4f/Provider/__init__.py)
+6. Add the Provider Name in [`g4f/Provider/__init__.py`](./g4f/Provider/__init__.py)
 
 ```py
 from .HogeService import HogeService
 
 __all__ = [
   HogeService,
 ]
 ```
 
@@ -708,7 +707,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 
-This project is licensed under
-GNU_GPL_v3.0.
+This project is licensed under GNU_GPL_v3.0.
 
 
diff --git a/etc/tool/translate_readme.py b/etc/tool/translate_readme.py
new file mode 100644
index 00000000000..43bfdcde681
--- /dev/null
+++ b/etc/tool/translate_readme.py
@@ -0,0 +1,88 @@
+
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+g4f.debug.logging = True
+from g4f.debug import access_token
+provider = g4f.Provider.OpenaiChat
+
+iso = "DE"  # language code used in the output file name (README-DE.md)
+language = "german"
+translate_prompt = f"""
+Translate this markdown document to {language}.
+Don't translate or change inline code examples.
+```md
+"""
+keep_note = "Keep this: [!Note] as [!Note].\n"
+blacklist = [
+    '## ©️ Copyright',
+    '## 🚀 Providers and Models',
+    '## 🔗 Related GPT4Free Projects'
+]
+whitelist = [
+    "### Other",
+    "### Models"
+]
+
+def read_text(text):
+    # Extract the text between the first and the last code fence in the reply
+    start = end = 0
+    new = text.strip().split('\n')
+    for i, line in enumerate(new):
+        if line.startswith('```'):
+            if not start:
+                start = i + 1
+            end = i
+    return '\n'.join(new[start:end]).strip()
+
+async def translate(text):
+    prompt = translate_prompt + text.strip() + '\n```'
+    if "[!Note]" in text:
+        prompt = keep_note + prompt
+    result = read_text(await provider.create_async(
+        model="",
+        messages=[{"role": "user", "content": prompt}],
+        access_token=access_token
+    ))
+    if text.endswith("```") and not result.endswith("```"):
+        result += "\n```"
+    return result
+
+async def translate_part(part, i):
+    blacklisted = False
+    for headline in blacklist:
+        if headline in part:
+            blacklisted = True
+            break
+    if blacklisted:
+        # Translate only the headline and whitelisted sub-headlines
+        lines = part.split('\n')
+        lines[0] = await translate(lines[0])
+        part = '\n'.join(lines)
+        for trans in whitelist:
+            if trans in part:
+                part = part.replace(trans, await translate(trans))
+    else:
+        part = await translate(part)
+    print(f"[{i}] translated")
+    return part
+
+async def translate_readme(readme) -> str:
+    parts = readme.split('\n## ')
+    print(f"{len(parts)} parts...")
+    # The split consumed the "## " prefix of every part except the first
+    parts = await asyncio.gather(
+        *[translate_part(part if i == 0 else "## " + part, i) for i, part in enumerate(parts)]
+    )
+    return "\n\n".join(parts)
+
+with open("README.md", "r") as fp:
+    readme = fp.read()
+
+print("Translate readme...")
+readme = asyncio.run(translate_readme(readme))
+
+file = f"README-{iso}.md"
+with open(file, "w") as fp:
+    fp.write(readme)
+print(f'"{file}" saved')
\ No newline at end of file

From a9f15815cd3a7ce4567c924868414e94174af222 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 20 Nov 2023 14:02:51 +0100
Subject: [PATCH 05/21] Support stream in create_async

---
 g4f/__init__.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/g4f/__init__.py b/g4f/__init__.py
index faef79238ed..2c9ef7d765f 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
 from requests import get
 from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, RetryProvider
-from .typing import Messages, CreateResult, Union, List
+from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider
+from .typing import Messages, CreateResult, AsyncResult, Union, List
 from .
import debug
 
 version = '0.1.8.7'
@@ -80,13 +80,15 @@ async def create_async(model : Union[Model, str],
                        messages : Messages,
                        provider : Union[type[BaseProvider], None] = None,
                        stream : bool = False,
-                       ignored : List[str] = None, **kwargs) -> str:
-
-    if stream:
-        raise ValueError('"create_async" does not support "stream" argument')
-
+                       ignored : List[str] = None,
+                       **kwargs) -> Union[AsyncResult, str]:
     model, provider = get_model_and_provider(model, provider, False, ignored)
 
+    if stream:
+        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+            # create_async_generator returns an async generator and must not be awaited
+            return provider.create_async_generator(model.name, messages, **kwargs)
+        raise ValueError(f'{provider.__name__} does not support "stream" argument')
+
     return await provider.create_async(model.name, messages, **kwargs)
 
 class Completion:

From 4a6bcef394190b69c236dfcdd9697361afd92b59 Mon Sep 17 00:00:00 2001
From: egcash <85733533+egcash@users.noreply.github.com>
Date: Mon, 20 Nov 2023 21:57:50 +0530
Subject: [PATCH 06/21] Syntax error Fixed : GptGo.py

---
 g4f/Provider/GptGo.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index ac3f7fe8946..726c7a99af6 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -63,7 +63,7 @@ async def create_async_generator(
             if line["choices"][0]["finish_reason"] == "stop":
                 break
 
-            content = line["choices"][0]["delta"].get("content"):
+            content = line["choices"][0]["delta"].get("content")
             if content:
                 yield content
 
@@ -79,4 +79,4 @@ def params(cls):
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

From 60286431adac1205f7f233d0e5cfb846e452abbb Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Mon, 20 Nov 2023 17:28:23 +0100
Subject: [PATCH 07/21] Set min version for curl_cffi

---
 requirements.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index e09436dda50..96b5f3f5f92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 requests
 pycryptodome
-curl_cffi
+curl_cffi>=0.5.10b4
 aiohttp
 certifi
 browser_cookie3
@@ -23,6 +23,6 @@ flask
 py-arkose-generator
 asyncstdlib
 async-property
-selenium
+undetected-chromedriver
 asyncstdlib
-async_property
\ No newline at end of file
+async_property

From ad78589843d183d71b752068a033e1588c1b9e81 Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Mon, 20 Nov 2023 17:34:21 +0100
Subject: [PATCH 08/21] Update GptGo.py

---
 g4f/Provider/GptGo.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index be9979f2a05..442aa90dc52 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -62,5 +62,6 @@ async def create_async_generator(
             line = json.loads(line[len(start):-1])
             if line["choices"][0]["finish_reason"] == "stop":
                 break
-            if content := line["choices"][0]["delta"].get("content"):
-                yield content
\ No newline at end of file
+            content = line["choices"][0]["delta"].get("content")
+            if content:
+                yield content

From a2b803a56c9721cf4a3f8a265b78a1333f79a496 Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Mon, 20 Nov 2023 17:35:18 +0100
Subject: [PATCH 09/21] Update AItianhu.py

---
 g4f/Provider/AItianhu.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 05ee5a2030f..34187694d86 100644
---
a/g4f/Provider/AItianhu.py +++ b/g4f/Provider/AItianhu.py @@ -72,7 +72,8 @@ async def create_async_generator( if "detail" not in line: raise RuntimeError(f"Response: {line}") - if content := line["detail"]["choices"][0]["delta"].get( + content = line["detail"]["choices"][0]["delta"].get( "content" - ): - yield content \ No newline at end of file + ) + if content: + yield content From ec12b3096a8958a9a0feeac96da8254ae0bfa7a7 Mon Sep 17 00:00:00 2001 From: H Lohaus Date: Mon, 20 Nov 2023 18:02:09 +0100 Subject: [PATCH 10/21] Update README-DE.md --- README-DE.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README-DE.md b/README-DE.md index a27fdccbdbc..b1e71a0e83b 100644 --- a/README-DE.md +++ b/README-DE.md @@ -157,7 +157,7 @@ Um die Docker-Container zu stoppen, führen Sie einfach aus: docker-compose down ``` -> [!Note] +> [!Hinweis] > Wenn Sie Docker verwenden, werden alle Änderungen, die Sie an Ihren lokalen Dateien vornehmen, im Docker-Container durch die Volumenabbildung in der `docker-compose.yml`-Datei widergespiegelt. Wenn Sie jedoch Abhängigkeiten hinzufügen oder entfernen, müssen Sie das Docker-Image mit `docker-compose build` neu erstellen. @@ -196,7 +196,7 @@ response = g4f.ChatCompletion.create( print(response) ``` -##### Vervollständigung +##### Completion ```python import g4f @@ -723,4 +723,4 @@ along with this program. If not, see . -

-(🔼 Zurück nach oben)
\ No newline at end of file
+(🔼 Zurück nach oben)

From 7969a5690a767d6555a73fbffe8122ba2851057b Mon Sep 17 00:00:00 2001 From: H Lohaus Date: Mon, 20 Nov 2023 18:03:31 +0100 Subject: [PATCH 11/21] Update README-DE.md --- README-DE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-DE.md b/README-DE.md index b1e71a0e83b..c28ded284e9 100644 --- a/README-DE.md +++ b/README-DE.md @@ -157,7 +157,7 @@ Um die Docker-Container zu stoppen, führen Sie einfach aus: docker-compose down ``` -> [!Hinweis] +> [!Note] > Wenn Sie Docker verwenden, werden alle Änderungen, die Sie an Ihren lokalen Dateien vornehmen, im Docker-Container durch die Volumenabbildung in der `docker-compose.yml`-Datei widergespiegelt. Wenn Sie jedoch Abhängigkeiten hinzufügen oder entfernen, müssen Sie das Docker-Image mit `docker-compose build` neu erstellen. From 9140541179e1c2fe855acf1c2743e1800fd5052e Mon Sep 17 00:00:00 2001 From: abc <98614666+xtekky@users.noreply.github.com> Date: Mon, 20 Nov 2023 18:40:55 +0000 Subject: [PATCH 12/21] ~ | improve compatibility --- etc/tool/vercel.py | 4 +++- g4f/Provider/Chatgpt4Online.py | 4 +++- g4f/Provider/ChatgptAi.py | 4 +++- g4f/Provider/ChatgptDemo.py | 11 ++++++++--- g4f/Provider/ChatgptFree.py | 3 ++- g4f/Provider/ChatgptLogin.py | 13 ++++++++----- g4f/Provider/ChatgptX.py | 16 +++++++++++----- g4f/Provider/GptGod.py | 7 +++++-- g4f/Provider/Vercel.py | 3 +-- g4f/Provider/deprecated/CodeLinkAva.py | 4 +++- g4f/Provider/deprecated/Equing.py | 4 +++- g4f/Provider/deprecated/FastGpt.py | 6 ++++-- g4f/Provider/deprecated/Lockchat.py | 5 ++++- g4f/Provider/deprecated/Vitalentum.py | 4 +++- g4f/Provider/unfinished/ChatAiGpt.py | 7 +++++-- g4f/Provider/unfinished/MikuChat.py | 3 ++- setup.py | 2 +- 17 files changed, 69 insertions(+), 31 deletions(-) diff --git a/etc/tool/vercel.py b/etc/tool/vercel.py index 29856bb3e40..c5ce964c428 100644 --- a/etc/tool/vercel.py +++ b/etc/tool/vercel.py @@ -24,7 +24,9 @@ def get_model_info() -> dict[str, Any]: models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.=' for script in scripts: - if matches := re.findall(models_regex, script): + + matches = re.findall(models_regex, script) + if matches: models_str = matches[0] stop_sequences_regex = r"(?<=stopSequences:{value:\[)\D(?(.*?)<\/div>', response, - ): + ) + + if result: user_id = result.group(1) else: raise RuntimeError("No user id found") @@ -59,5 +62,7 @@ async def create_async_generator( async for line in response.content: if line.startswith(b"data: "): line = json.loads(line[6:-1]) - if chunk := line["choices"][0]["delta"].get("content"): + + chunk = line["choices"][0]["delta"].get("content") + if chunk: yield chunk \ No newline at end of file diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py index 48d6c396611..b9b2544762b 100644 --- a/g4f/Provider/ChatgptFree.py +++ b/g4f/Provider/ChatgptFree.py @@ -65,7 +65,8 @@ async def create_async( raise RuntimeError("No post id found") cls._post_id = result.group(1) - if result := re.search(r'data-nonce="(.*?)"', response): + result = re.search(r'data-nonce="(.*?)"', response) + if result: cls._nonce = result.group(1) else: diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py index 206e4a89f90..037e0a6ede9 100644 --- a/g4f/Provider/ChatgptLogin.py +++ b/g4f/Provider/ChatgptLogin.py @@ -45,10 +45,12 @@ async def create_async_generator( async with session.get(f"{cls.url}/chat/", proxy=proxy) as response: response.raise_for_status() response = await response.text() - if result := re.search( + result = re.search( r'