diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 6ad75f2ccae9..c087efe4ff60 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -66,6 +66,7 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: usage=RequestUsage(prompt_tokens=10, completion_tokens=5), thought="Calling pass function", cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "pass", "TERMINATE", @@ -144,18 +145,21 @@ async def test_run_with_tools_and_reflection() -> None: content=[FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function")], usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), CreateResult( finish_reason="stop", content="Hello", usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), CreateResult( finish_reason="stop", content="TERMINATE", usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), ], model_info={ @@ -246,6 +250,7 @@ async def test_run_with_parallel_tools() -> None: usage=RequestUsage(prompt_tokens=10, completion_tokens=5), thought="Calling pass and echo functions", cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "pass", "TERMINATE", @@ -331,6 +336,7 @@ async def test_run_with_parallel_tools_with_empty_call_ids() -> None: ], usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "pass", "TERMINATE", @@ -672,6 +678,7 @@ async def test_handoffs() -> None: ], usage=RequestUsage(prompt_tokens=42, completion_tokens=43), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, thought="Calling handoff function", ) ], @@ -1064,6 +1071,7 @@ async def test_list_chat_messages(monkeypatch: pytest.MonkeyPatch) -> None: content="Response to message 1", usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ) ] ) @@ -1269,6 +1277,7 @@ async def test_model_client_stream_with_tool_calls() -> None: finish_reason="function_calls", usage=RequestUsage(prompt_tokens=10, completion_tokens=5), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "Example response 2 to task", ] diff --git a/python/packages/autogen-agentchat/tests/test_code_executor_agent.py b/python/packages/autogen-agentchat/tests/test_code_executor_agent.py index ddfc5d45c072..e971ab3bf1b7 100644 --- a/python/packages/autogen-agentchat/tests/test_code_executor_agent.py +++ b/python/packages/autogen-agentchat/tests/test_code_executor_agent.py @@ -142,11 +142,6 @@ async def test_self_debugging_loop() -> None: numbers = [10, 20, 30, 40, 50] mean = sum(numbers) / len(numbers print("The mean is:", mean) -""".strip() - incorrect_code_result = """ - mean = sum(numbers) / len(numbers - ^ -SyntaxError: '(' was never closed """.strip() correct_code_block = """ numbers = [10, 20, 30, 40, 50] @@ -218,8 +213,8 @@ async def test_self_debugging_loop() -> None: elif isinstance(message, CodeExecutionEvent) and message_id == 1: # Step 2: First code execution assert ( - incorrect_code_result in message.to_text().strip() - ), f"Expected {incorrect_code_result} in execution result, got: {message.to_text().strip()}" + "SyntaxError: '(' was never closed" in message.to_text() + ), f"Expected SyntaxError in execution result, got: {message.to_text().strip()}" incorrect_code_execution_event = message elif isinstance(message, CodeGenerationEvent) and message_id == 2: diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index 37763e866950..6a1a0c8b033d 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -450,6 +450,7 @@ async def test_round_robin_group_chat_with_tools(runtime: AgentRuntime | None) - content=[FunctionCall(id="1", name="pass", arguments=json.dumps({"input": "pass"}))], usage=RequestUsage(prompt_tokens=0, completion_tokens=0), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "Hello", "TERMINATE", @@ -1267,6 +1268,7 @@ async def test_swarm_handoff_using_tool_calls(runtime: AgentRuntime | None) -> N content=[FunctionCall(id="1", name="handoff_to_agent2", arguments=json.dumps({}))], usage=RequestUsage(prompt_tokens=0, completion_tokens=0), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "Hello", "TERMINATE", @@ -1367,6 +1369,7 @@ async def test_swarm_with_parallel_tool_calls(runtime: AgentRuntime | None) -> N ], usage=RequestUsage(prompt_tokens=0, completion_tokens=0), cached=False, + raw_response={"id": "mock-id", "provider": "replay"}, ), "Hello", "TERMINATE", diff --git a/python/packages/autogen-core/src/autogen_core/models/_types.py b/python/packages/autogen-core/src/autogen_core/models/_types.py index 6fd2e5c3534d..13489e04c5d4 100644 --- a/python/packages/autogen-core/src/autogen_core/models/_types.py +++ b/python/packages/autogen-core/src/autogen_core/models/_types.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from pydantic import BaseModel, Field from typing_extensions import Annotated @@ -125,3 +125,6 @@ class CreateResult(BaseModel): thought: Optional[str] = None """The reasoning text for the completion if available. Used for reasoning models and additional text content besides function calls.""" + + raw_response: Optional[Dict[str, Any]] = None + """Raw response from the model API, useful for custom field access.""" diff --git a/python/packages/autogen-core/tests/test_tool_agent.py b/python/packages/autogen-core/tests/test_tool_agent.py index 727be91f6707..87deb9362214 100644 --- a/python/packages/autogen-core/tests/test_tool_agent.py +++ b/python/packages/autogen-core/tests/test_tool_agent.py @@ -113,6 +113,7 @@ async def create( usage=RequestUsage(prompt_tokens=0, completion_tokens=0), cached=False, logprobs=None, + raw_response={"id": "mock-id", "provider": "replay"}, ) return CreateResult( content="Done", @@ -120,6 +121,7 @@ async def create( usage=RequestUsage(prompt_tokens=0, completion_tokens=0), cached=False, logprobs=None, + raw_response={"id": "mock-id", "provider": "replay"}, ) def create_stream( diff --git a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/chat_completion_client_recorder.py b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/chat_completion_client_recorder.py index d9cb84a87c5d..8c3098a6c959 100644 --- a/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/chat_completion_client_recorder.py +++ b/python/packages/autogen-ext/src/autogen_ext/experimental/task_centric_memory/utils/chat_completion_client_recorder.py @@ -141,6 +141,7 @@ async def create( finish_reason=data.get("finish_reason", "stop"), usage=data.get("usage", RequestUsage(prompt_tokens=0, completion_tokens=0)), cached=True, + raw_response=data.get("raw_response", {"id": "mock-id", "provider": "replay"}), ) return result diff --git a/python/packages/autogen-ext/src/autogen_ext/models/anthropic/_anthropic_client.py b/python/packages/autogen-ext/src/autogen_ext/models/anthropic/_anthropic_client.py index 8fcec588fc68..6957bd656277 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/anthropic/_anthropic_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/anthropic/_anthropic_client.py @@ -646,6 +646,7 @@ async def create( usage=usage, cached=False, thought=thought, + raw_response=result, ) # Update usage statistics @@ -863,6 +864,12 @@ async def create_stream( # Just text content content = "".join(text_content) + future: asyncio.Task[Message] = asyncio.ensure_future( + self._client.messages.create(**request_args) # type: ignore + ) + + message_result: Message = cast(Message, await future) + # Create the final result result = CreateResult( finish_reason=normalize_stop_reason(stop_reason), @@ -870,6 +877,7 @@ async def create_stream( usage=usage, cached=False, thought=thought, + raw_response=message_result, ) # Emit the end event. diff --git a/python/packages/autogen-ext/src/autogen_ext/models/azure/_azure_ai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/azure/_azure_ai_client.py index 86cafce68661..14c1da89c90c 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/azure/_azure_ai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/azure/_azure_ai_client.py @@ -440,6 +440,7 @@ async def create( usage=usage, cached=False, thought=thought, + raw_response=result, ) self.add_usage(usage) diff --git a/python/packages/autogen-ext/src/autogen_ext/models/llama_cpp/_llama_cpp_completion_client.py b/python/packages/autogen-ext/src/autogen_ext/models/llama_cpp/_llama_cpp_completion_client.py index 32df1b2f67ef..9772c42a6291 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/llama_cpp/_llama_cpp_completion_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/llama_cpp/_llama_cpp_completion_client.py @@ -357,7 +357,11 @@ async def create( if not response_tool_calls and not response_text: logger.debug("DEBUG: No response text found. Returning empty response.") return CreateResult( - content="", usage=RequestUsage(prompt_tokens=0, completion_tokens=0), finish_reason="stop", cached=False + content="", + usage=RequestUsage(prompt_tokens=0, completion_tokens=0), + finish_reason="stop", + cached=False, + raw_response=response, ) # Create a CreateResult object @@ -373,6 +377,7 @@ async def create( usage=cast(RequestUsage, response["usage"]), finish_reason=normalize_stop_reason(finish_reason), # type: ignore cached=False, + raw_response=response, ) # If we are running in the context of a handler we can get the agent_id diff --git a/python/packages/autogen-ext/src/autogen_ext/models/ollama/_ollama_client.py b/python/packages/autogen-ext/src/autogen_ext/models/ollama/_ollama_client.py index 83699bbe548a..e58a01e64b3a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/ollama/_ollama_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/ollama/_ollama_client.py @@ -691,6 +691,7 @@ async def create( usage=usage, cached=False, logprobs=None, + raw_response=result, thought=thought, ) @@ -827,6 +828,7 @@ async def create_stream( usage=usage, cached=False, logprobs=None, + raw_response=None, thought=thought, ) diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index ffe816e599c8..8e0c1b8c627a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -722,6 +722,7 @@ async def create( cached=False, logprobs=logprobs, thought=thought, + raw_response=result, ) self._total_usage = _add_usage(self._total_usage, usage) @@ -956,6 +957,28 @@ async def create_stream( if isinstance(content, str) and self._model_info["family"] == ModelFamily.R1 and thought is None: thought, content = parse_r1_content(content) + create_params = self._process_create_args( + messages, + tools, + json_output, + extra_create_args, + ) + + if create_params.response_format is not None: + result = await self._client.beta.chat.completions.parse( + messages=create_params.messages, + tools=(create_params.tools if len(create_params.tools) > 0 else NOT_GIVEN), + response_format=create_params.response_format, + **create_params.create_args, + ) + else: + result = await self._client.chat.completions.create( + messages=create_params.messages, + stream=False, + tools=(create_params.tools if len(create_params.tools) > 0 else NOT_GIVEN), + **create_params.create_args, + ) + # Create the result. result = CreateResult( finish_reason=normalize_stop_reason(stop_reason), @@ -964,6 +987,7 @@ async def create_stream( cached=False, logprobs=logprobs, thought=thought, + raw_response=result, ) # Log the end of the stream. diff --git a/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py b/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py index 6e2b03beb2c7..55f6a9c02a3b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/replay/_replay_chat_completion_client.py @@ -176,7 +176,11 @@ async def create( _, output_token_count = self._tokenize(response) self._cur_usage = RequestUsage(prompt_tokens=prompt_token_count, completion_tokens=output_token_count) response = CreateResult( - finish_reason="stop", content=response, usage=self._cur_usage, cached=self._cached_bool_value + finish_reason="stop", + content=response, + usage=self._cur_usage, + cached=self._cached_bool_value, + raw_response=response, ) else: self._cur_usage = RequestUsage( @@ -221,7 +225,11 @@ async def create_stream( else: yield token yield CreateResult( - finish_reason="stop", content=response, usage=self._cur_usage, cached=self._cached_bool_value + finish_reason="stop", + content=response, + usage=self._cur_usage, + cached=self._cached_bool_value, + raw_response=response, ) self._update_total_usage() else: diff --git a/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py b/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py index 78f0aa5a24de..133cb69882d7 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py @@ -521,6 +521,7 @@ async def create( usage=RequestUsage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens), cached=False, thought=thought, + raw_response=result, ) @staticmethod @@ -676,6 +677,7 @@ async def create_stream( finish_reason="function_calls", usage=RequestUsage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens), cached=False, + raw_response=None, ) return @@ -698,6 +700,7 @@ async def create_stream( usage=RequestUsage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens), cached=False, thought=thought, + raw_response=None, ) # Emit the end event. diff --git a/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py b/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py index 0f694b8492ac..41fecb67c9d8 100644 --- a/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py +++ b/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py @@ -211,7 +211,7 @@ async def mock_get_streaming_chat_message_contents( created=1736674044, model="gpt-4o-mini-2024-07-18", object="chat.completion.chunk", - service_tier="scale", + service_tier="default", system_fingerprint="fingerprint", usage=CompletionUsage(prompt_tokens=20, completion_tokens=9, total_tokens=29), ), @@ -232,7 +232,7 @@ async def mock_get_streaming_chat_message_contents( created=1736674044, model="gpt-4o-mini-2024-07-18", object="chat.completion.chunk", - service_tier="scale", + service_tier="default", system_fingerprint="fingerprint", usage=CompletionUsage(prompt_tokens=20, completion_tokens=9, total_tokens=29), ), @@ -253,7 +253,7 @@ async def mock_get_streaming_chat_message_contents( created=1736674044, model="gpt-4o-mini-2024-07-18", object="chat.completion.chunk", - service_tier="scale", + service_tier="default", system_fingerprint="fingerprint", usage=CompletionUsage(prompt_tokens=20, completion_tokens=9, total_tokens=29), ), @@ -280,7 +280,7 @@ async def mock_get_streaming_chat_message_contents( created=1736674044, model="gpt-4o-mini-2024-07-18", object="chat.completion.chunk", - service_tier="scale", + service_tier="default", system_fingerprint="fingerprint", usage=CompletionUsage(prompt_tokens=20, completion_tokens=9, total_tokens=29), ), @@ -503,7 +503,7 @@ async def mock_get_streaming_chat_message_contents( created=1736674044, model="r1", object="chat.completion.chunk", - service_tier="scale", + service_tier="default", system_fingerprint="fingerprint", usage=CompletionUsage(prompt_tokens=20, completion_tokens=9, total_tokens=29), ), diff --git a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py index b76279c92a9f..bf5b1b980565 100644 --- a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py +++ b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py @@ -10,6 +10,7 @@ import pytest from autogen_agentchat.messages import BaseChatMessage, TextMessage, ToolCallRequestEvent from autogen_core import CancellationToken +from autogen_core.models import UserMessage from autogen_core.tools._base import BaseTool, Tool from autogen_ext.agents.openai import OpenAIAssistantAgent from azure.identity import DefaultAzureCredential, get_bearer_token_provider @@ -17,6 +18,13 @@ from pydantic import BaseModel +def fake_to_model_message(self): + return UserMessage(content=self.content, source=self.source) + + +TextMessage.to_model_message = fake_to_model_message + + class QuestionType(str, Enum): MULTIPLE_CHOICE = "MULTIPLE_CHOICE" FREE_RESPONSE = "FREE_RESPONSE" diff --git a/python/session_1.json b/python/session_1.json new file mode 100644 index 000000000000..8a072beb9928 --- /dev/null +++ b/python/session_1.json @@ -0,0 +1,24 @@ +[ + { + "mode": "create", + "messages": [ + { + "content": "Message 1", + "source": "User", + "type": "UserMessage" + } + ], + "response": { + "finish_reason": "stop", + "content": "Response to message 1", + "usage": { + "prompt_tokens": 2, + "completion_tokens": 4 + }, + "cached": true, + "logprobs": null, + "thought": null + }, + "stream": [] + } +] \ No newline at end of file diff --git a/python/uv.lock b/python/uv.lock index 0a8e566aee05..713cbded73a0 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -1,4 +1,5 @@ version = 1 +revision = 1 requires-python = ">=3.10, <3.13" resolution-markers = [ "python_full_version >= '3.12.4' and sys_platform == 'darwin'", @@ -90,7 +91,6 @@ wheels = [ [[package]] name = "agbench" -version = "0.0.1a1" source = { editable = "packages/agbench" } dependencies = [ { name = "azure-identity" }, @@ -800,6 +800,7 @@ requires-dist = [ { name = "unidiff", marker = "extra == 'canvas'", specifier = ">=0.7.5" }, { name = "websockets", marker = "extra == 'docker-jupyter-executor'", specifier = ">=15.0.1" }, ] +provides-extras = ["anthropic", "langchain", "azure", "docker", "ollama", "openai", "file-surfer", "llama-cpp", "graphrag", "chromadb", "web-surfer", "magentic-one", "video-surfer", "diskcache", "redis", "grpc", "jupyter-executor", "docker-jupyter-executor", "task-centric-memory", "semantic-kernel-core", "gemini", "semantic-kernel-google", "semantic-kernel-hugging-face", "semantic-kernel-mistralai", "semantic-kernel-ollama", "semantic-kernel-onnx", "semantic-kernel-anthropic", "semantic-kernel-pandas", "semantic-kernel-aws", "semantic-kernel-dapr", "http-tool", "semantic-kernel-all", "rich", "mcp", "canvas"] [package.metadata.requires-dev] dev = [ @@ -829,7 +830,6 @@ requires-dist = [ [[package]] name = "autogenstudio" -version = "0.4.2" source = { editable = "packages/autogen-studio" } dependencies = [ { name = "aiofiles" }, @@ -883,6 +883,7 @@ requires-dist = [ { name = "uvicorn", marker = "extra == 'web'" }, { name = "websockets" }, ] +provides-extras = ["web", "database"] [[package]] name = "autograd" @@ -4729,7 +4730,6 @@ name = "nvidia-cublas-cu12" version = "12.4.5.8" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 }, { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 }, ] @@ -4738,7 +4738,6 @@ name = "nvidia-cuda-cupti-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 }, { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 }, ] @@ -4747,7 +4746,6 @@ name = "nvidia-cuda-nvrtc-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 }, { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 }, ] @@ -4756,7 +4754,6 @@ name = "nvidia-cuda-runtime-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 }, { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 }, ] @@ -4779,7 +4776,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 }, { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 }, ] @@ -4788,7 +4784,6 @@ name = "nvidia-curand-cu12" version = "10.3.5.147" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 }, { url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 }, ] @@ -4802,7 +4797,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 }, { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 }, ] @@ -4814,7 +4808,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 }, { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 }, ] @@ -4831,7 +4824,6 @@ name = "nvidia-nvjitlink-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 }, { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 }, ] @@ -4840,7 +4832,6 @@ name = "nvidia-nvtx-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 }, { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 }, ]