From 9173c02ded7447d86181a8bc3e29999bca76409a Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Fri, 19 Sep 2025 16:54:03 -0700 Subject: [PATCH 01/52] refactor to improve limit handling Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 7ee202d53..d063f9134 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -81,8 +81,8 @@ def __init__(self, *args, **kwargs) -> None: class GlobalHTTPXAsyncClientConfig(BaseModel): - global_httpx_max_connections: int = 1500 - global_httpx_max_retries: int = 3 + global_httpx_max_connections: Optional[int] = None + global_httpx_max_retries: int = 0 def get_global_httpx_client( @@ -98,12 +98,13 @@ def get_global_httpx_client( global_config_dict_parser_cls=global_config_dict_parser_cls, ) cfg = GlobalHTTPXAsyncClientConfig.model_validate(global_config_dict) + limits = Limits( + max_keepalive_connections=cfg.global_httpx_max_connections, + max_connections=cfg.global_httpx_max_connections, + ) client = NeMoGymGlobalAsyncClient( - limits=Limits( - max_keepalive_connections=cfg.global_httpx_max_connections, - max_connections=cfg.global_httpx_max_connections, - ), - transport=AsyncHTTPTransport(retries=cfg.global_httpx_max_retries), + limits=limits, + transport=AsyncHTTPTransport(retries=cfg.global_httpx_max_retries, limits=limits), timeout=None, ) From ac9dbe5a22d6ba1e42f8bd48a315c23efbacc0ec Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Fri, 19 Sep 2025 17:25:40 -0700 Subject: [PATCH 02/52] use httpx-aiohttp Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 13 +- pyproject.toml | 5 + uv.lock | 342 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 351 insertions(+), 9 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index d063f9134..9d43b0e1c 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -21,7 +21,7 @@ import requests
import uvicorn from fastapi import FastAPI, Request, Response -from httpx import AsyncClient, AsyncHTTPTransport, Cookies, Limits, Response +from httpx import AsyncClient, Cookies, Limits, Response from httpx._types import ( CookieTypes, HeaderTypes, @@ -30,6 +30,7 @@ RequestData, RequestFiles, ) +from httpx_aiohttp import AiohttpTransport from omegaconf import DictConfig, OmegaConf from pydantic import BaseModel, ConfigDict from requests.exceptions import ConnectionError @@ -65,12 +66,6 @@ def __init__(self, *args, **kwargs) -> None: # ``` # In order to get the most benefit from connection pooling, make sure you're not instantiating multiple client instances - for example by using async with inside a "hot loop". This can be achieved either by having a single scoped client that's passed throughout wherever it's needed, or by having a single global client instance. # ``` -# In plain language: -# - Let's say we have 10 distinct endpoints we want to call 5 times each. -# - A connection pool as defined by the httpx client is for a single distinct endpoint. All requests to that endpoint should use the same httpx client. -# - So the optimal configuration here is to have 10 total httpx clients, one for each distinct endpoint. -# - Additionally, since the connections are pooled, if we had a single global client for all 10 distinct endpoints, we may run into deadlock situations, -# where requests to two different endpoints are waiting for each other to resolve. # # In principle, we use no timeout since various api or model calls may take an indefinite amount of time. Right now, we have no timeout, even for connection errors which may be problematic. We may want to revisit more granular httpx.Timeout later on. 
# @@ -81,7 +76,7 @@ def __init__(self, *args, **kwargs) -> None: class GlobalHTTPXAsyncClientConfig(BaseModel): - global_httpx_max_connections: Optional[int] = None + global_httpx_max_connections: int = 1500 global_httpx_max_retries: int = 0 @@ -104,7 +99,7 @@ def get_global_httpx_client( ) client = NeMoGymGlobalAsyncClient( limits=limits, - transport=AsyncHTTPTransport(retries=cfg.global_httpx_max_retries, limits=limits), + transport=AiohttpTransport(retries=cfg.global_httpx_max_retries, limits=limits), timeout=None, ) diff --git a/pyproject.toml b/pyproject.toml index 449c0ebb7..8cf4981f3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,6 +126,11 @@ dependencies = [ # Updated Wed Sep 17, 2025 with tdigest==0.5.2.2 # License: MIT https://github.com/CamDavidsonPilon/tdigest/blob/e35cfd708962ae5e9d1c5d2b15a99af7b2e2f323/LICENSE.txt "tdigest>=0.5.2.2", + + # httpx-aiohttp: a much faster transport backend as recommended here https://github.com/openai/openai-python/issues/1596#issuecomment-2709021063 + # Updated Fri Sep 19, 2025 with httpx-aiohttp==0.1.8 + # License: BSD 3-Clause https://github.com/karpetrosyan/httpx-aiohttp/blob/e11f0da1580fe73737719e0274a2c8cd6c77a28a/LICENSE + "httpx-aiohttp", ] [dependency-groups] diff --git a/uv.lock b/uv.lock index 68b45efc1..e6e3c8861 100644 --- a/uv.lock +++ b/uv.lock @@ -33,6 +33,79 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896, upload-time = "2024-06-24T11:02:01.529Z" }, ] +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = 
"sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.12.15" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" }, + { url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" }, + { url = "https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" }, + 
{ url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" }, + { url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" }, + { url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = "2025-07-29T05:50:59.192Z" }, + { url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" }, + { url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" }, + { url = "https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = 
"2025-07-29T05:51:13.689Z" }, + { url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" }, + { url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" }, + { url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" }, + { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" }, + { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" }, + { url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" }, + { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" }, + { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" }, + { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" }, + { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, 
upload-time = "2025-07-29T05:51:37.215Z" }, + { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" }, + { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" }, + { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" }, + { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + [[package]] name = "alabaster" version = "1.0.0" @@ -106,6 +179,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/45/86/4736ac618d82a20d87d2f92ae19441ebc7ac9e7a581d7e58bbe79233b24a/asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", size = 27764, upload-time = "2023-10-26T10:03:01.789Z" }, ] +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + [[package]] name = "audioop-lts" version = "0.2.2" @@ -713,6 +795,66 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/65/a4/d2f7be3c86708912c02571db0b550121caab8cd88a3c0aacb9cfa15ea66e/fonttools-4.59.2-py3-none-any.whl", hash = "sha256:8bd0f759020e87bb5d323e6283914d9bf4ae35a7307dafb2cbd1e379e720ad37", size = 1132315, upload-time = "2025-08-27T16:40:28.984Z" }, ] +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, + { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, + { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = 
"2025-06-09T23:00:44.793Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, + { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, + { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, + { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, + { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, + { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, + { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, + { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" }, + { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, + { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, + { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, + { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, + { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, + { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, + { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, + { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, + { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = 
"2025-06-09T23:01:31.287Z" }, + { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, + { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, + { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, + { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, + { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, + { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, + { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, + { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, + { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, + { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, + { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = 
"2025-06-09T23:02:02.072Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + [[package]] name = "fsspec" version = "2025.9.0" @@ -960,6 +1102,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] +[[package]] +name = "httpx-aiohttp" +version = "0.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "httpx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/19/ae2d2bf1f57fdd23c8ad83675599fb5c407fa13bc20e90f00cffa4dea3aa/httpx_aiohttp-0.1.8.tar.gz", hash = "sha256:756c5e74cdb568c3248ba63fe82bfe8bbe64b928728720f7eaac64b3cf46f308", size = 25401, upload-time = "2025-07-04T10:40:32.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/7a/514c484b88cc4ebbcd2e27e92b86019c0c5bb920582f5fbb10b7e6c78574/httpx_aiohttp-0.1.8-py3-none-any.whl", hash = "sha256:b7bd958d1331f3759a38a0ba22ad29832cb63ca69498c17735228055bf78fa7e", size = 6180, upload-time = "2025-07-04T10:40:31.522Z" }, +] + [[package]] name = "huggingface-hub" version = "0.34.4" @@ -1401,6 +1556,69 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/5e/86/396a17af4e994c7ffa65609739baddc17f4436aec9511478816e157a1bda/mlflow_tracing-3.3.2-py3-none-any.whl", hash = "sha256:9a3175fb3b069c9f541c7a60a663f482b3fcb4ca8f3583da3fdf036a50179e05", size = 1120520, upload-time = "2025-08-27T12:32:13.539Z" }, ] +[[package]] +name = "multidict" +version = "6.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" }, + { url = "https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" }, + { url = "https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" 
}, + { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" }, + { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" }, + { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", size = 268369, upload-time = "2025-08-11T12:07:01.638Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" }, + { url = "https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" }, + { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" }, + { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" }, + { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" }, + { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = "2025-08-11T12:07:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" }, + { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" }, 
+ { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" }, + { url = "https://files.pythonhosted.org/packages/3a/5d/e1db626f64f60008320aab00fbe4f23fc3300d75892a3381275b3d284580/multidict-6.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f46a6e8597f9bd71b31cc708195d42b634c8527fecbcf93febf1052cacc1f16e", size = 75848, upload-time = "2025-08-11T12:07:19.912Z" }, + { url = "https://files.pythonhosted.org/packages/4c/aa/8b6f548d839b6c13887253af4e29c939af22a18591bfb5d0ee6f1931dae8/multidict-6.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:22e38b2bc176c5eb9c0a0e379f9d188ae4cd8b28c0f53b52bce7ab0a9e534657", size = 45060, upload-time = "2025-08-11T12:07:21.163Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c6/f5e97e5d99a729bc2aa58eb3ebfa9f1e56a9b517cc38c60537c81834a73f/multidict-6.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5df8afd26f162da59e218ac0eefaa01b01b2e6cd606cffa46608f699539246da", size = 43269, upload-time = "2025-08-11T12:07:22.392Z" }, + { url = "https://files.pythonhosted.org/packages/dc/31/d54eb0c62516776f36fe67f84a732f97e0b0e12f98d5685bebcc6d396910/multidict-6.6.4-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:49517449b58d043023720aa58e62b2f74ce9b28f740a0b5d33971149553d72aa", size = 237158, upload-time = "2025-08-11T12:07:23.636Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/1c/8a10c1c25b23156e63b12165a929d8eb49a6ed769fdbefb06e6f07c1e50d/multidict-6.6.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9408439537c5afdca05edd128a63f56a62680f4b3c234301055d7a2000220f", size = 257076, upload-time = "2025-08-11T12:07:25.049Z" }, + { url = "https://files.pythonhosted.org/packages/ad/86/90e20b5771d6805a119e483fd3d1e8393e745a11511aebca41f0da38c3e2/multidict-6.6.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87a32d20759dc52a9e850fe1061b6e41ab28e2998d44168a8a341b99ded1dba0", size = 240694, upload-time = "2025-08-11T12:07:26.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/49/484d3e6b535bc0555b52a0a26ba86e4d8d03fd5587d4936dc59ba7583221/multidict-6.6.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52e3c8d43cdfff587ceedce9deb25e6ae77daba560b626e97a56ddcad3756879", size = 266350, upload-time = "2025-08-11T12:07:27.94Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b4/aa4c5c379b11895083d50021e229e90c408d7d875471cb3abf721e4670d6/multidict-6.6.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ad8850921d3a8d8ff6fbef790e773cecfc260bbfa0566998980d3fa8f520bc4a", size = 267250, upload-time = "2025-08-11T12:07:29.303Z" }, + { url = "https://files.pythonhosted.org/packages/80/e5/5e22c5bf96a64bdd43518b1834c6d95a4922cc2066b7d8e467dae9b6cee6/multidict-6.6.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:497a2954adc25c08daff36f795077f63ad33e13f19bfff7736e72c785391534f", size = 254900, upload-time = "2025-08-11T12:07:30.764Z" }, + { url = "https://files.pythonhosted.org/packages/17/38/58b27fed927c07035abc02befacab42491e7388ca105e087e6e0215ead64/multidict-6.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:024ce601f92d780ca1617ad4be5ac15b501cc2414970ffa2bb2bbc2bd5a68fa5", size = 252355, upload-time = "2025-08-11T12:07:32.205Z" }, + { url = "https://files.pythonhosted.org/packages/d0/a1/dad75d23a90c29c02b5d6f3d7c10ab36c3197613be5d07ec49c7791e186c/multidict-6.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a693fc5ed9bdd1c9e898013e0da4dcc640de7963a371c0bd458e50e046bf6438", size = 250061, upload-time = "2025-08-11T12:07:33.623Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1a/ac2216b61c7f116edab6dc3378cca6c70dc019c9a457ff0d754067c58b20/multidict-6.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:190766dac95aab54cae5b152a56520fd99298f32a1266d66d27fdd1b5ac00f4e", size = 249675, upload-time = "2025-08-11T12:07:34.958Z" }, + { url = "https://files.pythonhosted.org/packages/d4/79/1916af833b800d13883e452e8e0977c065c4ee3ab7a26941fbfdebc11895/multidict-6.6.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8f2a5ffdceab9dcd97c7a016deb2308531d5f0fced2bb0c9e1df45b3363d7", size = 261247, upload-time = "2025-08-11T12:07:36.588Z" }, + { url = "https://files.pythonhosted.org/packages/c5/65/d1f84fe08ac44a5fc7391cbc20a7cedc433ea616b266284413fd86062f8c/multidict-6.6.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:59e8d40ab1f5a8597abcef00d04845155a5693b5da00d2c93dbe88f2050f2812", size = 257960, upload-time = "2025-08-11T12:07:39.735Z" }, + { url = "https://files.pythonhosted.org/packages/13/b5/29ec78057d377b195ac2c5248c773703a6b602e132a763e20ec0457e7440/multidict-6.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:467fe64138cfac771f0e949b938c2e1ada2b5af22f39692aa9258715e9ea613a", size = 250078, upload-time = "2025-08-11T12:07:41.525Z" }, + { url = "https://files.pythonhosted.org/packages/c4/0e/7e79d38f70a872cae32e29b0d77024bef7834b0afb406ddae6558d9e2414/multidict-6.6.4-cp313-cp313-win32.whl", hash = "sha256:14616a30fe6d0a48d0a48d1a633ab3b8bec4cf293aac65f32ed116f620adfd69", size = 41708, upload-time = "2025-08-11T12:07:43.405Z" }, 
+ { url = "https://files.pythonhosted.org/packages/9d/34/746696dffff742e97cd6a23da953e55d0ea51fa601fa2ff387b3edcfaa2c/multidict-6.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:40cd05eaeb39e2bc8939451f033e57feaa2ac99e07dbca8afe2be450a4a3b6cf", size = 45912, upload-time = "2025-08-11T12:07:45.082Z" }, + { url = "https://files.pythonhosted.org/packages/c7/87/3bac136181e271e29170d8d71929cdeddeb77f3e8b6a0c08da3a8e9da114/multidict-6.6.4-cp313-cp313-win_arm64.whl", hash = "sha256:f6eb37d511bfae9e13e82cb4d1af36b91150466f24d9b2b8a9785816deb16605", size = 43076, upload-time = "2025-08-11T12:07:46.746Z" }, + { url = "https://files.pythonhosted.org/packages/64/94/0a8e63e36c049b571c9ae41ee301ada29c3fee9643d9c2548d7d558a1d99/multidict-6.6.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6c84378acd4f37d1b507dfa0d459b449e2321b3ba5f2338f9b085cf7a7ba95eb", size = 82812, upload-time = "2025-08-11T12:07:48.402Z" }, + { url = "https://files.pythonhosted.org/packages/25/1a/be8e369dfcd260d2070a67e65dd3990dd635cbd735b98da31e00ea84cd4e/multidict-6.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0e0558693063c75f3d952abf645c78f3c5dfdd825a41d8c4d8156fc0b0da6e7e", size = 48313, upload-time = "2025-08-11T12:07:49.679Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/dd4ade298674b2f9a7b06a32c94ffbc0497354df8285f27317c66433ce3b/multidict-6.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3f8e2384cb83ebd23fd07e9eada8ba64afc4c759cd94817433ab8c81ee4b403f", size = 46777, upload-time = "2025-08-11T12:07:51.318Z" }, + { url = "https://files.pythonhosted.org/packages/89/db/98aa28bc7e071bfba611ac2ae803c24e96dd3a452b4118c587d3d872c64c/multidict-6.6.4-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f996b87b420995a9174b2a7c1a8daf7db4750be6848b03eb5e639674f7963773", size = 229321, upload-time = "2025-08-11T12:07:52.965Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/bc/01ddda2a73dd9d167bd85d0e8ef4293836a8f82b786c63fb1a429bc3e678/multidict-6.6.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc356250cffd6e78416cf5b40dc6a74f1edf3be8e834cf8862d9ed5265cf9b0e", size = 249954, upload-time = "2025-08-11T12:07:54.423Z" }, + { url = "https://files.pythonhosted.org/packages/06/78/6b7c0f020f9aa0acf66d0ab4eb9f08375bac9a50ff5e3edb1c4ccd59eafc/multidict-6.6.4-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dadf95aa862714ea468a49ad1e09fe00fcc9ec67d122f6596a8d40caf6cec7d0", size = 228612, upload-time = "2025-08-11T12:07:55.914Z" }, + { url = "https://files.pythonhosted.org/packages/00/44/3faa416f89b2d5d76e9d447296a81521e1c832ad6e40b92f990697b43192/multidict-6.6.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7dd57515bebffd8ebd714d101d4c434063322e4fe24042e90ced41f18b6d3395", size = 257528, upload-time = "2025-08-11T12:07:57.371Z" }, + { url = "https://files.pythonhosted.org/packages/05/5f/77c03b89af0fcb16f018f668207768191fb9dcfb5e3361a5e706a11db2c9/multidict-6.6.4-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:967af5f238ebc2eb1da4e77af5492219fbd9b4b812347da39a7b5f5c72c0fa45", size = 256329, upload-time = "2025-08-11T12:07:58.844Z" }, + { url = "https://files.pythonhosted.org/packages/cf/e9/ed750a2a9afb4f8dc6f13dc5b67b514832101b95714f1211cd42e0aafc26/multidict-6.6.4-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a4c6875c37aae9794308ec43e3530e4aa0d36579ce38d89979bbf89582002bb", size = 247928, upload-time = "2025-08-11T12:08:01.037Z" }, + { url = "https://files.pythonhosted.org/packages/1f/b5/e0571bc13cda277db7e6e8a532791d4403dacc9850006cb66d2556e649c0/multidict-6.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:7f683a551e92bdb7fac545b9c6f9fa2aebdeefa61d607510b3533286fcab67f5", size = 245228, upload-time = "2025-08-11T12:08:02.96Z" }, + { url = "https://files.pythonhosted.org/packages/f3/a3/69a84b0eccb9824491f06368f5b86e72e4af54c3067c37c39099b6687109/multidict-6.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:3ba5aaf600edaf2a868a391779f7a85d93bed147854925f34edd24cc70a3e141", size = 235869, upload-time = "2025-08-11T12:08:04.746Z" }, + { url = "https://files.pythonhosted.org/packages/a9/9d/28802e8f9121a6a0804fa009debf4e753d0a59969ea9f70be5f5fdfcb18f/multidict-6.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:580b643b7fd2c295d83cad90d78419081f53fd532d1f1eb67ceb7060f61cff0d", size = 243446, upload-time = "2025-08-11T12:08:06.332Z" }, + { url = "https://files.pythonhosted.org/packages/38/ea/6c98add069b4878c1d66428a5f5149ddb6d32b1f9836a826ac764b9940be/multidict-6.6.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:37b7187197da6af3ee0b044dbc9625afd0c885f2800815b228a0e70f9a7f473d", size = 252299, upload-time = "2025-08-11T12:08:07.931Z" }, + { url = "https://files.pythonhosted.org/packages/3a/09/8fe02d204473e14c0af3affd50af9078839dfca1742f025cca765435d6b4/multidict-6.6.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e1b93790ed0bc26feb72e2f08299691ceb6da5e9e14a0d13cc74f1869af327a0", size = 246926, upload-time = "2025-08-11T12:08:09.467Z" }, + { url = "https://files.pythonhosted.org/packages/37/3d/7b1e10d774a6df5175ecd3c92bff069e77bed9ec2a927fdd4ff5fe182f67/multidict-6.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a506a77ddee1efcca81ecbeae27ade3e09cdf21a8ae854d766c2bb4f14053f92", size = 243383, upload-time = "2025-08-11T12:08:10.981Z" }, + { url = "https://files.pythonhosted.org/packages/50/b0/a6fae46071b645ae98786ab738447de1ef53742eaad949f27e960864bb49/multidict-6.6.4-cp313-cp313t-win32.whl", hash = "sha256:f93b2b2279883d1d0a9e1bd01f312d6fc315c5e4c1f09e112e4736e2f650bc4e", size = 47775, upload-time = 
"2025-08-11T12:08:12.439Z" }, + { url = "https://files.pythonhosted.org/packages/b2/0a/2436550b1520091af0600dff547913cb2d66fbac27a8c33bc1b1bccd8d98/multidict-6.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:6d46a180acdf6e87cc41dc15d8f5c2986e1e8739dc25dbb7dac826731ef381a4", size = 53100, upload-time = "2025-08-11T12:08:13.823Z" }, + { url = "https://files.pythonhosted.org/packages/97/ea/43ac51faff934086db9c072a94d327d71b7d8b40cd5dcb47311330929ef0/multidict-6.6.4-cp313-cp313t-win_arm64.whl", hash = "sha256:756989334015e3335d087a27331659820d53ba432befdef6a718398b0a8493ad", size = 45501, upload-time = "2025-08-11T12:08:15.173Z" }, + { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, +] + [[package]] name = "mypy" version = "1.17.1" @@ -1466,6 +1684,7 @@ dependencies = [ { name = "devtools" }, { name = "fastapi" }, { name = "gradio" }, + { name = "httpx-aiohttp" }, { name = "hydra-core" }, { name = "mlflow" }, { name = "omegaconf" }, @@ -1507,6 +1726,7 @@ requires-dist = [ { name = "devtools" }, { name = "fastapi" }, { name = "gradio" }, + { name = "httpx-aiohttp" }, { name = "hydra-core" }, { name = "mlflow" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.8.0" }, @@ -1895,6 +2115,63 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/a5/987a405322d78a73b66e39e4a90e4ef156fd7141bf71df987e50717c321b/pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8", size = 220965, upload-time = "2025-08-09T18:56:13.192Z" }, ] +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, + { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, + { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, + { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" }, + { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, + { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, + { url = 
"https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, + { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, + { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, + { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, + { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, + { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, + { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, + { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, + { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, + { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, + { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, + { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = 
"2025-06-09T22:55:28.747Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, + { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, + { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, + { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + [[package]] name = "protobuf" version = "6.32.0" @@ -2889,6 +3166,71 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, ] +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, + { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" }, + { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, + { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, + { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, + { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, + { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, + { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, + { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, + { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, + { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 
383378, upload-time = "2025-06-10T00:44:10.51Z" }, + { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, + { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, + { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, + { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, + { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, + { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, 
upload-time = "2025-06-10T00:44:34.494Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, + { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, + { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, + { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, + { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, + { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, + { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, + { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, + { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, + { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = 
"2025-06-10T00:45:12.055Z" }, + { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" }, + { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] + [[package]] name = "zipp" version = "3.23.0" From 1039075949ed4398da77a474ae2cada4696ebd09 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Fri, 19 Sep 2025 20:28:42 -0700 Subject: [PATCH 03/52] be loop aware Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 56 +++++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 9d43b0e1c..ea76c337e 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -13,13 +13,16 @@ # limitations under the License. import json from abc import abstractmethod +from asyncio import _get_running_loop, get_event_loop from os import getenv from threading import Thread -from typing import Any, Dict, Literal, Optional, Tuple, Type, Union +from typing import Any, Literal, Optional, Tuple, Type, Union from uuid import uuid4 import requests import uvicorn +import uvloop +from aiohttp import ClientSession, ClientTimeout, TCPConnector from fastapi import FastAPI, Request, Response from httpx import AsyncClient, Cookies, Limits, Response from httpx._types import ( @@ -67,12 +70,8 @@ def __init__(self, *args, **kwargs) -> None: # In order to get the most benefit from connection pooling, make sure you're not instantiating multiple client instances - for example by using async with inside a "hot loop". 
This can be achieved either by having a single scoped client that's passed throughout wherever it's needed, or by having a single global client instance. # ``` # -# In principle, we use no timeout since various api or model calls may take an indefinite amount of time. Right now, we have no timeout, even for connection errors which may be problematic. We may want to revisit more granular httpx.Timeout later on. -# -# Eventually, we may also want to parameterize the max connections. For now, we set the max connections to just some very large number. -# -# It's critical that this client is NOT used before uvicorn.run is called. Under the hood, this async client will start and use an event loop, and store a handle to that specific event loop. When uvicorn.run is called, it will replace the event loop policy with its own. So the handle that the async client has is now outdated. -_GLOBAL_HTTPX_CLIENTS: Dict[str, NeMoGymGlobalAsyncClient] = dict() +# We use no timeout since various api or model calls may take an indefinite amount of time. 
+_GLOBAL_HTTPX_CLIENT: Optional[NeMoGymGlobalAsyncClient] = None class GlobalHTTPXAsyncClientConfig(BaseModel): @@ -81,29 +80,57 @@ class GlobalHTTPXAsyncClientConfig(BaseModel): def get_global_httpx_client( - base_url: str, global_config_dict_parser_config: Optional[GlobalConfigDictParserConfig] = None, global_config_dict_parser_cls: Type[GlobalConfigDictParser] = GlobalConfigDictParser, ) -> NeMoGymGlobalAsyncClient: - if base_url in _GLOBAL_HTTPX_CLIENTS: - return _GLOBAL_HTTPX_CLIENTS[base_url] + if _GLOBAL_HTTPX_CLIENT is not None: + return _GLOBAL_HTTPX_CLIENT + + # Initialize the event loop which is used in aiohttp.ClientSession below + loop = _get_running_loop() + if loop is None: + uvloop.install() + loop = get_event_loop() global_config_dict = get_global_config_dict( global_config_dict_parser_config=global_config_dict_parser_config, global_config_dict_parser_cls=global_config_dict_parser_cls, ) cfg = GlobalHTTPXAsyncClientConfig.model_validate(global_config_dict) + limits = Limits( max_keepalive_connections=cfg.global_httpx_max_connections, max_connections=cfg.global_httpx_max_connections, ) + client_session = ClientSession( + connector=TCPConnector( + limit=limits.max_connections, + keepalive_timeout=limits.keepalive_expiry, + ssl=None, + local_addr=None, + ), + loop=loop, + timeout=ClientTimeout( + total=None, + connect=None, + sock_connect=None, + sock_read=None, + ), + ) + transport = AiohttpTransport( + retries=cfg.global_httpx_max_retries, + limits=limits, + client=client_session, + ) + client = NeMoGymGlobalAsyncClient( limits=limits, - transport=AiohttpTransport(retries=cfg.global_httpx_max_retries, limits=limits), + transport=transport, timeout=None, ) - _GLOBAL_HTTPX_CLIENTS[base_url] = client + global _GLOBAL_HTTPX_CLIENT + _GLOBAL_HTTPX_CLIENT = client return client @@ -305,12 +332,11 @@ def run_webserver(cls) -> None: # pragma: no cover app, host=server.config.host, port=server.config.port, - # TODO eventually we want to make this FastAPI 
server served across multiple processes or workers. - # Right now this will always use one process. - # workers=server.config.num_fastapi_workers, # We don't have any explicit lifespan logic, so instead of defaulting to "auto" # We just turn lifespan off lifespan="off", + # We set loop none here since a server instance requires a server_client, which will init + loop="none", ) From 81822c283418bc706a62a3c13200353c27388e6f Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Fri, 19 Sep 2025 21:05:14 -0700 Subject: [PATCH 04/52] fixes Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 2 +- nemo_gym/server_utils.py | 17 +++-------------- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index 09de3d937..0d5564d92 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -424,7 +424,7 @@ class NeMoGymAsyncOpenAI(AsyncOpenAI): def __init__(self, **kwargs) -> None: # TODO: this setup is take from https://github.com/NVIDIA/NeMo-Skills/blob/80dc78ac758c4cac81c83a43a729e7ca1280857b/nemo_skills/inference/model/base.py#L318 # However, there may still be a lingering issue regarding saturating at 100 max connections - kwargs["http_client"] = get_global_httpx_client(kwargs["base_url"]) + kwargs["http_client"] = get_global_httpx_client() kwargs["timeout"] = None # Enforce no timeout super().__init__(**kwargs) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index ea76c337e..16ac1a061 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -13,7 +13,6 @@ # limitations under the License. 
import json from abc import abstractmethod -from asyncio import _get_running_loop, get_event_loop from os import getenv from threading import Thread from typing import Any, Literal, Optional, Tuple, Type, Union @@ -21,7 +20,6 @@ import requests import uvicorn -import uvloop from aiohttp import ClientSession, ClientTimeout, TCPConnector from fastapi import FastAPI, Request, Response from httpx import AsyncClient, Cookies, Limits, Response @@ -83,15 +81,10 @@ def get_global_httpx_client( global_config_dict_parser_config: Optional[GlobalConfigDictParserConfig] = None, global_config_dict_parser_cls: Type[GlobalConfigDictParser] = GlobalConfigDictParser, ) -> NeMoGymGlobalAsyncClient: + global _GLOBAL_HTTPX_CLIENT if _GLOBAL_HTTPX_CLIENT is not None: return _GLOBAL_HTTPX_CLIENT - # Initialize the event loop which is used in aiohttp.ClientSession below - loop = _get_running_loop() - if loop is None: - uvloop.install() - loop = get_event_loop() - global_config_dict = get_global_config_dict( global_config_dict_parser_config=global_config_dict_parser_config, global_config_dict_parser_cls=global_config_dict_parser_cls, @@ -109,7 +102,6 @@ def get_global_httpx_client( ssl=None, local_addr=None, ), - loop=loop, timeout=ClientTimeout( total=None, connect=None, @@ -129,7 +121,6 @@ def get_global_httpx_client( timeout=None, ) - global _GLOBAL_HTTPX_CLIENT _GLOBAL_HTTPX_CLIENT = client return client @@ -199,7 +190,7 @@ async def get( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client(base_url).get( + return await get_global_httpx_client().get( f"{base_url}{url_path}", params=params, headers=headers, @@ -232,7 +223,7 @@ async def post( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client(base_url).post( + 
return await get_global_httpx_client().post( f"{base_url}{url_path}", content=content, data=data, @@ -335,8 +326,6 @@ def run_webserver(cls) -> None: # pragma: no cover # We don't have any explicit lifespan logic, so instead of defaulting to "auto" # We just turn lifespan off lifespan="off", - # We set loop none here since a server instance requires a server_client, which will init - loop="none", ) From 933788d34f8517d5bdc1598dfcfe578673d98335 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 11:37:51 -0700 Subject: [PATCH 05/52] lazy init Signed-off-by: Brian Yu --- responses_api_models/openai_model/app.py | 23 ++++++++++++----- .../openai_model/tests/test_app.py | 12 +++++---- responses_api_models/vllm_model/app.py | 25 ++++++++++++------- .../vllm_model/tests/test_app.py | 24 ++++++++---------- tests/unit_tests/test_openai_utils.py | 2 +- 5 files changed, 52 insertions(+), 34 deletions(-) diff --git a/responses_api_models/openai_model/app.py b/responses_api_models/openai_model/app.py index 25bbe36c0..2f2ca05da 100644 --- a/responses_api_models/openai_model/app.py +++ b/responses_api_models/openai_model/app.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Union + from nemo_gym.base_responses_api_model import ( BaseResponsesAPIModelConfig, Body, @@ -35,16 +37,25 @@ class SimpleModelServer(SimpleResponsesAPIModel): config: SimpleModelServerConfig def model_post_init(self, context): - self._client = NeMoGymAsyncOpenAI( - base_url=self.config.openai_base_url, - api_key=self.config.openai_api_key, - ) + self._client: Union[None, NeMoGymAsyncOpenAI] = None + return super().model_post_init(context) + @property + def client(self) -> NeMoGymAsyncOpenAI: + # We do lazy init here since NeMoGymAsyncOpenAI requires a running event loop. 
+ if self._client is None: + self._client = NeMoGymAsyncOpenAI( + base_url=self.config.openai_base_url, + api_key=self.config.openai_api_key, + ) + + return self._client + async def responses(self, body: NeMoGymResponseCreateParamsNonStreaming = Body()) -> NeMoGymResponse: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response = await self._client.responses.create(**body_dict) + openai_response = await self.client.responses.create(**body_dict) return NeMoGymResponse(**openai_response.model_dump()) async def chat_completions( @@ -52,7 +63,7 @@ async def chat_completions( ) -> NeMoGymChatCompletion: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response = await self._client.chat.completions.create(**body_dict) + openai_response = await self.client.chat.completions.create(**body_dict) return NeMoGymChatCompletion(**openai_response.model_dump()) diff --git a/responses_api_models/openai_model/tests/test_app.py b/responses_api_models/openai_model/tests/test_app.py index ee97042ba..18d7fb716 100644 --- a/responses_api_models/openai_model/tests/test_app.py +++ b/responses_api_models/openai_model/tests/test_app.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import AsyncMock, MagicMock +from unittest.mock import AsyncMock, MagicMock, PropertyMock from fastapi.testclient import TestClient from pytest import MonkeyPatch @@ -70,8 +70,9 @@ async def mock_create_chat(**kwargs): return NeMoGymChatCompletion(**mock_chat_data) mock_chat = AsyncMock(side_effect=mock_create_chat) - - monkeypatch.setattr(server._client.chat.completions, "create", mock_chat) + mock_client = PropertyMock() + mock_client.return_value.chat.completions.create = mock_chat + monkeypatch.setattr(type(server), "client", mock_client) chat_no_model = client.post( "/v1/chat/completions", @@ -133,8 +134,9 @@ async def mock_create_responses(**kwargs): return NeMoGymResponse(**mock_response_data) mock_response = AsyncMock(side_effect=mock_create_responses) - - monkeypatch.setattr(server._client.responses, "create", mock_response) + mock_client = PropertyMock() + mock_client.return_value.responses.create = mock_response + monkeypatch.setattr(type(server), "client", mock_client) # No model provided should use the one from the config res_no_model = client.post("/v1/responses", json={"input": "hello"}) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index 15128222a..d9be66324 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -76,13 +76,7 @@ class VLLMModel(SimpleResponsesAPIModel): config: VLLMModelConfig def model_post_init(self, context): - self._clients: List[NeMoGymAsyncOpenAI] = [ - NeMoGymAsyncOpenAI( - base_url=base_url, - api_key=self.config.api_key, - ) - for base_url in self.config.base_url - ] + self._clients: Union[None, List[NeMoGymAsyncOpenAI]] = None self._session_id_to_client: Dict[str, NeMoGymAsyncOpenAI] = dict() @@ -90,6 +84,19 @@ def model_post_init(self, context): return super().model_post_init(context) + @property + def clients(self) -> List[NeMoGymAsyncOpenAI]: + if self._clients is None: + self._clients: List[NeMoGymAsyncOpenAI] 
= [ + NeMoGymAsyncOpenAI( + base_url=base_url, + api_key=self.config.api_key, + ) + for base_url in self.config.base_url + ] + + return self._clients + async def responses( self, request: Request, body: NeMoGymResponseCreateParamsNonStreaming = Body() ) -> NeMoGymResponse: @@ -142,8 +149,8 @@ async def chat_completions( session_id = request.session[SESSION_ID_KEY] if session_id not in self._session_id_to_client: # There is probably a better way to select the endpoint for this request. But this will do for now. - client_idx = len(self._session_id_to_client) % len(self._clients) - client = self._clients[client_idx] + client_idx = len(self._session_id_to_client) % len(self.clients) + client = self.clients[client_idx] self._session_id_to_client[session_id] = client client = self._session_id_to_client[session_id] diff --git a/responses_api_models/vllm_model/tests/test_app.py b/responses_api_models/vllm_model/tests/test_app.py index ba8a8b8fb..574417b27 100644 --- a/responses_api_models/vllm_model/tests/test_app.py +++ b/responses_api_models/vllm_model/tests/test_app.py @@ -13,13 +13,14 @@ # limitations under the License. 
import json from typing import Any, Union -from unittest.mock import AsyncMock, MagicMock +from unittest.mock import AsyncMock, MagicMock, PropertyMock from fastapi.testclient import TestClient from pytest import MonkeyPatch, mark from nemo_gym import PARENT_DIR from nemo_gym.openai_utils import ( + NeMoGymAsyncOpenAI, NeMoGymChatCompletion, NeMoGymChatCompletionAssistantMessageForTrainingParam, NeMoGymChatCompletionAssistantMessageParam, @@ -1504,24 +1505,21 @@ def test_client_session_routing(self, monkeypatch: MonkeyPatch): input=input_messages, ) - assert len(server._clients) == 2 - mock_chat_completion_1 = mock_chat_completion.model_copy(deep=True) mock_chat_completion_1.choices[0].message.content = "1" mock_method_1 = AsyncMock(return_value=mock_chat_completion_1) - monkeypatch.setattr( - server._clients[0].chat.completions, - "create", - mock_method_1, - ) + client_1 = MagicMock(spec=NeMoGymAsyncOpenAI) + client_1.chat.completions.create = mock_method_1 + mock_chat_completion_2 = mock_chat_completion.model_copy(deep=True) mock_chat_completion_2.choices[0].message.content = "2" mock_method_2 = AsyncMock(return_value=mock_chat_completion_2) - monkeypatch.setattr( - server._clients[1].chat.completions, - "create", - mock_method_2, - ) + client_2 = MagicMock(spec=NeMoGymAsyncOpenAI) + client_2.chat.completions.create = mock_method_2 + + mock_clients = PropertyMock() + mock_clients.return_value = [client_1, client_2] + monkeypatch.setattr(type(server), "clients", mock_clients) # Test first query by client 1 goes to underlying client 1 client_1 = TestClient(app) diff --git a/tests/unit_tests/test_openai_utils.py b/tests/unit_tests/test_openai_utils.py index 4c95c6432..43427c5df 100644 --- a/tests/unit_tests/test_openai_utils.py +++ b/tests/unit_tests/test_openai_utils.py @@ -15,5 +15,5 @@ class TestOpenAIUtils: - def test_NeMoGymAsyncOpenAI(self) -> None: + async def test_NeMoGymAsyncOpenAI(self) -> None: NeMoGymAsyncOpenAI(api_key="abc", 
base_url="https://api.openai.com/v1") From 31c3453ad127c0f0abb8d2f1bd1d0f79fb0657a4 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 16:06:38 -0700 Subject: [PATCH 06/52] idk Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 16ac1a061..3b197c3cb 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -20,7 +20,7 @@ import requests import uvicorn -from aiohttp import ClientSession, ClientTimeout, TCPConnector +from aiohttp import ClientSession, TCPConnector from fastapi import FastAPI, Request, Response from httpx import AsyncClient, Cookies, Limits, Response from httpx._types import ( @@ -99,15 +99,8 @@ def get_global_httpx_client( connector=TCPConnector( limit=limits.max_connections, keepalive_timeout=limits.keepalive_expiry, - ssl=None, - local_addr=None, - ), - timeout=ClientTimeout( - total=None, - connect=None, - sock_connect=None, - sock_read=None, ), + timeout=None, ) transport = AiohttpTransport( retries=cfg.global_httpx_max_retries, From a6da2191b75f0e38896fac6d132bcc6aa2d27b79 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 16:39:02 -0700 Subject: [PATCH 07/52] add back origin based httpx client Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 2 +- nemo_gym/server_utils.py | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index 0d5564d92..09de3d937 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -424,7 +424,7 @@ class NeMoGymAsyncOpenAI(AsyncOpenAI): def __init__(self, **kwargs) -> None: # TODO: this setup is take from https://github.com/NVIDIA/NeMo-Skills/blob/80dc78ac758c4cac81c83a43a729e7ca1280857b/nemo_skills/inference/model/base.py#L318 # However, there may still be a lingering issue regarding saturating at 100 max connections - 
kwargs["http_client"] = get_global_httpx_client() + kwargs["http_client"] = get_global_httpx_client(kwargs["base_url"]) kwargs["timeout"] = None # Enforce no timeout super().__init__(**kwargs) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 3b197c3cb..82877a061 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -15,7 +15,7 @@ from abc import abstractmethod from os import getenv from threading import Thread -from typing import Any, Literal, Optional, Tuple, Type, Union +from typing import Any, Dict, Literal, Optional, Tuple, Type, Union from uuid import uuid4 import requests @@ -67,9 +67,15 @@ def __init__(self, *args, **kwargs) -> None: # ``` # In order to get the most benefit from connection pooling, make sure you're not instantiating multiple client instances - for example by using async with inside a "hot loop". This can be achieved either by having a single scoped client that's passed throughout wherever it's needed, or by having a single global client instance. # ``` +# In plain language: +# - Let's say we have 10 distinct endpoints we want to call 5 times each. +# - A connection pool as defined by the httpx client is for a single distinct endpoint. All requests to that endpoint should use the same httpx client. +# - So the optimal configuration here is to have 10 total httpx clients, one for each distinct endpoint. +# - Additionally, since the connections are pooled, if we had a single global client for all 10 distinct endpoints, we may run into deadlock situations, +# where requests to two different endpoints are waiting for each other to resolve. # # We use no timeout since various api or model calls may take an indefinite amount of time. 
-_GLOBAL_HTTPX_CLIENT: Optional[NeMoGymGlobalAsyncClient] = None +_GLOBAL_HTTPX_CLIENTS: Dict[str, NeMoGymGlobalAsyncClient] = dict() class GlobalHTTPXAsyncClientConfig(BaseModel): @@ -78,12 +84,12 @@ class GlobalHTTPXAsyncClientConfig(BaseModel): def get_global_httpx_client( + base_url: str, global_config_dict_parser_config: Optional[GlobalConfigDictParserConfig] = None, global_config_dict_parser_cls: Type[GlobalConfigDictParser] = GlobalConfigDictParser, ) -> NeMoGymGlobalAsyncClient: - global _GLOBAL_HTTPX_CLIENT - if _GLOBAL_HTTPX_CLIENT is not None: - return _GLOBAL_HTTPX_CLIENT + if base_url in _GLOBAL_HTTPX_CLIENTS: + return _GLOBAL_HTTPX_CLIENTS[base_url] global_config_dict = get_global_config_dict( global_config_dict_parser_config=global_config_dict_parser_config, @@ -114,7 +120,7 @@ def get_global_httpx_client( timeout=None, ) - _GLOBAL_HTTPX_CLIENT = client + _GLOBAL_HTTPX_CLIENTS[base_url] = client return client @@ -183,7 +189,7 @@ async def get( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client().get( + return await get_global_httpx_client(base_url).get( f"{base_url}{url_path}", params=params, headers=headers, @@ -216,7 +222,7 @@ async def post( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client().post( + return await get_global_httpx_client(base_url).post( f"{base_url}{url_path}", content=content, data=data, From cf3efb99c352f3f7e0e7b34bc4add79f4a7b7564 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 17:08:34 -0700 Subject: [PATCH 08/52] skip tokenize call Signed-off-by: Brian Yu --- responses_api_models/vllm_model/app.py | 37 +++++++++----------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/responses_api_models/vllm_model/app.py 
b/responses_api_models/vllm_model/app.py index d9be66324..0622d51ed 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -158,9 +158,11 @@ async def chat_completions( if self.config.return_token_id_information: create_params |= dict( logprobs=True, - # The extra body below is VLLM specific to get the generation log probs associated with generation token IDs. extra_body={ - "return_tokens_as_token_ids": True, + # For prompt and generation token IDs + "return_token_ids": True, + # For prompt token IDs + "prompt_logprobs": 0, }, ) @@ -174,37 +176,22 @@ async def chat_completions( if self.config.return_token_id_information: log_probs = openai_response.choices[0].logprobs.content - generation_token_ids = [] - generation_log_probs = [] - for log_prob in log_probs: - # Looks like `"token_id:151667"` - generation_token_ids.append(int(log_prob.token.removeprefix("token_id:"))) - generation_log_probs.append(log_prob.logprob) - - # The tokenize endpoint doesn't accept any sampling parameters - # The only relevant params are model, messages, and tools. - tokenize_body_dict = dict() - for key in ("model", "messages", "tools"): - if key in body_dict: - tokenize_body_dict[key] = body_dict[key] - - # The base url has /v1 at the end but vLLM's tokenize endpoint does not have v1, hence the .. 
- # I can't believe the path is resolved correctly LOL - tokenize_response = await client.post( - "../tokenize", - cast_to=VLLMTokenizeResponse, - body=tokenize_body_dict, - ) + generation_log_probs = [log_prob.logprob for log_prob in log_probs] message_dict = chat_completion_dict["choices"][0]["message"] message_dict.update( dict( - prompt_token_ids=tokenize_response.tokens, - generation_token_ids=generation_token_ids, + prompt_token_ids=chat_completion_dict["prompt_token_ids"], + generation_token_ids=openai_response.choices[0].token_ids, generation_log_probs=generation_log_probs, ) ) + # Clean the duplicated information + chat_completion_dict.pop("prompt_token_ids") + chat_completion_dict["choices"][0].pop("token_ids") + chat_completion_dict["choices"][0].pop("logprobs") + return NeMoGymChatCompletion(**chat_completion_dict) From 1846e7006d7611406ef0064e4665922005807d42 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 17:49:42 -0700 Subject: [PATCH 09/52] quiet httpx logs Signed-off-by: Brian Yu --- nemo_gym/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nemo_gym/__init__.py b/nemo_gym/__init__.py index 509ca8d68..4cb1031c6 100644 --- a/nemo_gym/__init__.py +++ b/nemo_gym/__init__.py @@ -1,3 +1,4 @@ +import logging import sys from os import environ from os.path import abspath, dirname, join @@ -27,6 +28,10 @@ # Turn off Gradio analytics environ["GRADIO_ANALYTICS_ENABLED"] = "False" +# Quiet httpx INFO logs 200s +logger = logging.getLogger("httpx") +logger.setLevel(logging.WARNING) + from nemo_gym.package_info import ( __contact_emails__, __contact_names__, From e9d28ba0297743a0a74d85c6ef9bbca5fddf475b Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 17:50:33 -0700 Subject: [PATCH 10/52] improve rollout collection efficiency Signed-off-by: Brian Yu --- nemo_gym/rollout_collection.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nemo_gym/rollout_collection.py 
b/nemo_gym/rollout_collection.py index 09cc7895b..d8816811d 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -55,20 +55,20 @@ async def _collect_rollouts(config: RolloutCollectionConfig): # pragma: no cove if config.num_samples_in_parallel: semaphore = Semaphore(config.num_samples_in_parallel) - async def _post_coroutine(row: dict): - async with semaphore: - return await server_client.post(server_name=config.agent_name, url_path="/run", json=row) - - tasks = list(map(_post_coroutine, rows)) - metrics = Counter() - pbar = tqdm.as_completed(tasks, desc="Collecting rollouts") with open(config.output_jsonl_fpath, "a") as f: - for future in pbar: - result = await future - result = result.json() - f.write(json.dumps(result) + "\n") - metrics += Counter({k: v for k, v in result.items() if isinstance(v, (int, float))}) + + async def _post_coroutine(i: int, row: dict) -> None: + async with semaphore: + response = await server_client.post( + server_name=config.agent_name, url_path="/run", json=row, headers={"i": str(i)} + ) + result = response.json() + f.write(json.dumps(result) + "\n") + metrics.update({k: v for k, v in result.items() if isinstance(v, (int, float))}) + + tasks = list(map(_post_coroutine, range(len(rows)), rows)) + await tqdm.gather(*tasks, desc="Collecting rollouts") avg_metrics = {k: v / len(tasks) for k, v in metrics.items()} print(json.dumps(avg_metrics, indent=4)) From a65d3897a6bb3a44b8e13badb5418b0a4d3bc5e0 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 18:30:30 -0700 Subject: [PATCH 11/52] try make model response read more efficient Signed-off-by: Brian Yu --- responses_api_agents/simple_agent/app.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/responses_api_agents/simple_agent/app.py b/responses_api_agents/simple_agent/app.py index 028e3bb24..58fb37a51 100644 --- a/responses_api_agents/simple_agent/app.py +++ b/responses_api_agents/simple_agent/app.py @@ -85,14 +85,12 @@ 
async def responses( json=new_body, cookies=model_server_cookies, ) - model_response_json = model_response.json() + model_response_json_str = (await model_response.aread()).decode() model_server_cookies = model_response.cookies try: - model_response = NeMoGymResponse.model_validate(model_response_json) + model_response = NeMoGymResponse.model_validate_json(model_response_json_str) except ValidationError as e: - raise RuntimeError( - f"Received an invalid response from model server: {json.dumps(model_response_json)}" - ) from e + raise RuntimeError(f"Received an invalid response from model server: {model_response_json_str}") from e output = model_response.output new_outputs.extend(output) From 567cc008036091473133cabc4f30815753d151fc Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 18:34:45 -0700 Subject: [PATCH 12/52] improve server utils Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 82877a061..bfe529b20 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -79,8 +79,11 @@ def __init__(self, *args, **kwargs) -> None: class GlobalHTTPXAsyncClientConfig(BaseModel): - global_httpx_max_connections: int = 1500 - global_httpx_max_retries: int = 0 + global_httpx_max_connections: int = 100 + global_httpx_max_keepalive_connections: int = 20 + + # Since we use AiohttpTransport, we don't support retries like with the default httpx transport. 
+ # global_httpx_max_retries: int = 0 def get_global_httpx_client( @@ -98,8 +101,8 @@ def get_global_httpx_client( cfg = GlobalHTTPXAsyncClientConfig.model_validate(global_config_dict) limits = Limits( - max_keepalive_connections=cfg.global_httpx_max_connections, max_connections=cfg.global_httpx_max_connections, + max_keepalive_connections=cfg.global_httpx_max_keepalive_connections, ) client_session = ClientSession( connector=TCPConnector( @@ -109,7 +112,7 @@ def get_global_httpx_client( timeout=None, ) transport = AiohttpTransport( - retries=cfg.global_httpx_max_retries, + retries=0, # This value doesn't actually matter since AiohttpTransport won't retry anyways. limits=limits, client=client_session, ) From 86c5a34a4bf55bf74cebc62e53ee077aacaaded7 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:16:28 -0700 Subject: [PATCH 13/52] set keepalive expiry very large Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index bfe529b20..19b307770 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -91,6 +91,7 @@ def get_global_httpx_client( global_config_dict_parser_config: Optional[GlobalConfigDictParserConfig] = None, global_config_dict_parser_cls: Type[GlobalConfigDictParser] = GlobalConfigDictParser, ) -> NeMoGymGlobalAsyncClient: + """THE NETWORKING PERFORMANCE OF GYM IS VERY SENSITIVE TO THE CONFIGURATION IN THIS FUNCTION. PLEASE DO NOT TOUCH IT.""" if base_url in _GLOBAL_HTTPX_CLIENTS: return _GLOBAL_HTTPX_CLIENTS[base_url] @@ -103,6 +104,7 @@ def get_global_httpx_client( limits = Limits( max_connections=cfg.global_httpx_max_connections, max_keepalive_connections=cfg.global_httpx_max_keepalive_connections, + keepalive_expiry=1_000_000, # 1M seconds, some ridiculously big number to prevent client-side timeouts. 
) client_session = ClientSession( connector=TCPConnector( From 31ba06a85ff900a239c93e317b9d3827b7e8cfb2 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:27:42 -0700 Subject: [PATCH 14/52] retry in server client Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 74 +++++++++++++++++++++++++++++----------- 1 file changed, 54 insertions(+), 20 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 19b307770..822799e50 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -11,11 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import asyncio import json from abc import abstractmethod from os import getenv from threading import Thread -from typing import Any, Dict, Literal, Optional, Tuple, Type, Union +from typing import Any, ClassVar, Dict, Literal, Optional, Tuple, Type, Union from uuid import uuid4 import requests @@ -104,7 +105,7 @@ def get_global_httpx_client( limits = Limits( max_connections=cfg.global_httpx_max_connections, max_keepalive_connections=cfg.global_httpx_max_keepalive_connections, - keepalive_expiry=1_000_000, # 1M seconds, some ridiculously big number to prevent client-side timeouts. + keepalive_expiry=1_000_000, # 1M seconds, some ridiculously big number to prevent client-side connection pool timeouts. ) client_session = ClientSession( connector=TCPConnector( @@ -142,6 +143,9 @@ class ServerClient(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) + # This is not intended to be changed. If you want to increase this, we should probably figure out how to improve server-side robustness. 
+ MAX_NUM_TRIES: ClassVar[int] = 3 + @classmethod def load_head_server_config(cls) -> BaseServerConfig: global_config_dict = get_global_config_dict() @@ -194,13 +198,28 @@ async def get( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client(base_url).get( - f"{base_url}{url_path}", - params=params, - headers=headers, - cookies=cookies, - **kwargs, - ) + + num_tries = 1 + while True: + try: + return await get_global_httpx_client(base_url).get( + f"{base_url}{url_path}", + params=params, + headers=headers, + cookies=cookies, + **kwargs, + ) + except Exception as e: + print( + f"""Hit an exception while making a request (try {num_tries}): {e} +Sleeping 0.5s and retrying... +""" + ) + if num_tries >= self.MAX_NUM_TRIES: + raise e + + num_tries += 1 + await asyncio.sleep(0.5) async def post( self, @@ -227,17 +246,32 @@ async def post( """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) - return await get_global_httpx_client(base_url).post( - f"{base_url}{url_path}", - content=content, - data=data, - files=files, - json=json.model_dump(exclude_unset=True) if isinstance(json, BaseModel) else json, - params=params, - headers=headers, - cookies=cookies, - **kwargs, - ) + + num_tries = 1 + while True: + try: + return await get_global_httpx_client(base_url).post( + f"{base_url}{url_path}", + content=content, + data=data, + files=files, + json=json.model_dump(exclude_unset=True) if isinstance(json, BaseModel) else json, + params=params, + headers=headers, + cookies=cookies, + **kwargs, + ) + except Exception as e: + print( + f"""Hit an exception while making a request (try {num_tries}): {e} +Sleeping 0.5s and retrying... 
+""" + ) + if num_tries >= self.MAX_NUM_TRIES: + raise e + + num_tries += 1 + await asyncio.sleep(0.5) def poll_for_status(self, server_name: str) -> ServerStatus: # pragma: no cover if server_name == HEAD_SERVER_KEY_NAME: From 056be914be53e2a25ba763bdc07fe28a9f827798 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:29:40 -0700 Subject: [PATCH 15/52] comment Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 822799e50..42dd34c51 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -112,7 +112,7 @@ def get_global_httpx_client( limit=limits.max_connections, keepalive_timeout=limits.keepalive_expiry, ), - timeout=None, + timeout=None, # No timeouts ) transport = AiohttpTransport( retries=0, # This value doesn't actually matter since AiohttpTransport won't retry anyways. @@ -123,7 +123,7 @@ def get_global_httpx_client( client = NeMoGymGlobalAsyncClient( limits=limits, transport=transport, - timeout=None, + timeout=None, # No timeouts ) _GLOBAL_HTTPX_CLIENTS[base_url] = client From b24bdd4b990c278a3a42d969a0d0443d7e766b5f Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:31:03 -0700 Subject: [PATCH 16/52] revert read Signed-off-by: Brian Yu --- responses_api_agents/simple_agent/app.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/responses_api_agents/simple_agent/app.py b/responses_api_agents/simple_agent/app.py index 58fb37a51..028e3bb24 100644 --- a/responses_api_agents/simple_agent/app.py +++ b/responses_api_agents/simple_agent/app.py @@ -85,12 +85,14 @@ async def responses( json=new_body, cookies=model_server_cookies, ) - model_response_json_str = (await model_response.aread()).decode() + model_response_json = model_response.json() model_server_cookies = model_response.cookies try: - model_response = NeMoGymResponse.model_validate_json(model_response_json_str) 
+ model_response = NeMoGymResponse.model_validate(model_response_json) except ValidationError as e: - raise RuntimeError(f"Received an invalid response from model server: {model_response_json_str}") from e + raise RuntimeError( + f"Received an invalid response from model server: {json.dumps(model_response_json)}" + ) from e output = model_response.output new_outputs.extend(output) From d49225ac743006d1e9ad1716015408367cc8e0c5 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:39:37 -0700 Subject: [PATCH 17/52] comment Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 42dd34c51..931e59ff7 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -80,6 +80,7 @@ def __init__(self, *args, **kwargs) -> None: class GlobalHTTPXAsyncClientConfig(BaseModel): + # These are httpx defaults. global_httpx_max_connections: int = 100 global_httpx_max_keepalive_connections: int = 20 From 618e7bdb84a04ecc43082240465cac2fefedcf60 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:41:49 -0700 Subject: [PATCH 18/52] clean headers Signed-off-by: Brian Yu --- nemo_gym/rollout_collection.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nemo_gym/rollout_collection.py b/nemo_gym/rollout_collection.py index d8816811d..6274d8c05 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -60,9 +60,7 @@ async def _collect_rollouts(config: RolloutCollectionConfig): # pragma: no cove async def _post_coroutine(i: int, row: dict) -> None: async with semaphore: - response = await server_client.post( - server_name=config.agent_name, url_path="/run", json=row, headers={"i": str(i)} - ) + response = await server_client.post(server_name=config.agent_name, url_path="/run", json=row) result = response.json() f.write(json.dumps(result) + "\n") metrics.update({k: v for k, v in result.items() if isinstance(v, 
(int, float))}) From 4350f5c9a5366f2c08fdec74e79fd55b4e8eb785 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 19:43:38 -0700 Subject: [PATCH 19/52] use model validate Signed-off-by: Brian Yu --- responses_api_models/vllm_model/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index 0622d51ed..f937199a6 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -192,7 +192,7 @@ async def chat_completions( chat_completion_dict["choices"][0].pop("token_ids") chat_completion_dict["choices"][0].pop("logprobs") - return NeMoGymChatCompletion(**chat_completion_dict) + return NeMoGymChatCompletion.model_validate(chat_completion_dict) class VLLMConverterResponsesToChatCompletionsState(BaseModel): From 6ac625e54cc5c4eb525711860a86f298c404d460 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 20:37:11 -0700 Subject: [PATCH 20/52] try set ulimit Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 931e59ff7..951a12a73 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -13,6 +13,7 @@ # limitations under the License. 
import asyncio import json +import resource from abc import abstractmethod from os import getenv from threading import Thread @@ -316,6 +317,25 @@ def load_config_from_global_config(cls) -> "BaseRunServerInstanceConfig": return server_config +# From https://github.com/vllm-project/vllm/blob/86647d1cd0f3c82c7d678324db7e925654ac5665/vllm/utils/__init__.py#L2810 +def set_ulimit(target_soft_limit=65535): + resource_type = resource.RLIMIT_NOFILE + current_soft, current_hard = resource.getrlimit(resource_type) + + if current_soft < target_soft_limit: + try: + resource.setrlimit(resource_type, (target_soft_limit, current_hard)) + except ValueError as e: + print( + "Found ulimit of %s and failed to automatically increase " + "with error %s. This can cause fd limit errors like " + "`OSError: [Errno 24] Too many open files`. Consider " + "increasing with ulimit -n", + current_soft, + e, + ) + + class SimpleServer(BaseServer): server_client: ServerClient @@ -358,6 +378,8 @@ def run_webserver(cls) -> None: # pragma: no cover app = server.setup_webserver() + set_ulimit() + uvicorn.run( app, host=server.config.host, From be15207de0b3274b5e75ddebec0139f7c3eae989 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 20:45:48 -0700 Subject: [PATCH 21/52] idk tweak Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 951a12a73..2a208ab37 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -22,7 +22,7 @@ import requests import uvicorn -from aiohttp import ClientSession, TCPConnector +from aiohttp import ClientSession, ClientTimeout, TCPConnector from fastapi import FastAPI, Request, Response from httpx import AsyncClient, Cookies, Limits, Response from httpx._types import ( @@ -81,9 +81,9 @@ def __init__(self, *args, **kwargs) -> None: class GlobalHTTPXAsyncClientConfig(BaseModel): - # These are httpx defaults. 
- global_httpx_max_connections: int = 100 - global_httpx_max_keepalive_connections: int = 20 + # These are OpenAI defaults. + global_httpx_max_connections: int = 1000 + global_httpx_max_keepalive_connections: int = 100 # Since we use AiohttpTransport, we don't support retries like with the default httpx transport. # global_httpx_max_retries: int = 0 @@ -107,14 +107,13 @@ def get_global_httpx_client( limits = Limits( max_connections=cfg.global_httpx_max_connections, max_keepalive_connections=cfg.global_httpx_max_keepalive_connections, - keepalive_expiry=1_000_000, # 1M seconds, some ridiculously big number to prevent client-side connection pool timeouts. ) client_session = ClientSession( connector=TCPConnector( limit=limits.max_connections, keepalive_timeout=limits.keepalive_expiry, ), - timeout=None, # No timeouts + timeout=ClientTimeout(connect=5.0), ) transport = AiohttpTransport( retries=0, # This value doesn't actually matter since AiohttpTransport won't retry anyways. From ff25bd39f408ba0073cc7441bf243f05d352419e Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 20:46:11 -0700 Subject: [PATCH 22/52] remove this ulimit Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 2a208ab37..3a2c89a4e 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -13,7 +13,6 @@ # limitations under the License. 
import asyncio import json -import resource from abc import abstractmethod from os import getenv from threading import Thread @@ -316,25 +315,6 @@ def load_config_from_global_config(cls) -> "BaseRunServerInstanceConfig": return server_config -# From https://github.com/vllm-project/vllm/blob/86647d1cd0f3c82c7d678324db7e925654ac5665/vllm/utils/__init__.py#L2810 -def set_ulimit(target_soft_limit=65535): - resource_type = resource.RLIMIT_NOFILE - current_soft, current_hard = resource.getrlimit(resource_type) - - if current_soft < target_soft_limit: - try: - resource.setrlimit(resource_type, (target_soft_limit, current_hard)) - except ValueError as e: - print( - "Found ulimit of %s and failed to automatically increase " - "with error %s. This can cause fd limit errors like " - "`OSError: [Errno 24] Too many open files`. Consider " - "increasing with ulimit -n", - current_soft, - e, - ) - - class SimpleServer(BaseServer): server_client: ServerClient @@ -377,8 +357,6 @@ def run_webserver(cls) -> None: # pragma: no cover app = server.setup_webserver() - set_ulimit() - uvicorn.run( app, host=server.config.host, From 33f691f04fd2a3f10523f73fc5bebeb8bb3ab9c0 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:06:43 -0700 Subject: [PATCH 23/52] start refactor to only use aiohttp Signed-off-by: Brian Yu --- nemo_gym/__init__.py | 4 - nemo_gym/openai_utils.py | 18 ---- nemo_gym/server_utils.py | 185 +++++++++++---------------------------- pyproject.toml | 5 -- 4 files changed, 52 insertions(+), 160 deletions(-) diff --git a/nemo_gym/__init__.py b/nemo_gym/__init__.py index 4cb1031c6..6a3c444ee 100644 --- a/nemo_gym/__init__.py +++ b/nemo_gym/__init__.py @@ -28,10 +28,6 @@ # Turn off Gradio analytics environ["GRADIO_ANALYTICS_ENABLED"] = "False" -# Quiet httpx INFO logs 200s -logger = logging.getLogger("httpx") -logger.setLevel(logging.WARNING) - from nemo_gym.package_info import ( __contact_emails__, __contact_names__, diff --git a/nemo_gym/openai_utils.py 
b/nemo_gym/openai_utils.py index 09de3d937..0f05c6a75 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -21,7 +21,6 @@ Union, ) -from openai import AsyncOpenAI from openai.types.chat import ( ChatCompletion, ChatCompletionAssistantMessageParam, @@ -75,8 +74,6 @@ from pydantic import BaseModel, ConfigDict, Field from typing_extensions import TypedDict -from nemo_gym.server_utils import get_global_httpx_client - ######################################## # Training-specific @@ -413,18 +410,3 @@ class NeMoGymChatCompletionCreateParamsNonStreaming(BaseModel): # Disallow deprecated args # function_call: FunctionCall # functions: Iterable[Function] - - -######################################## -# Clients -######################################## - - -class NeMoGymAsyncOpenAI(AsyncOpenAI): - def __init__(self, **kwargs) -> None: - # TODO: this setup is take from https://github.com/NVIDIA/NeMo-Skills/blob/80dc78ac758c4cac81c83a43a729e7ca1280857b/nemo_skills/inference/model/base.py#L318 - # However, there may still be a lingering issue regarding saturating at 100 max connections - kwargs["http_client"] = get_global_httpx_client(kwargs["base_url"]) - kwargs["timeout"] = None # Enforce no timeout - - super().__init__(**kwargs) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 3a2c89a4e..e6cf6db5d 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -16,23 +16,14 @@ from abc import abstractmethod from os import getenv from threading import Thread -from typing import Any, ClassVar, Dict, Literal, Optional, Tuple, Type, Union +from typing import ClassVar, Literal, Optional, Tuple, Type, Union, Unpack from uuid import uuid4 import requests import uvicorn -from aiohttp import ClientSession, ClientTimeout, TCPConnector +from aiohttp import ClientResponse, ClientSession, ClientTimeout, DummyCookieJar, TCPConnector +from aiohttp.client import _RequestOptions from fastapi import FastAPI, Request, Response -from httpx import 
AsyncClient, Cookies, Limits, Response -from httpx._types import ( - CookieTypes, - HeaderTypes, - QueryParamTypes, - RequestContent, - RequestData, - RequestFiles, -) -from httpx_aiohttp import AiohttpTransport from omegaconf import DictConfig, OmegaConf from pydantic import BaseModel, ConfigDict from requests.exceptions import ConnectionError @@ -52,83 +43,37 @@ ) -class NeMoGymStatelessCookies(Cookies): - def extract_cookies(self, response): - pass - - -class NeMoGymGlobalAsyncClient(AsyncClient): - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - self._cookies = NeMoGymStatelessCookies(self._cookies) - - -# We create a single global httpx client as recommended by https://www.python-httpx.org/async/ -# ``` -# In order to get the most benefit from connection pooling, make sure you're not instantiating multiple client instances - for example by using async with inside a "hot loop". This can be achieved either by having a single scoped client that's passed throughout wherever it's needed, or by having a single global client instance. -# ``` -# In plain language: -# - Let's say we have 10 distinct endpoints we want to call 5 times each. -# - A connection pool as defined by the httpx client is for a single distinct endpoint. All requests to that endpoint should use the same httpx client. -# - So the optimal configuration here is to have 10 total httpx clients, one for each distinct endpoint. -# - Additionally, since the connections are pooled, if we had a single global client for all 10 distinct endpoints, we may run into deadlock situations, -# where requests to two different endpoints are waiting for each other to resolve. -# -# We use no timeout since various api or model calls may take an indefinite amount of time. -_GLOBAL_HTTPX_CLIENTS: Dict[str, NeMoGymGlobalAsyncClient] = dict() +_GLOBAL_AIOHTTP_CLIENT: Union[None, ClientSession] = None -class GlobalHTTPXAsyncClientConfig(BaseModel): - # These are OpenAI defaults. 
- global_httpx_max_connections: int = 1000 - global_httpx_max_keepalive_connections: int = 100 +class GlobalAIOHTTPAsyncClientConfig(BaseModel): + global_aiohttp_max_connections: int = 1000 - # Since we use AiohttpTransport, we don't support retries like with the default httpx transport. - # global_httpx_max_retries: int = 0 - -def get_global_httpx_client( - base_url: str, +def get_global_aiohttp_client( global_config_dict_parser_config: Optional[GlobalConfigDictParserConfig] = None, global_config_dict_parser_cls: Type[GlobalConfigDictParser] = GlobalConfigDictParser, -) -> NeMoGymGlobalAsyncClient: - """THE NETWORKING PERFORMANCE OF GYM IS VERY SENSITIVE TO THE CONFIGURATION IN THIS FUNCTION. PLEASE DO NOT TOUCH IT.""" - if base_url in _GLOBAL_HTTPX_CLIENTS: - return _GLOBAL_HTTPX_CLIENTS[base_url] +) -> ClientSession: + global _GLOBAL_AIOHTTP_CLIENT + + if _GLOBAL_AIOHTTP_CLIENT is not None: + return _GLOBAL_AIOHTTP_CLIENT global_config_dict = get_global_config_dict( global_config_dict_parser_config=global_config_dict_parser_config, global_config_dict_parser_cls=global_config_dict_parser_cls, ) - cfg = GlobalHTTPXAsyncClientConfig.model_validate(global_config_dict) + cfg = GlobalAIOHTTPAsyncClientConfig.model_validate(global_config_dict) - limits = Limits( - max_connections=cfg.global_httpx_max_connections, - max_keepalive_connections=cfg.global_httpx_max_keepalive_connections, - ) client_session = ClientSession( - connector=TCPConnector( - limit=limits.max_connections, - keepalive_timeout=limits.keepalive_expiry, - ), + connector=TCPConnector(limit=cfg.global_aiohttp_max_connections), timeout=ClientTimeout(connect=5.0), - ) - transport = AiohttpTransport( - retries=0, # This value doesn't actually matter since AiohttpTransport won't retry anyways. 
- limits=limits, - client=client_session, + cookie_jar=DummyCookieJar(), ) - client = NeMoGymGlobalAsyncClient( - limits=limits, - transport=transport, - timeout=None, # No timeouts - ) - - _GLOBAL_HTTPX_CLIENTS[base_url] = client + _GLOBAL_AIOHTTP_CLIENT = client_session - return client + return _GLOBAL_AIOHTTP_CLIENT DEFAULT_HEAD_SERVER_PORT = 11000 @@ -177,36 +122,20 @@ def load_from_global_config(cls, head_server_config: Optional[BaseServerConfig] def _build_server_base_url(self, server_config_dict: OmegaConf) -> str: return f"http://{server_config_dict.host}:{server_config_dict.port}" - async def get( - self, - server_name: str, - url_path: str, - params: QueryParamTypes | None = None, - headers: HeaderTypes | None = None, - cookies: CookieTypes | None = None, - **kwargs, - ) -> Response: - """ - This function definition is directly copied from httpx._client.AsyncClient. We omit some kwargs since they are most likely not used. We omit the url arg and replace it with the `server_name` and `url_path` args below. - - Args: - server_name: str - The name of the server you are trying to call. - url_path: str - The URL path in the server you are trying to call e.g. "/v1/responses". 
+ async def request( + self, server_name: str, url_path: str, method: str, **kwargs: Unpack[_RequestOptions] + ) -> ClientResponse: + client = get_global_aiohttp_client() - """ server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) num_tries = 1 while True: try: - return await get_global_httpx_client(base_url).get( - f"{base_url}{url_path}", - params=params, - headers=headers, - cookies=cookies, + return await client.request( + method=method, + url=f"{base_url}{url_path}", **kwargs, ) except Exception as e: @@ -221,22 +150,13 @@ async def get( num_tries += 1 await asyncio.sleep(0.5) - async def post( + async def get( self, server_name: str, url_path: str, - content: RequestContent | None = None, - data: RequestData | None = None, - files: RequestFiles | None = None, - json: Any | BaseModel | None = None, - params: QueryParamTypes | None = None, - headers: HeaderTypes | None = None, - cookies: CookieTypes | None = None, - **kwargs, - ) -> Response: + **kwargs: Unpack[_RequestOptions], + ) -> ClientResponse: """ - This function definition is directly copied from httpx._client.AsyncClient. We omit some kwargs since they are most likely not used. We omit the url arg and replace it with the `server_name` and `url_path` args below. - Args: server_name: str The name of the server you are trying to call. @@ -244,34 +164,33 @@ async def post( The URL path in the server you are trying to call e.g. "/v1/responses". 
""" - server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) - base_url = self._build_server_base_url(server_config_dict) + return self.request( + server_name=server_name, + url_path=url_path, + method="GET", + **kwargs, + ) - num_tries = 1 - while True: - try: - return await get_global_httpx_client(base_url).post( - f"{base_url}{url_path}", - content=content, - data=data, - files=files, - json=json.model_dump(exclude_unset=True) if isinstance(json, BaseModel) else json, - params=params, - headers=headers, - cookies=cookies, - **kwargs, - ) - except Exception as e: - print( - f"""Hit an exception while making a request (try {num_tries}): {e} -Sleeping 0.5s and retrying... -""" - ) - if num_tries >= self.MAX_NUM_TRIES: - raise e + async def post( + self, + server_name: str, + url_path: str, + **kwargs: Unpack[_RequestOptions], + ) -> ClientResponse: + """ + Args: + server_name: str + The name of the server you are trying to call. + url_path: str + The URL path in the server you are trying to call e.g. "/v1/responses". 
- num_tries += 1 - await asyncio.sleep(0.5) + """ + return self.request( + server_name=server_name, + url_path=url_path, + method="POST", + **kwargs, + ) def poll_for_status(self, server_name: str) -> ServerStatus: # pragma: no cover if server_name == HEAD_SERVER_KEY_NAME: diff --git a/pyproject.toml b/pyproject.toml index 8cf4981f3..449c0ebb7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,11 +126,6 @@ dependencies = [ # Updated Wed Sep 17, 2025 with tdigest==0.5.2.2 # License: MIT https://github.com/CamDavidsonPilon/tdigest/blob/e35cfd708962ae5e9d1c5d2b15a99af7b2e2f323/LICENSE.txt "tdigest>=0.5.2.2", - - # httpx-aiohttp: a much faster transport backend as recommended here https://github.com/openai/openai-python/issues/1596#issuecomment-2709021063 - # Updated Fri Sep 19, 2025 with httpx-aiohttp==0.1.8 - # License: BSD 3-Clause https://github.com/karpetrosyan/httpx-aiohttp/blob/e11f0da1580fe73737719e0274a2c8cd6c77a28a/LICENSE - "httpx-aiohttp", ] [dependency-groups] From ab03285527d943b2e001fb7b55ef0a8c5e828124 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:27:06 -0700 Subject: [PATCH 24/52] start refactor Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 32 ++++++++++++++++++ nemo_gym/rollout_collection.py | 2 +- nemo_gym/server_utils.py | 4 +-- responses_api_models/openai_model/app.py | 25 +++++---------- responses_api_models/vllm_model/app.py | 41 +++++++++--------------- 5 files changed, 59 insertions(+), 45 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index 0f05c6a75..3af03e297 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -74,6 +74,8 @@ from pydantic import BaseModel, ConfigDict, Field from typing_extensions import TypedDict +from nemo_gym.server_utils import get_global_aiohttp_client + ######################################## # Training-specific @@ -410,3 +412,33 @@ class NeMoGymChatCompletionCreateParamsNonStreaming(BaseModel): # Disallow deprecated args # 
function_call: FunctionCall # functions: Iterable[Function] + + +######################################## +# Clients +######################################## + + +class NeMoGymAsyncOpenAI(BaseModel): + """This is just a stub class that wraps around aiohttp""" + + base_url: str + api_key: str + + async def create_chat_completions(self, **kwargs): + client = get_global_aiohttp_client() + response = await client.post( + url=f"{self.base_url}/chat/completions", + json=kwargs, + headers={"Authorization": f"Bearer {self.api_key}"}, + ) + return await response.json() + + async def create_responses(self, **kwargs): + client = get_global_aiohttp_client() + response = await client.post( + url=f"{self.base_url}/responses", + json=kwargs, + headers={"Authorization": f"Bearer {self.api_key}"}, + ) + return await response.json() diff --git a/nemo_gym/rollout_collection.py b/nemo_gym/rollout_collection.py index 6274d8c05..4a90e65cb 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -61,7 +61,7 @@ async def _collect_rollouts(config: RolloutCollectionConfig): # pragma: no cove async def _post_coroutine(i: int, row: dict) -> None: async with semaphore: response = await server_client.post(server_name=config.agent_name, url_path="/run", json=row) - result = response.json() + result = await response.json() f.write(json.dumps(result) + "\n") metrics.update({k: v for k, v in result.items() if isinstance(v, (int, float))}) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index e6cf6db5d..96d2cec7b 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -164,7 +164,7 @@ async def get( The URL path in the server you are trying to call e.g. "/v1/responses". """ - return self.request( + return await self.request( server_name=server_name, url_path=url_path, method="GET", @@ -185,7 +185,7 @@ async def post( The URL path in the server you are trying to call e.g. "/v1/responses". 
""" - return self.request( + return await self.request( server_name=server_name, url_path=url_path, method="POST", diff --git a/responses_api_models/openai_model/app.py b/responses_api_models/openai_model/app.py index 2f2ca05da..b3dfc3dc8 100644 --- a/responses_api_models/openai_model/app.py +++ b/responses_api_models/openai_model/app.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union from nemo_gym.base_responses_api_model import ( BaseResponsesAPIModelConfig, @@ -37,34 +36,26 @@ class SimpleModelServer(SimpleResponsesAPIModel): config: SimpleModelServerConfig def model_post_init(self, context): - self._client: Union[None, NeMoGymAsyncOpenAI] = None + self._client = NeMoGymAsyncOpenAI( + base_url=self.config.openai_base_url, + api_key=self.config.openai_api_key, + ) return super().model_post_init(context) - @property - def client(self) -> NeMoGymAsyncOpenAI: - # We do lazy init here since NeMoGymAsyncOpenAI requires a running event loop. 
- if self._client is None: - self._client = NeMoGymAsyncOpenAI( - base_url=self.config.openai_base_url, - api_key=self.config.openai_api_key, - ) - - return self._client - async def responses(self, body: NeMoGymResponseCreateParamsNonStreaming = Body()) -> NeMoGymResponse: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response = await self.client.responses.create(**body_dict) - return NeMoGymResponse(**openai_response.model_dump()) + openai_response_dict = await self._client.create_responses(**body_dict) + return NeMoGymResponse.model_validate(openai_response_dict) async def chat_completions( self, body: NeMoGymChatCompletionCreateParamsNonStreaming = Body() ) -> NeMoGymChatCompletion: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response = await self.client.chat.completions.create(**body_dict) - return NeMoGymChatCompletion(**openai_response.model_dump()) + openai_response_dict = await self._client.create_chat_completions(**body_dict) + return NeMoGymChatCompletion.model_validate(openai_response_dict) if __name__ == "__main__": diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index f937199a6..8c11fabdd 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -76,7 +76,13 @@ class VLLMModel(SimpleResponsesAPIModel): config: VLLMModelConfig def model_post_init(self, context): - self._clients: Union[None, List[NeMoGymAsyncOpenAI]] = None + self._clients = [ + NeMoGymAsyncOpenAI( + base_url=base_url, + api_key=self.config.api_key, + ) + for base_url in self.config.base_url + ] self._session_id_to_client: Dict[str, NeMoGymAsyncOpenAI] = dict() @@ -84,19 +90,6 @@ def model_post_init(self, context): return super().model_post_init(context) - @property - def clients(self) -> List[NeMoGymAsyncOpenAI]: - if self._clients is None: - self._clients: 
List[NeMoGymAsyncOpenAI] = [ - NeMoGymAsyncOpenAI( - base_url=base_url, - api_key=self.config.api_key, - ) - for base_url in self.config.base_url - ] - - return self._clients - async def responses( self, request: Request, body: NeMoGymResponseCreateParamsNonStreaming = Body() ) -> NeMoGymResponse: @@ -166,31 +159,29 @@ async def chat_completions( }, ) - openai_response = await client.chat.completions.create(**create_params) - assert not getattr(openai_response.choices[0].message, "reasoning_content", None), ( + chat_completion_dict = await client.create_chat_completions(**create_params) + choice_dict = chat_completion_dict["choices"][0] + assert "reasoning_content" not in choice_dict["message"], ( "Please do not use a reasoning parser in vLLM! There is one source of truth for handling data (including reasoning), which is NeMo Gym!" ) - openai_response: NeMoGymChatCompletion - - chat_completion_dict = openai_response.model_dump() if self.config.return_token_id_information: - log_probs = openai_response.choices[0].logprobs.content - generation_log_probs = [log_prob.logprob for log_prob in log_probs] + log_probs = choice_dict["logprobs"]["content"] + generation_log_probs = [log_prob["logprob"] for log_prob in log_probs] - message_dict = chat_completion_dict["choices"][0]["message"] + message_dict = choice_dict["message"] message_dict.update( dict( prompt_token_ids=chat_completion_dict["prompt_token_ids"], - generation_token_ids=openai_response.choices[0].token_ids, + generation_token_ids=choice_dict["token_ids"], generation_log_probs=generation_log_probs, ) ) # Clean the duplicated information chat_completion_dict.pop("prompt_token_ids") - chat_completion_dict["choices"][0].pop("token_ids") - chat_completion_dict["choices"][0].pop("logprobs") + choice_dict.pop("token_ids") + choice_dict.pop("logprobs") return NeMoGymChatCompletion.model_validate(chat_completion_dict) From c0c09eba3675d366f2253cabb8f82e3c9ca1c0f2 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 
Sep 2025 21:27:18 -0700 Subject: [PATCH 25/52] fixes Signed-off-by: Brian Yu --- resources_servers/equivalence_llm_judge/app.py | 2 +- resources_servers/google_search/client.py | 2 +- resources_servers/library_judge_math/app.py | 2 +- resources_servers/library_judge_math/client.py | 2 +- resources_servers/multiverse_math_hard/client.py | 2 +- resources_servers/python_math_exec/client.py | 2 +- resources_servers/simple_weather/client.py | 2 +- resources_servers/stateful_counter/client.py | 2 +- resources_servers/workbench/client.py | 2 +- responses_api_agents/simple_agent/app.py | 8 +++++--- responses_api_agents/simple_agent/client.py | 2 +- responses_api_models/openai_model/client.py | 4 ++-- responses_api_models/vllm_model/client.py | 8 ++++---- 13 files changed, 21 insertions(+), 19 deletions(-) diff --git a/resources_servers/equivalence_llm_judge/app.py b/resources_servers/equivalence_llm_judge/app.py index 810151798..8610c517e 100644 --- a/resources_servers/equivalence_llm_judge/app.py +++ b/resources_servers/equivalence_llm_judge/app.py @@ -288,7 +288,7 @@ async def _generate_judge_evaluation( url_path="/v1/responses", json=responses_create_params, ) - judge_response = NeMoGymResponse.model_validate(response.json()) + judge_response = NeMoGymResponse.model_validate(await response.json()) eval_record = JudgeEvaluation( responses_create_params=responses_create_params, response=judge_response, diff --git a/resources_servers/google_search/client.py b/resources_servers/google_search/client.py index a81534ea8..5bc4dc6f4 100644 --- a/resources_servers/google_search/client.py +++ b/resources_servers/google_search/client.py @@ -68,4 +68,4 @@ ), ) result = run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(run(result.json()), indent=4)) diff --git a/resources_servers/library_judge_math/app.py b/resources_servers/library_judge_math/app.py index c7a7a21a2..941f59131 100644 --- a/resources_servers/library_judge_math/app.py +++ 
b/resources_servers/library_judge_math/app.py @@ -252,7 +252,7 @@ async def _generate_judge_evaluation( url_path="/v1/responses", json=responses_create_params, ) - judge_response = NeMoGymResponse.model_validate(response.json()) + judge_response = NeMoGymResponse.model_validate(await response.json()) judge_evaluation = JudgeEvaluation(responses_create_params=responses_create_params, response=judge_response) # Currently, for all the cases in which the response from the LLM judge diff --git a/resources_servers/library_judge_math/client.py b/resources_servers/library_judge_math/client.py index d551b41c7..90f7a0531 100644 --- a/resources_servers/library_judge_math/client.py +++ b/resources_servers/library_judge_math/client.py @@ -59,4 +59,4 @@ }, ) result = asyncio.run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(asyncio.run(result.json()), indent=4)) diff --git a/resources_servers/multiverse_math_hard/client.py b/resources_servers/multiverse_math_hard/client.py index 803abdd83..89d281d0d 100644 --- a/resources_servers/multiverse_math_hard/client.py +++ b/resources_servers/multiverse_math_hard/client.py @@ -27,4 +27,4 @@ }, ) result = run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(run(result.json()), indent=4)) diff --git a/resources_servers/python_math_exec/client.py b/resources_servers/python_math_exec/client.py index 5db309e3b..295216064 100644 --- a/resources_servers/python_math_exec/client.py +++ b/resources_servers/python_math_exec/client.py @@ -30,4 +30,4 @@ }, ) result = run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(run(result.json()), indent=4)) diff --git a/resources_servers/simple_weather/client.py b/resources_servers/simple_weather/client.py index 684cd4893..a50b57cc9 100644 --- a/resources_servers/simple_weather/client.py +++ b/resources_servers/simple_weather/client.py @@ -26,4 +26,4 @@ }, ) result = run(task) -print(json.dumps(result.json(), indent=4)) 
+print(json.dumps(run(result.json()), indent=4)) diff --git a/resources_servers/stateful_counter/client.py b/resources_servers/stateful_counter/client.py index ab107b691..104473501 100644 --- a/resources_servers/stateful_counter/client.py +++ b/resources_servers/stateful_counter/client.py @@ -63,4 +63,4 @@ }, ) result = run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(run(result.json()), indent=4)) diff --git a/resources_servers/workbench/client.py b/resources_servers/workbench/client.py index 2da2dcb80..43d3d61bc 100644 --- a/resources_servers/workbench/client.py +++ b/resources_servers/workbench/client.py @@ -27,4 +27,4 @@ }, ) result = run(task) -print(json.dumps(result.json(), indent=4)) +print(json.dumps(run(result.json()), indent=4)) diff --git a/responses_api_agents/simple_agent/app.py b/responses_api_agents/simple_agent/app.py index 028e3bb24..8113d068e 100644 --- a/responses_api_agents/simple_agent/app.py +++ b/responses_api_agents/simple_agent/app.py @@ -85,7 +85,7 @@ async def responses( json=new_body, cookies=model_server_cookies, ) - model_response_json = model_response.json() + model_response_json = await model_response.json() model_server_cookies = model_response.cookies try: model_response = NeMoGymResponse.model_validate(model_response_json) @@ -150,7 +150,9 @@ async def run(self, request: Request, body: SimpleAgentRunRequest) -> SimpleAgen ) cookies = response.cookies - verify_request = SimpleAgentVerifyRequest.model_validate(body.model_dump() | {"response": response.json()}) + verify_request = SimpleAgentVerifyRequest.model_validate( + body.model_dump() | {"response": await response.json()} + ) verify_response = await self.server_client.post( server_name=self.config.resources_server.name, @@ -158,7 +160,7 @@ async def run(self, request: Request, body: SimpleAgentRunRequest) -> SimpleAgen json=verify_request.model_dump(), cookies=cookies, ) - return SimpleAgentVerifyResponse.model_validate(verify_response.json()) + return 
SimpleAgentVerifyResponse.model_validate(await verify_response.json()) if __name__ == "__main__": diff --git a/responses_api_agents/simple_agent/client.py b/responses_api_agents/simple_agent/client.py index 1d95705e2..29c0234bc 100644 --- a/responses_api_agents/simple_agent/client.py +++ b/responses_api_agents/simple_agent/client.py @@ -52,4 +52,4 @@ ), ) result = run(task) -print(json.dumps(result.json()["output"], indent=4)) +print(json.dumps(run(result.json())["output"], indent=4)) diff --git a/responses_api_models/openai_model/client.py b/responses_api_models/openai_model/client.py index d88cf7071..dd107ee20 100644 --- a/responses_api_models/openai_model/client.py +++ b/responses_api_models/openai_model/client.py @@ -32,8 +32,8 @@ async def main(): "messages": [{"role": "user", "content": "hello"}], }, ) - print(task_1.json()) - print(task_2.json()) + print(await task_1.json()) + print(await task_2.json()) if __name__ == "__main__": diff --git a/responses_api_models/vllm_model/client.py b/responses_api_models/vllm_model/client.py index 48b02c582..48311f13e 100644 --- a/responses_api_models/vllm_model/client.py +++ b/responses_api_models/vllm_model/client.py @@ -90,10 +90,10 @@ async def main(): ], }, ) - print(task_1a.json()) - print(task_1b.json()) - print(task_2a.json()) - print(task_2b.json()) + print(await task_1a.json()) + print(await task_1b.json()) + print(await task_2a.json()) + print(await task_2b.json()) if __name__ == "__main__": From ef6c43fb4c2b1d63e663d60d8b54cb2e012fbbaa Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:29:57 -0700 Subject: [PATCH 26/52] fixes Signed-off-by: Brian Yu --- tests/unit_tests/test_server_utils.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/unit_tests/test_server_utils.py b/tests/unit_tests/test_server_utils.py index 54d5b6664..6a64c32da 100644 --- a/tests/unit_tests/test_server_utils.py +++ b/tests/unit_tests/test_server_utils.py @@ -107,11 +107,10 @@ async def 
test_ServerClient_get_post_sanity(self, monkeypatch: MonkeyPatch) -> N ) httpx_client_mock = MagicMock() - httpx_client_get_post_mock = AsyncMock() - httpx_client_get_post_mock.return_value = "my mock response" - httpx_client_mock.return_value.get = httpx_client_get_post_mock - httpx_client_mock.return_value.post = httpx_client_get_post_mock - monkeypatch.setattr(nemo_gym.server_utils, "get_global_httpx_client", httpx_client_mock) + httpx_client_request_mock = AsyncMock() + httpx_client_request_mock.return_value = "my mock response" + httpx_client_mock.return_value.request = httpx_client_request_mock + monkeypatch.setattr(nemo_gym.server_utils, "get_global_aiohttp_client", httpx_client_mock) actual_response = await server_client.get( server_name="my_server", From fb3a4c51723a85a557082ea0b459500ac4d132ef Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:44:30 -0700 Subject: [PATCH 27/52] fix name Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 4 ++-- responses_api_models/openai_model/app.py | 4 ++-- responses_api_models/vllm_model/app.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index 3af03e297..acaa87c2f 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -425,7 +425,7 @@ class NeMoGymAsyncOpenAI(BaseModel): base_url: str api_key: str - async def create_chat_completions(self, **kwargs): + async def create_chat_completion(self, **kwargs): client = get_global_aiohttp_client() response = await client.post( url=f"{self.base_url}/chat/completions", @@ -434,7 +434,7 @@ async def create_chat_completions(self, **kwargs): ) return await response.json() - async def create_responses(self, **kwargs): + async def create_response(self, **kwargs): client = get_global_aiohttp_client() response = await client.post( url=f"{self.base_url}/responses", diff --git a/responses_api_models/openai_model/app.py b/responses_api_models/openai_model/app.py index 
b3dfc3dc8..11c3c2f8a 100644 --- a/responses_api_models/openai_model/app.py +++ b/responses_api_models/openai_model/app.py @@ -46,7 +46,7 @@ def model_post_init(self, context): async def responses(self, body: NeMoGymResponseCreateParamsNonStreaming = Body()) -> NeMoGymResponse: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response_dict = await self._client.create_responses(**body_dict) + openai_response_dict = await self._client.create_response(**body_dict) return NeMoGymResponse.model_validate(openai_response_dict) async def chat_completions( @@ -54,7 +54,7 @@ async def chat_completions( ) -> NeMoGymChatCompletion: body_dict = body.model_dump(exclude_unset=True) body_dict.setdefault("model", self.config.openai_model) - openai_response_dict = await self._client.create_chat_completions(**body_dict) + openai_response_dict = await self._client.create_chat_completion(**body_dict) return NeMoGymChatCompletion.model_validate(openai_response_dict) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index 8c11fabdd..77b7ee39b 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -159,7 +159,7 @@ async def chat_completions( }, ) - chat_completion_dict = await client.create_chat_completions(**create_params) + chat_completion_dict = await client.create_chat_completion(**create_params) choice_dict = chat_completion_dict["choices"][0] assert "reasoning_content" not in choice_dict["message"], ( "Please do not use a reasoning parser in vLLM! There is one source of truth for handling data (including reasoning), which is NeMo Gym!" 
From a5a5aa07f4b003cc4c7ec76265bba87d0131d09b Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:45:16 -0700 Subject: [PATCH 28/52] test fixes Signed-off-by: Brian Yu --- .../openai_model/tests/test_app.py | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/responses_api_models/openai_model/tests/test_app.py b/responses_api_models/openai_model/tests/test_app.py index 18d7fb716..7a3922786 100644 --- a/responses_api_models/openai_model/tests/test_app.py +++ b/responses_api_models/openai_model/tests/test_app.py @@ -11,14 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import AsyncMock, MagicMock, PropertyMock +from unittest.mock import AsyncMock, MagicMock from fastapi.testclient import TestClient from pytest import MonkeyPatch -from nemo_gym.openai_utils import NeMoGymChatCompletion, NeMoGymResponse from nemo_gym.server_utils import ServerClient from responses_api_models.openai_model.app import ( + NeMoGymAsyncOpenAI, SimpleModelServer, SimpleModelServerConfig, ) @@ -67,12 +67,10 @@ async def test_chat_completions(self, monkeypatch: MonkeyPatch) -> None: async def mock_create_chat(**kwargs): nonlocal called_args_chat called_args_chat = kwargs - return NeMoGymChatCompletion(**mock_chat_data) + return mock_chat_data - mock_chat = AsyncMock(side_effect=mock_create_chat) - mock_client = PropertyMock() - mock_client.return_value.chat.completions.create = mock_chat - monkeypatch.setattr(type(server), "client", mock_client) + server._client = MagicMock(spec=NeMoGymAsyncOpenAI) + server._client.create_chat_completion = AsyncMock(side_effect=mock_create_chat) chat_no_model = client.post( "/v1/chat/completions", @@ -91,7 +89,7 @@ async def mock_create_chat(**kwargs): assert chat_with_model.status_code == 200 assert called_args_chat.get("model") == "override_model" - 
mock_chat.assert_any_await( + server._client.create_chat_completion.assert_any_await( messages=[{"role": "user", "content": "hi"}], model="override_model", ) @@ -128,15 +126,13 @@ async def test_responses(self, monkeypatch: MonkeyPatch) -> None: called_args_response = {} - async def mock_create_responses(**kwargs): + async def mock_create_response(**kwargs): nonlocal called_args_response called_args_response = kwargs - return NeMoGymResponse(**mock_response_data) + return mock_response_data - mock_response = AsyncMock(side_effect=mock_create_responses) - mock_client = PropertyMock() - mock_client.return_value.responses.create = mock_response - monkeypatch.setattr(type(server), "client", mock_client) + server._client = MagicMock(spec=NeMoGymAsyncOpenAI) + server._client.create_response = AsyncMock(side_effect=mock_create_response) # No model provided should use the one from the config res_no_model = client.post("/v1/responses", json={"input": "hello"}) @@ -148,4 +144,4 @@ async def mock_create_responses(**kwargs): assert res_with_model.status_code == 200 assert called_args_response.get("model") == "override_model" - mock_response.assert_any_await(input="hello", model="override_model") + server._client.create_response.assert_any_await(input="hello", model="override_model") From 2e11e45aeb0279ac3d386293010cd3944c52d5e1 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:47:48 -0700 Subject: [PATCH 29/52] fixes Signed-off-by: Brian Yu --- responses_api_models/vllm_model/app.py | 4 ++-- .../vllm_model/tests/test_app.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index 77b7ee39b..eee989caa 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -142,8 +142,8 @@ async def chat_completions( session_id = request.session[SESSION_ID_KEY] if session_id not in self._session_id_to_client: # There is 
probably a better way to select the endpoint for this request. But this will do for now. - client_idx = len(self._session_id_to_client) % len(self.clients) - client = self.clients[client_idx] + client_idx = len(self._session_id_to_client) % len(self._clients) + client = self._clients[client_idx] self._session_id_to_client[session_id] = client client = self._session_id_to_client[session_id] diff --git a/responses_api_models/vllm_model/tests/test_app.py b/responses_api_models/vllm_model/tests/test_app.py index 574417b27..d48b0d920 100644 --- a/responses_api_models/vllm_model/tests/test_app.py +++ b/responses_api_models/vllm_model/tests/test_app.py @@ -13,7 +13,7 @@ # limitations under the License. import json from typing import Any, Union -from unittest.mock import AsyncMock, MagicMock, PropertyMock +from unittest.mock import AsyncMock, MagicMock from fastapi.testclient import TestClient from pytest import MonkeyPatch, mark @@ -1475,6 +1475,8 @@ def test_client_session_routing(self, monkeypatch: MonkeyPatch): server = VLLMModel(config=config, server_client=MagicMock(spec=ServerClient)) app = server.setup_webserver() + assert len(server._clients) == 2 + mock_chat_completion = NeMoGymChatCompletion( id="chtcmpl", object="chat.completion", @@ -1507,19 +1509,17 @@ def test_client_session_routing(self, monkeypatch: MonkeyPatch): mock_chat_completion_1 = mock_chat_completion.model_copy(deep=True) mock_chat_completion_1.choices[0].message.content = "1" - mock_method_1 = AsyncMock(return_value=mock_chat_completion_1) + mock_method_1 = AsyncMock(return_value=mock_chat_completion_1.model_dump()) client_1 = MagicMock(spec=NeMoGymAsyncOpenAI) - client_1.chat.completions.create = mock_method_1 + client_1.create_chat_completion = mock_method_1 mock_chat_completion_2 = mock_chat_completion.model_copy(deep=True) mock_chat_completion_2.choices[0].message.content = "2" - mock_method_2 = AsyncMock(return_value=mock_chat_completion_2) + mock_method_2 = 
AsyncMock(return_value=mock_chat_completion_2.model_dump()) client_2 = MagicMock(spec=NeMoGymAsyncOpenAI) - client_2.chat.completions.create = mock_method_2 + client_2.create_chat_completion = mock_method_2 - mock_clients = PropertyMock() - mock_clients.return_value = [client_1, client_2] - monkeypatch.setattr(type(server), "clients", mock_clients) + server._clients = [client_1, client_2] # Test first query by client 1 goes to underlying client 1 client_1 = TestClient(app) From cc6dfe95e5568764ba7c5d512f6fe6210fc453c3 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:51:14 -0700 Subject: [PATCH 30/52] fixes Signed-off-by: Brian Yu --- responses_api_agents/simple_agent/tests/test_app.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/responses_api_agents/simple_agent/tests/test_app.py b/responses_api_agents/simple_agent/tests/test_app.py index daaa0c299..5c8694619 100644 --- a/responses_api_agents/simple_agent/tests/test_app.py +++ b/responses_api_agents/simple_agent/tests/test_app.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import MagicMock, call +from unittest.mock import AsyncMock, MagicMock, call from fastapi.testclient import TestClient from pytest import MonkeyPatch @@ -93,8 +93,9 @@ async def test_responses(self, monkeypatch: MonkeyPatch) -> None: "tools": [], } - dotjson_mock = MagicMock() + dotjson_mock = AsyncMock() dotjson_mock.json.return_value = mock_response_data + dotjson_mock.cookies = MagicMock() server.server_client.post.return_value = dotjson_mock # No model provided should use the one from the config @@ -223,8 +224,9 @@ async def test_responses_continues_on_reasoning_only(self, monkeypatch: MonkeyPa "tools": [], } - dotjson_mock = MagicMock() + dotjson_mock = AsyncMock() dotjson_mock.json.side_effect = [mock_response_reasoning_data, mock_response_chat_data] + dotjson_mock.cookies = MagicMock() server.server_client.post.return_value = dotjson_mock # No model provided should use the one from the config From d5139c4f3418b15928917db3be1cfbe25510ecf8 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 21:53:37 -0700 Subject: [PATCH 31/52] fixes Signed-off-by: Brian Yu --- resources_servers/stateful_counter/tests/test_app.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/resources_servers/stateful_counter/tests/test_app.py b/resources_servers/stateful_counter/tests/test_app.py index 8f18111f8..124069b37 100644 --- a/resources_servers/stateful_counter/tests/test_app.py +++ b/resources_servers/stateful_counter/tests/test_app.py @@ -14,8 +14,9 @@ from unittest.mock import MagicMock from fastapi.testclient import TestClient +from httpx import Cookies -from nemo_gym.server_utils import NeMoGymStatelessCookies, ServerClient +from nemo_gym.server_utils import ServerClient from resources_servers.stateful_counter.app import StatefulCounterResourcesServer, StatefulCounterResourcesServerConfig @@ -32,8 +33,11 @@ def test_sanity(self) -> None: app = server.setup_webserver() client = TestClient(app) - # This is the same 
override as in NeMoGymGlobalAsyncClient - client._cookies = NeMoGymStatelessCookies(client._cookies) + class StatelessCookies(Cookies): + def extract_cookies(self, response): + pass + + client._cookies = StatelessCookies(client._cookies) # Check that we are at 0 response = client.post("/get_counter_value") From 1db456e72de8580803952eb47dd81a55ad844861 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:03:28 -0700 Subject: [PATCH 32/52] runtime fixes Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 5 +++++ responses_api_agents/simple_agent/app.py | 2 +- responses_api_models/vllm_model/app.py | 14 +++++++------- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 96d2cec7b..8d00d79a0 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -130,6 +130,11 @@ async def request( server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) + if "json" in kwargs: + json_obj = kwargs["json"] + if isinstance(json_obj, BaseModel): + kwargs["json"] = json_obj.model_dump(exclude_unset=True) + num_tries = 1 while True: try: diff --git a/responses_api_agents/simple_agent/app.py b/responses_api_agents/simple_agent/app.py index 8113d068e..102860af1 100644 --- a/responses_api_agents/simple_agent/app.py +++ b/responses_api_agents/simple_agent/app.py @@ -116,7 +116,7 @@ async def responses( tool_response = NeMoGymFunctionCallOutput( type="function_call_output", call_id=output_function_call.call_id, - output=api_response.content.decode(), + output=(await api_response.content.read()).decode(), ) new_outputs.append(tool_response) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index eee989caa..224bb6fa6 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -151,17 +151,16 @@ async def chat_completions( if 
self.config.return_token_id_information: create_params |= dict( logprobs=True, - extra_body={ - # For prompt and generatino token IDs - "return_token_ids": True, - # For prompt token IDs - "prompt_logprobs": 0, - }, + # Typically passed via OpenAI client extra_body. + # For prompt and generation token IDs + return_token_ids=True, + # For prompt token IDs + prompt_logprobs=0, ) chat_completion_dict = await client.create_chat_completion(**create_params) choice_dict = chat_completion_dict["choices"][0] - assert "reasoning_content" not in choice_dict["message"], ( + assert not choice_dict["message"].get("reasoning_content"), ( "Please do not use a reasoning parser in vLLM! There is one source of truth for handling data (including reasoning), which is NeMo Gym!" ) @@ -183,6 +182,7 @@ async def chat_completions( choice_dict.pop("token_ids") choice_dict.pop("logprobs") + print(chat_completion_dict) return NeMoGymChatCompletion.model_validate(chat_completion_dict) From 0436512dc3ae71ed46b1f6273e1adc5e0e218bb8 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:06:15 -0700 Subject: [PATCH 33/52] add shutdown Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 8d00d79a0..ffc703a18 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import asyncio +import atexit import json from abc import abstractmethod from os import getenv @@ -76,6 +77,16 @@ def get_global_aiohttp_client( return _GLOBAL_AIOHTTP_CLIENT +def global_aiohttp_client_exit(): + if _GLOBAL_AIOHTTP_CLIENT is None: + return + + asyncio.run(_GLOBAL_AIOHTTP_CLIENT.close()) + + +atexit.register(global_aiohttp_client_exit) + + DEFAULT_HEAD_SERVER_PORT = 11000 ServerStatus = Union[Literal["success"], Literal["connection_error"], Literal["timeout"], Literal["unknown_error"]] From 62943c323850d58b1183f1642cefd4d43871ca37 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:06:26 -0700 Subject: [PATCH 34/52] clean print Signed-off-by: Brian Yu --- responses_api_models/vllm_model/app.py | 1 - 1 file changed, 1 deletion(-) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index 224bb6fa6..d7782e751 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -182,7 +182,6 @@ async def chat_completions( choice_dict.pop("token_ids") choice_dict.pop("logprobs") - print(chat_completion_dict) return NeMoGymChatCompletion.model_validate(chat_completion_dict) From 0231e010442e0ed1af8f194a1c4a8cd24d21a09e Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:08:09 -0700 Subject: [PATCH 35/52] fixes Signed-off-by: Brian Yu --- resources_servers/library_judge_math/tests/test_app.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/resources_servers/library_judge_math/tests/test_app.py b/resources_servers/library_judge_math/tests/test_app.py index 53523ff44..f8a6afe65 100644 --- a/resources_servers/library_judge_math/tests/test_app.py +++ b/resources_servers/library_judge_math/tests/test_app.py @@ -116,7 +116,7 @@ def _create_response_output_message(self, message_text: str) -> NeMoGymResponseO async def test_verify(self, config: LibraryJudgeMathResourcesServerConfig) -> None: server_mock = MagicMock(spec=ServerClient) 
resources_server = LibraryJudgeMathResourcesServer(config=config, server_client=server_mock) - response_mock = MagicMock() + response_mock = AsyncMock() post_mock = MagicMock() post_mock.json = response_mock server_mock.post = AsyncMock(return_value=post_mock) @@ -213,7 +213,7 @@ async def test_verify(self, config: LibraryJudgeMathResourcesServerConfig) -> No async def test_verify_answer(self, config: LibraryJudgeMathResourcesServerConfig) -> None: server_mock = MagicMock(spec=ServerClient) resources_server = LibraryJudgeMathResourcesServer(config=config, server_client=server_mock) - response_mock = MagicMock() + response_mock = AsyncMock() post_mock = MagicMock() post_mock.json = response_mock server_mock.post = AsyncMock(return_value=post_mock) @@ -345,7 +345,7 @@ def test_verify_answer_with_library(self, config: LibraryJudgeMathResourcesServe async def test_verify_answer_with_judge(self, config: LibraryJudgeMathResourcesServerConfig) -> None: server_mock = MagicMock(spec=ServerClient) resources_server = LibraryJudgeMathResourcesServer(config=config, server_client=server_mock) - response_mock = MagicMock() + response_mock = AsyncMock() post_mock = MagicMock() post_mock.json = response_mock server_mock.post = AsyncMock(return_value=post_mock) @@ -486,7 +486,7 @@ async def test_generate_judge_evaluation(self, config: LibraryJudgeMathResources judge_config.judge_responses_create_params.max_output_tokens = 1024 server_mock = MagicMock(spec=ServerClient) resources_server = LibraryJudgeMathResourcesServer(config=judge_config, server_client=server_mock) - response_mock = MagicMock() + response_mock = AsyncMock() post_mock = MagicMock() post_mock.json = response_mock server_mock.post = AsyncMock(return_value=post_mock) From 2fe6eae50b151e19d04608910c9b69c8c473254d Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:09:17 -0700 Subject: [PATCH 36/52] fxies Signed-off-by: Brian Yu --- .../equivalence_llm_judge/tests/test_app.py | 10 +++++----- 1 file changed, 
5 insertions(+), 5 deletions(-) diff --git a/resources_servers/equivalence_llm_judge/tests/test_app.py b/resources_servers/equivalence_llm_judge/tests/test_app.py index ccc10db63..5d3cfc40e 100644 --- a/resources_servers/equivalence_llm_judge/tests/test_app.py +++ b/resources_servers/equivalence_llm_judge/tests/test_app.py @@ -85,7 +85,7 @@ async def test_verify_equal_then_confirm(self, config: LLMJudgeResourcesServerCo # First: judge says equal; Second: judge says equal => reward 1 post_mock = MagicMock() - post_mock.json = MagicMock() + post_mock.json = AsyncMock() server_mock.post = AsyncMock(return_value=post_mock) # Only the first call is used when check_twice_swap is False @@ -121,7 +121,7 @@ async def test_verify_equal_then_confirm(self, config: LLMJudgeResourcesServerCo rs_twice = LLMJudgeResourcesServer(config=config_twice, server_client=server_mock) post_mock2 = MagicMock() - post_mock2.json = MagicMock() + post_mock2.json = AsyncMock() server_mock.post = AsyncMock(return_value=post_mock2) post_mock2.json.side_effect = [ self._create_response("first", self._msg("[[A=B]]")), @@ -141,7 +141,7 @@ async def test_verify_not_equal_first(self, config: LLMJudgeResourcesServerConfi rs = LLMJudgeResourcesServer(config=config, server_client=server_mock) post_mock = MagicMock() - post_mock.json = MagicMock(return_value=self._create_response("f", self._msg("[[A!=B]]"))) + post_mock.json = AsyncMock(return_value=self._create_response("f", self._msg("[[A!=B]]"))) server_mock.post = AsyncMock(return_value=post_mock) model_create_params = NeMoGymResponseCreateParamsNonStreaming(input=[{"role": "user", "content": "Q: 1+1?"}]) @@ -170,7 +170,7 @@ async def test_unexpected_judge_output_defaults_to_not_equal(self, config: LLMJu rs = LLMJudgeResourcesServer(config=config, server_client=server_mock) post_mock = MagicMock() - post_mock.json = MagicMock(return_value=self._create_response("f", self._msg("no label present"))) + post_mock.json = 
AsyncMock(return_value=self._create_response("f", self._msg("no label present"))) server_mock.post = AsyncMock(return_value=post_mock) req = LLMJudgeVerifyRequest( @@ -198,7 +198,7 @@ async def test_swap_fails_uses_configured_reward(self, config: LLMJudgeResources rs = LLMJudgeResourcesServer(config=cfg, server_client=server_mock) post_mock = MagicMock() - post_mock.json = MagicMock() + post_mock.json = AsyncMock() server_mock.post = AsyncMock(return_value=post_mock) # First pass equal, second pass not equal -> use configured -1.0 post_mock.json.side_effect = [ From 21e51230af5094bfdde4d99f75cd3fc97118c33e Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:30:41 -0700 Subject: [PATCH 37/52] refactor into requests function Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 10 +++---- nemo_gym/server_utils.py | 60 +++++++++++++++++++++------------------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index acaa87c2f..d4b3542b4 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -74,7 +74,7 @@ from pydantic import BaseModel, ConfigDict, Field from typing_extensions import TypedDict -from nemo_gym.server_utils import get_global_aiohttp_client +from nemo_gym.server_utils import request ######################################## @@ -426,8 +426,8 @@ class NeMoGymAsyncOpenAI(BaseModel): api_key: str async def create_chat_completion(self, **kwargs): - client = get_global_aiohttp_client() - response = await client.post( + response = await request( + method="POST", url=f"{self.base_url}/chat/completions", json=kwargs, headers={"Authorization": f"Bearer {self.api_key}"}, @@ -435,8 +435,8 @@ async def create_chat_completion(self, **kwargs): return await response.json() async def create_response(self, **kwargs): - client = get_global_aiohttp_client() - response = await client.post( + response = await request( + method="POST", url=f"{self.base_url}/responses", json=kwargs, 
headers={"Authorization": f"Bearer {self.api_key}"}, diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index ffc703a18..7b02c9a6f 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -17,7 +17,7 @@ from abc import abstractmethod from os import getenv from threading import Thread -from typing import ClassVar, Literal, Optional, Tuple, Type, Union, Unpack +from typing import Literal, Optional, Tuple, Type, Union, Unpack from uuid import uuid4 import requests @@ -48,7 +48,8 @@ class GlobalAIOHTTPAsyncClientConfig(BaseModel): - global_aiohttp_max_connections: int = 1000 + global_aiohttp_connector_limit: int = 1000 + global_aiohttp_connector_limit_per_host: int = 100 def get_global_aiohttp_client( @@ -67,8 +68,11 @@ def get_global_aiohttp_client( cfg = GlobalAIOHTTPAsyncClientConfig.model_validate(global_config_dict) client_session = ClientSession( - connector=TCPConnector(limit=cfg.global_aiohttp_max_connections), - timeout=ClientTimeout(connect=5.0), + connector=TCPConnector( + limit=cfg.global_aiohttp_connector_limit, + limit_per_host=cfg.global_aiohttp_connector_limit_per_host, + ), + timeout=ClientTimeout(), cookie_jar=DummyCookieJar(), ) @@ -87,6 +91,29 @@ def global_aiohttp_client_exit(): atexit.register(global_aiohttp_client_exit) +# This is not intended to be changed. If you want to increase this, we should probably figure out how to improve server-side robustness. +MAX_NUM_TRIES = 3 + + +async def request(method: str, url: str, **kwargs: Unpack[_RequestOptions]) -> ClientResponse: + client = get_global_aiohttp_client() + num_tries = 1 + while True: + try: + return await client.request(method=method, url=url, **kwargs) + except Exception as e: + print( + f"""Hit an exception while making a request (try {num_tries}): {e} +Sleeping 0.5s and retrying... 
+""" + ) + if num_tries >= MAX_NUM_TRIES: + raise e + + num_tries += 1 + await asyncio.sleep(0.5) + + DEFAULT_HEAD_SERVER_PORT = 11000 ServerStatus = Union[Literal["success"], Literal["connection_error"], Literal["timeout"], Literal["unknown_error"]] @@ -99,9 +126,6 @@ class ServerClient(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) - # This is not intended to be changed. If you want to increase this, we should probably figure out how to improve server-side robustness. - MAX_NUM_TRIES: ClassVar[int] = 3 - @classmethod def load_head_server_config(cls) -> BaseServerConfig: global_config_dict = get_global_config_dict() @@ -136,8 +160,6 @@ def _build_server_base_url(self, server_config_dict: OmegaConf) -> str: async def request( self, server_name: str, url_path: str, method: str, **kwargs: Unpack[_RequestOptions] ) -> ClientResponse: - client = get_global_aiohttp_client() - server_config_dict = get_first_server_config_dict(self.global_config_dict, server_name) base_url = self._build_server_base_url(server_config_dict) @@ -146,25 +168,7 @@ async def request( if isinstance(json_obj, BaseModel): kwargs["json"] = json_obj.model_dump(exclude_unset=True) - num_tries = 1 - while True: - try: - return await client.request( - method=method, - url=f"{base_url}{url_path}", - **kwargs, - ) - except Exception as e: - print( - f"""Hit an exception while making a request (try {num_tries}): {e} -Sleeping 0.5s and retrying... 
-""" - ) - if num_tries >= self.MAX_NUM_TRIES: - raise e - - num_tries += 1 - await asyncio.sleep(0.5) + return await request(method=method, url=f"{base_url}{url_path}", **kwargs) async def get( self, From afc30b0ad2f699be7803d08538608cf08afc8239 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 22:42:51 -0700 Subject: [PATCH 38/52] dont increment retries for server disconnected error Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 7b02c9a6f..cec53e13b 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -22,7 +22,7 @@ import requests import uvicorn -from aiohttp import ClientResponse, ClientSession, ClientTimeout, DummyCookieJar, TCPConnector +from aiohttp import ClientResponse, ClientSession, ClientTimeout, DummyCookieJar, ServerDisconnectedError, TCPConnector from aiohttp.client import _RequestOptions from fastapi import FastAPI, Request, Response from omegaconf import DictConfig, OmegaConf @@ -101,6 +101,8 @@ async def request(method: str, url: str, **kwargs: Unpack[_RequestOptions]) -> C while True: try: return await client.request(method=method, url=url, **kwargs) + except ServerDisconnectedError: + await asyncio.sleep(0.5) except Exception as e: print( f"""Hit an exception while making a request (try {num_tries}): {e} From 96592551daa35f5c97d937988867b742deae0f40 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sat, 20 Sep 2025 23:12:59 -0700 Subject: [PATCH 39/52] don't poll http statuses during server lifetime Signed-off-by: Brian Yu --- nemo_gym/cli.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nemo_gym/cli.py b/nemo_gym/cli.py index 016b92157..397b4e0d3 100644 --- a/nemo_gym/cli.py +++ b/nemo_gym/cli.py @@ -243,10 +243,6 @@ async def sleep(): # Indefinitely while True: self.poll() - - statuses = self.check_http_server_statuses() - assert statuses.count("success") == len(statuses), 
"Found non-success statuses" - await asyncio.sleep(60) try: From f1ab7b856f472a200ce065a4565a17499fd99e0b Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 10:58:25 -0700 Subject: [PATCH 40/52] clean Signed-off-by: Brian Yu --- nemo_gym/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nemo_gym/__init__.py b/nemo_gym/__init__.py index 6a3c444ee..509ca8d68 100644 --- a/nemo_gym/__init__.py +++ b/nemo_gym/__init__.py @@ -1,4 +1,3 @@ -import logging import sys from os import environ from os.path import abspath, dirname, join From c83c88f5d3e7b6cb85f6ebd03ca11f5ce56c3ea4 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 11:13:06 -0700 Subject: [PATCH 41/52] swap back to old tokenizatino flow and add comments there Signed-off-by: Brian Yu --- nemo_gym/openai_utils.py | 10 ++++++ responses_api_models/vllm_model/app.py | 42 +++++++++++++++++++------- 2 files changed, 41 insertions(+), 11 deletions(-) diff --git a/nemo_gym/openai_utils.py b/nemo_gym/openai_utils.py index d4b3542b4..4ba19d00f 100644 --- a/nemo_gym/openai_utils.py +++ b/nemo_gym/openai_utils.py @@ -442,3 +442,13 @@ async def create_response(self, **kwargs): headers={"Authorization": f"Bearer {self.api_key}"}, ) return await response.json() + + async def create_tokenize(self, **kwargs): + base_url = self.base_url.removesuffix("/v1") + response = await request( + method="POST", + url=f"{base_url}/tokenize", + json=kwargs, + headers={"Authorization": f"Bearer {self.api_key}"}, + ) + return await response.json() diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index d7782e751..b244f81ea 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -17,7 +17,6 @@ from uuid import uuid4 from fastapi import Request -from openai import BaseModel as OpenAIBaseModel from pydantic import BaseModel, Field from nemo_gym.base_responses_api_model import ( @@ -67,11 +66,6 @@ def model_post_init(self, 
context): return super().model_post_init(context) -# This needs to be OpenAI BaseModel since it is casted to below by the OpenAI client. -class VLLMTokenizeResponse(OpenAIBaseModel): - tokens: List[int] - - class VLLMModel(SimpleResponsesAPIModel): config: VLLMModelConfig @@ -152,10 +146,12 @@ async def chat_completions( create_params |= dict( logprobs=True, # Typically passed via OpenAI client extra_body. + return_tokens_as_token_ids=True, + # TODO add this when NeMo RL upgrades to vLLM 0.10.2 support for prompt token ids # For prompt and generation token IDs - return_token_ids=True, + # return_token_ids=True, # For prompt token IDs - prompt_logprobs=0, + # prompt_logprobs=0, ) chat_completion_dict = await client.create_chat_completion(**create_params) @@ -168,17 +164,41 @@ async def chat_completions( log_probs = choice_dict["logprobs"]["content"] generation_log_probs = [log_prob["logprob"] for log_prob in log_probs] + """ + START TODO remove this when NeMo RL upgrades to vLLM 0.10.2 support for prompt token ids + """ + # Looks like `"token_id:151667"` + generation_token_ids = [log_prob["token"].removeprefix("token_id:") for log_prob in log_probs] + + # The tokenize endpoint doesn't accept any sampling parameters + # The only relevant params are model, messages, and tools. + tokenize_body_dict = dict() + for key in ("model", "messages", "tools"): + if key in body_dict: + tokenize_body_dict[key] = body_dict[key] + + # The base url has /v1 at the end but vLLM's tokenize endpoint does not have v1, hence the .. 
+ # I can't believe the path is resolved correctly LOL + tokenize_response = await client.create_tokenize(**tokenize_body_dict) + """ + END + """ + message_dict = choice_dict["message"] message_dict.update( dict( - prompt_token_ids=chat_completion_dict["prompt_token_ids"], - generation_token_ids=choice_dict["token_ids"], + # TODO add this when NeMo RL upgrades to vLLM 0.10.2 support for prompt token ids + # prompt_token_ids=chat_completion_dict["prompt_token_ids"], + prompt_token_ids=tokenize_response["tokens"], + # generation_token_ids=choice_dict["token_ids"], + generation_token_ids=generation_token_ids, generation_log_probs=generation_log_probs, ) ) # Clean the duplicated information - chat_completion_dict.pop("prompt_token_ids") + # TODO add this when NeMo RL upgrades to vLLM 0.10.2 support for prompt token ids + # chat_completion_dict.pop("prompt_token_ids") choice_dict.pop("token_ids") choice_dict.pop("logprobs") From f9511651b5a98c814445ceb85468ee05dd96bc26 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 11:21:29 -0700 Subject: [PATCH 42/52] dont pop token ids Signed-off-by: Brian Yu --- responses_api_models/vllm_model/app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/responses_api_models/vllm_model/app.py b/responses_api_models/vllm_model/app.py index b244f81ea..99f31daa1 100644 --- a/responses_api_models/vllm_model/app.py +++ b/responses_api_models/vllm_model/app.py @@ -197,10 +197,10 @@ async def chat_completions( ) # Clean the duplicated information + choice_dict.pop("logprobs") # TODO add this when NeMo RL upgrades to vLLM 0.10.2 support for prompt token ids # chat_completion_dict.pop("prompt_token_ids") - choice_dict.pop("token_ids") - choice_dict.pop("logprobs") + # choice_dict.pop("token_ids") return NeMoGymChatCompletion.model_validate(chat_completion_dict) From 876c0325890427fd41f3ea09816086a1a757d6b4 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 14:25:52 -0700 Subject: [PATCH 43/52] add 
set global aiohttp client fn Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index cec53e13b..33c1b7f93 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -67,6 +67,10 @@ def get_global_aiohttp_client( ) cfg = GlobalAIOHTTPAsyncClientConfig.model_validate(global_config_dict) + return set_global_aiohttp_client(cfg) + + +def set_global_aiohttp_client(cfg: GlobalAIOHTTPAsyncClientConfig) -> ClientSession: client_session = ClientSession( connector=TCPConnector( limit=cfg.global_aiohttp_connector_limit, @@ -76,6 +80,7 @@ def get_global_aiohttp_client( cookie_jar=DummyCookieJar(), ) + global _GLOBAL_AIOHTTP_CLIENT _GLOBAL_AIOHTTP_CLIENT = client_session return _GLOBAL_AIOHTTP_CLIENT From 837d0aa4e5232a634ed41b052f6ab80eea21dc42 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 14:26:01 -0700 Subject: [PATCH 44/52] cleanup rollout collection Signed-off-by: Brian Yu --- nemo_gym/rollout_collection.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/nemo_gym/rollout_collection.py b/nemo_gym/rollout_collection.py index 4a90e65cb..5461006ba 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -58,17 +58,16 @@ async def _collect_rollouts(config: RolloutCollectionConfig): # pragma: no cove metrics = Counter() with open(config.output_jsonl_fpath, "a") as f: - async def _post_coroutine(i: int, row: dict) -> None: + async def _post_coroutine(row: dict) -> None: async with semaphore: response = await server_client.post(server_name=config.agent_name, url_path="/run", json=row) result = await response.json() f.write(json.dumps(result) + "\n") metrics.update({k: v for k, v in result.items() if isinstance(v, (int, float))}) - tasks = list(map(_post_coroutine, range(len(rows)), rows)) - await tqdm.gather(*tasks, desc="Collecting rollouts") + await tqdm.gather(*map(_post_coroutine, rows), 
desc="Collecting rollouts") - avg_metrics = {k: v / len(tasks) for k, v in metrics.items()} + avg_metrics = {k: v / len(rows) for k, v in metrics.items()} print(json.dumps(avg_metrics, indent=4)) From 6e2d73e31c6ee859d9c0d6e892c67ccc828e6df1 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 14:27:32 -0700 Subject: [PATCH 45/52] close and reopen Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 33c1b7f93..4080a2d4f 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -71,6 +71,8 @@ def get_global_aiohttp_client( def set_global_aiohttp_client(cfg: GlobalAIOHTTPAsyncClientConfig) -> ClientSession: + global_aiohttp_client_exit() + client_session = ClientSession( connector=TCPConnector( limit=cfg.global_aiohttp_connector_limit, From 582ff21be0be85ef01fdaa598c616365857c3cad Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 15:01:14 -0700 Subject: [PATCH 46/52] refactor to accommodate rollout collection helper Signed-off-by: Brian Yu --- nemo_gym/cli.py | 1 + nemo_gym/rollout_collection.py | 90 ++++++++++++++++++++++------------ nemo_gym/server_utils.py | 13 ++++- 3 files changed, 72 insertions(+), 32 deletions(-) diff --git a/nemo_gym/cli.py b/nemo_gym/cli.py index 397b4e0d3..42864da3b 100644 --- a/nemo_gym/cli.py +++ b/nemo_gym/cli.py @@ -223,6 +223,7 @@ def wait_for_spinup(self) -> None: sleep(sleep_interval) def shutdown(self) -> None: + # TODO there is possibly a better way to handle the server shutdowns. 
for process_name, process in self._processes.items(): print(f"Killing `{process_name}`") process.kill() diff --git a/nemo_gym/rollout_collection.py b/nemo_gym/rollout_collection.py index 5461006ba..dd1f8eca3 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -17,12 +17,18 @@ from collections import Counter from contextlib import nullcontext from itertools import chain, repeat -from typing import Optional +from typing import Dict, List, Optional from pydantic import BaseModel from tqdm.asyncio import tqdm -from nemo_gym.server_utils import ServerClient, get_global_config_dict +from nemo_gym.server_utils import ( + GlobalAIOHTTPAsyncClientConfig, + ServerClient, + get_global_config_dict, + is_global_aiohttp_client_setup, + set_global_aiohttp_client, +) class RolloutCollectionConfig(BaseModel): @@ -34,43 +40,67 @@ class RolloutCollectionConfig(BaseModel): num_samples_in_parallel: Optional[int] = None -async def _collect_rollouts(config: RolloutCollectionConfig): # pragma: no cover - with open(config.input_jsonl_fpath) as input_dataset: - rows = list(map(json.loads, input_dataset)) - print(f"Found {len(rows)} rows!") +class RolloutCollectionHelper(BaseModel): # pragma: no cover + async def run_from_config(self, config: RolloutCollectionConfig): + with open(config.input_jsonl_fpath) as input_dataset: + rows = list(map(json.loads, input_dataset)) + print(f"Found {len(rows)} rows!") - if config.limit: - previous_length = len(rows) - rows = rows[: config.limit] - print(f"Limiting rows from {previous_length} to {len(rows)}!") + if config.limit: + previous_length = len(rows) + rows = rows[: config.limit] + print(f"Limiting rows from {previous_length} to {len(rows)}!") - if config.num_repeats: - previous_length = len(rows) - rows = list(chain.from_iterable(repeat(row, config.num_repeats) for row in rows)) - print(f"Repeating rows (in a pattern of abc to aabbcc) from {previous_length} to {len(rows)}!") + if config.num_repeats: + previous_length 
= len(rows) + rows = list(chain.from_iterable(repeat(row, config.num_repeats) for row in rows)) + print(f"Repeating rows (in a pattern of abc to aabbcc) from {previous_length} to {len(rows)}!") - server_client = ServerClient.load_from_global_config() + semaphore = nullcontext() + if config.num_samples_in_parallel: + semaphore = Semaphore(config.num_samples_in_parallel) - semaphore = nullcontext() - if config.num_samples_in_parallel: - semaphore = Semaphore(config.num_samples_in_parallel) + server_client = self.setup_server_client() - metrics = Counter() - with open(config.output_jsonl_fpath, "a") as f: + metrics = Counter() + with open(config.output_jsonl_fpath, "a") as f: - async def _post_coroutine(row: dict) -> None: - async with semaphore: - response = await server_client.post(server_name=config.agent_name, url_path="/run", json=row) - result = await response.json() - f.write(json.dumps(result) + "\n") - metrics.update({k: v for k, v in result.items() if isinstance(v, (int, float))}) + async def _post_coroutine(row: dict) -> None: + async with semaphore: + response = await server_client.post(server_name=config.agent_name, url_path="/run", json=row) + result = await response.json() + f.write(json.dumps(result) + "\n") + metrics.update({k: v for k, v in result.items() if isinstance(v, (int, float))}) - await tqdm.gather(*map(_post_coroutine, rows), desc="Collecting rollouts") + await tqdm.gather(*map(_post_coroutine, rows), desc="Collecting rollouts") - avg_metrics = {k: v / len(rows) for k, v in metrics.items()} - print(json.dumps(avg_metrics, indent=4)) + avg_metrics = {k: v / len(rows) for k, v in metrics.items()} + + print(json.dumps(avg_metrics, indent=4)) + + async def run_examples(self, examples: List[Dict]) -> List[Dict]: + server_client = self.setup_server_client() + + async def _post_subroutine(row: Dict) -> Dict: + res = await server_client.post(server_name=row.pop("agent_ref")["name"], url_path="/run", json=row) + return await res.json() + + return 
await tqdm.gather(*map(_post_subroutine, examples), desc="Collecting rollouts") + + def setup_server_client(self) -> ServerClient: + server_client = ServerClient.load_from_global_config() + + # We set this rollout global aiohttp client to use the same max connections as the underlying head server global config. + if not is_global_aiohttp_client_setup(): + set_global_aiohttp_client( + cfg=GlobalAIOHTTPAsyncClientConfig.model_validate(server_client.global_config_dict) + ) + + return server_client def collect_rollouts(): # pragma: no cover config = RolloutCollectionConfig.model_validate(get_global_config_dict()) - asyncio.run(_collect_rollouts(config)) + rch = RolloutCollectionHelper() + + asyncio.run(rch.run_from_config(config)) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index 4080a2d4f..caf4695cd 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -71,7 +71,9 @@ def get_global_aiohttp_client( def set_global_aiohttp_client(cfg: GlobalAIOHTTPAsyncClientConfig) -> ClientSession: - global_aiohttp_client_exit() + assert not is_global_aiohttp_client_setup(), ( + "There is already a global aiohttp client setup. Please refactor your code or call `global_aiohttp_client_exit` if you want to explicitly re-make the client!" 
+ ) client_session = ClientSession( connector=TCPConnector( @@ -88,12 +90,19 @@ def set_global_aiohttp_client(cfg: GlobalAIOHTTPAsyncClientConfig) -> ClientSess return _GLOBAL_AIOHTTP_CLIENT +def is_global_aiohttp_client_setup() -> bool: + return _GLOBAL_AIOHTTP_CLIENT is not None + + def global_aiohttp_client_exit(): - if _GLOBAL_AIOHTTP_CLIENT is None: + if not is_global_aiohttp_client_setup(): return + global _GLOBAL_AIOHTTP_CLIENT asyncio.run(_GLOBAL_AIOHTTP_CLIENT.close()) + _GLOBAL_AIOHTTP_CLIENT = None + atexit.register(global_aiohttp_client_exit) From 2c8c6f96857c58a267c921fb3902571e7beaa60d Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 15:03:06 -0700 Subject: [PATCH 47/52] pass through head server config Signed-off-by: Brian Yu --- nemo_gym/rollout_collection.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nemo_gym/rollout_collection.py b/nemo_gym/rollout_collection.py index dd1f8eca3..d4658c094 100644 --- a/nemo_gym/rollout_collection.py +++ b/nemo_gym/rollout_collection.py @@ -22,6 +22,7 @@ from pydantic import BaseModel from tqdm.asyncio import tqdm +from nemo_gym.config_types import BaseServerConfig from nemo_gym.server_utils import ( GlobalAIOHTTPAsyncClientConfig, ServerClient, @@ -78,8 +79,10 @@ async def _post_coroutine(row: dict) -> None: print(json.dumps(avg_metrics, indent=4)) - async def run_examples(self, examples: List[Dict]) -> List[Dict]: - server_client = self.setup_server_client() + async def run_examples( + self, examples: List[Dict], head_server_config: Optional[BaseServerConfig] = None + ) -> List[Dict]: + server_client = self.setup_server_client(head_server_config) async def _post_subroutine(row: Dict) -> Dict: res = await server_client.post(server_name=row.pop("agent_ref")["name"], url_path="/run", json=row) @@ -87,8 +90,8 @@ async def _post_subroutine(row: Dict) -> Dict: return await tqdm.gather(*map(_post_subroutine, examples), desc="Collecting rollouts") - def 
setup_server_client(self) -> ServerClient: - server_client = ServerClient.load_from_global_config() + def setup_server_client(self, head_server_config: Optional[BaseServerConfig] = None) -> ServerClient: + server_client = ServerClient.load_from_global_config(head_server_config) # We set this rollout global aiohttp client to use the same max connections as the underlying head server global config. if not is_global_aiohttp_client_setup(): From 48cb4ea3901909c20aefc055071e27704a3ef2e5 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 15:58:34 -0700 Subject: [PATCH 48/52] filter out 200 ok messages to clean up server logs Signed-off-by: Brian Yu --- nemo_gym/server_utils.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nemo_gym/server_utils.py b/nemo_gym/server_utils.py index caf4695cd..55c57b9a3 100644 --- a/nemo_gym/server_utils.py +++ b/nemo_gym/server_utils.py @@ -15,6 +15,8 @@ import atexit import json from abc import abstractmethod +from logging import Filter as LoggingFilter +from logging import LogRecord, getLogger from os import getenv from threading import Thread from typing import Literal, Optional, Tuple, Type, Union, Unpack @@ -314,6 +316,18 @@ def run_webserver(cls) -> None: # pragma: no cover app = server.setup_webserver() + class No200Filter(LoggingFilter): + def filter(self, record: LogRecord) -> bool: + msg = record.getMessage() + return not msg.strip().endswith("200") + + uvicorn_logger = getLogger("uvicorn.access") + uvicorn_logger.addFilter(No200Filter()) + + print( + "Adding a uvicorn logging filter so that the logs aren't spammed with 200 OK messages. This is to help errors pop up better and filter out noise." 
+ ) + uvicorn.run( app, host=server.config.host, From 597cceedcccc5eb2e418f66dc76f55273542e2c6 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 16:16:32 -0700 Subject: [PATCH 49/52] add faq Signed-off-by: Brian Yu --- README.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/README.md b/README.md index 41469410b..2689752bf 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ - [FAQ: build-docs / Build docs CI failures](#faq-build-docs--build-docs-ci-failures) - [FAQ: NeMo Gym, training frameworks, and token IDs](#faq-nemo-gym-training-frameworks-and-token-ids) - [FAQ: NeMo Gym what CI/CD do I need to pass?](#faq-nemo-gym-what-cicd-do-i-need-to-pass) +- [FAQ: Why aiohttp backend and not httpx/httpcore for async http?](#faq-why-aiohttp-backend-and-not-httpxhttpcore-for-async-http) # NeMo-Gym @@ -886,3 +887,31 @@ Examples of PR checks that most PRs do not need to wait for to pass: 1. CICD NeMo / cicd-container-build / build / main (push) 2. CICD NeMo / Nemo_CICD_Test (push) ... + + +# FAQ: Why aiohttp backend and not httpx/httpcore for async http? + +TL;DR: httpx is O(n^2) runtime where n is the number of queued requests (i.e. for each request, we check all other queued requests). This is terribly inefficient and results in major slowdowns. + +On Wed Sep 17, 2025, inspired by the Deepseek R1 Nature paper, we tried launching a larger rollout batch run with up to 16 off policy steps in NeMo RL. Our setting resulted in Gym being slammed with 16k concurrent requests. At the time, we were using a single Gym instance with multiple data-parallel vLLM workers, and that setup hung for 40 minutes before the first request was processed. Something was wrong. + +Before that time, we had also gotten reports that the rollout collection in Gym couldn't be used with high concurrency i.e. in some cases people had to set the concurrency to 32 requests in parallel. 
Putting these two data points together, we figured something was wrong with the concurrency setup in Gym. + +Eventually, we isolated the issue to our async http backend -- httpx and httpcore. We originally decided to use httpx for the async http backend in Gym because the OpenAI client uses it by default so we can share the same backend http client. Unfortunately, the httpcore connection pool subroutine for pooling connections over requests is O(n^2) where n is the number of queued requests. + +Here are the key calls in the stack trace: +1. OpenAI client at some point calls httpx client +2. httpx client calls into the transport [here](https://github.com/encode/httpx/blob/4b23574cf83307ce27d3b14b4a425dc58c57d28d/httpx/_client.py#L1014) +3. Transport calls into httpcore connection pool [here](https://github.com/encode/httpx/blob/4b23574cf83307ce27d3b14b4a425dc58c57d28d/httpx/_transports/default.py#L250) +4. For each request, the httpcore connection pool calls this `_assign_requests_to_connections` subroutine [here](https://github.com/encode/httpcore/blob/5974b03c7df89d3ee4e23779900d5349d550753c/httpcore/_async/connection_pool.py#L228) + 1. This subroutine loops through connections [here](https://github.com/encode/httpcore/blob/5974b03c7df89d3ee4e23779900d5349d550753c/httpcore/_async/connection_pool.py#L284) + 2. and loops through queued requests [here](https://github.com/encode/httpcore/blob/5974b03c7df89d3ee4e23779900d5349d550753c/httpcore/_async/connection_pool.py#L303) + 3. Which results in a total of O(n^2) runtime if the number of queued requests is large. Which is always the case if we slam with some larger number of requests. + +In the end, we decided to swap our http backend from httpx to aiohttp since we had good prior experience working with aiohttp in production infra. + +Here are some GitHub issues related to this problem. They didn't help too much, but they did validate our solution (kind of) to use aiohttp as an async http backend instead. 
+- https://github.com/openai/openai-python/issues/1596 +- https://github.com/encode/httpx/issues/3215#issuecomment-2220795088 + +If you are using AsyncOpenAI client with a parallelism > 32, you may also want to check if this kind of inefficiency also affects your setup. From fb4a4fcfe474f5de095e9551806537f65ab546b7 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 16:18:10 -0700 Subject: [PATCH 50/52] empty commit for qa Signed-off-by: Brian Yu From a3af3bb43c17894508820bf624fbfd8a55f1ae64 Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 16:23:02 -0700 Subject: [PATCH 51/52] add aiohttp package Signed-off-by: Brian Yu --- pyproject.toml | 5 +++++ uv.lock | 17 ++--------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 449c0ebb7..caaa05874 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,6 +126,11 @@ dependencies = [ # Updated Wed Sep 17, 2025 with tdigest==0.5.2.2 # License: MIT https://github.com/CamDavidsonPilon/tdigest/blob/e35cfd708962ae5e9d1c5d2b15a99af7b2e2f323/LICENSE.txt "tdigest>=0.5.2.2", + + # aiohttp: async http backend + # Updated Sun Sep 21, 2025 with aiohttp==3.12.15 + # License: Apache 2.0 https://github.com/aio-libs/aiohttp/blob/9a2f146a12e3525b43e96723ef41584bf9cf784e/LICENSE.txt + "aiohttp", ] [dependency-groups] diff --git a/uv.lock b/uv.lock index e6e3c8861..3ed8c3292 100644 --- a/uv.lock +++ b/uv.lock @@ -1102,19 +1102,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] -[[package]] -name = "httpx-aiohttp" -version = "0.1.8" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "httpx" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/37/19/ae2d2bf1f57fdd23c8ad83675599fb5c407fa13bc20e90f00cffa4dea3aa/httpx_aiohttp-0.1.8.tar.gz", hash = "sha256:756c5e74cdb568c3248ba63fe82bfe8bbe64b928728720f7eaac64b3cf46f308", size = 25401, upload-time = "2025-07-04T10:40:32.329Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/54/7a/514c484b88cc4ebbcd2e27e92b86019c0c5bb920582f5fbb10b7e6c78574/httpx_aiohttp-0.1.8-py3-none-any.whl", hash = "sha256:b7bd958d1331f3759a38a0ba22ad29832cb63ca69498c17735228055bf78fa7e", size = 6180, upload-time = "2025-07-04T10:40:31.522Z" }, -] - [[package]] name = "huggingface-hub" version = "0.34.4" @@ -1681,10 +1668,10 @@ wheels = [ name = "nemo-gym" source = { editable = "." } dependencies = [ + { name = "aiohttp" }, { name = "devtools" }, { name = "fastapi" }, { name = "gradio" }, - { name = "httpx-aiohttp" }, { name = "hydra-core" }, { name = "mlflow" }, { name = "omegaconf" }, @@ -1722,11 +1709,11 @@ docs = [ [package.metadata] requires-dist = [ + { name = "aiohttp" }, { name = "coverage", extras = ["toml"], marker = "extra == 'dev'" }, { name = "devtools" }, { name = "fastapi" }, { name = "gradio" }, - { name = "httpx-aiohttp" }, { name = "hydra-core" }, { name = "mlflow" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.8.0" }, From c8f240a2f0607b4d46898d58e5bfb4e411178d1e Mon Sep 17 00:00:00 2001 From: Brian Yu Date: Sun, 21 Sep 2025 17:56:51 -0700 Subject: [PATCH 52/52] improve readme Signed-off-by: Brian Yu --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 2689752bf..81d24e0a0 100644 --- a/README.md +++ b/README.md @@ -897,8 +897,20 @@ On Wed Sep 17, 2025, inspired by the Deepseek R1 Nature paper, we tried launchin Before that time, we had also gotten reports that the rollout collection in Gym couldn't be used with high concurrency i.e. in some cases people had to set the concurrency to 32 requests in parallel. 
Putting these two data points together, we figured something was wrong with the concurrency setup in Gym. +For some context, Gym is a set of servers that end up calling a model endpoint server at some point. And it's really important that we never artificially restrict the concurrency in the Gym side since technically we are always clients of that model endpoint server, since the model endpoint server could handle many more requests than we're restricting the concurrency to. So we always want Gym to be as efficient as possible and not have e.g. max parallel requests or some such parameter in Gym. + +Eventually, we isolated the issue to our async http backend -- httpx and httpcore. We originally decided to use httpx for the async http backend in Gym because the OpenAI client uses it by default so we can share the same backend http client. Unfortunately, the httpcore connection pool subroutine for pooling connections over requests is O(n^2) where n is the number of queued requests. +Networking mental model: +1. A request is sent by Gym to the model endpoint server. +2. This request requires a connection from our client side to the server side. + 1. This connection is a socket (identified by a port) and a socket is an open file (managed by the operating system). + 2. If we are sending 100 requests, in the worst case we could open 100 connections == 100 open files. This quickly becomes very expensive. + 3. So, async http backends will pool requests across connections to a single endpoint, where multiple requests can leverage the same file if they are going to the same endpoint origin. + 4. This is called connection pooling. And it's possible that all 100 requests share a single connection. +3. But this connection pooling now needs some management logic. When the client sends a new request, it needs to determine if that request can reuse an existing connection. + 1. And this is where the httpcore connection pool logic is very inefficient. 
+ Here are the key calls in the stack trace: 1. OpenAI client at some point calls httpx client 2. httpx client calls into the transport [here](https://github.com/encode/httpx/blob/4b23574cf83307ce27d3b14b4a425dc58c57d28d/httpx/_client.py#L1014)