@@ -18,11 +18,11 @@ license = { text = "Apache 2.0" }
 dependencies = [
     "setuptools",
     "ninja", # for flash-attn parallel build
-    "torch==2.8.0",
+    "torch==2.9.0",
     "triton; sys_platform == 'linux' and (platform_machine == 'x86_64' or platform_machine == 'aarch64')",
     "colored==2.2.3",
     "ray[default]==2.49.2",
-    "transformers>=4.55.4",
+    "transformers>=4.57.1",
     "wandb",
     "numpy",
     "datasets>=4.0.0",
@@ -57,7 +57,7 @@ automodel = [
     # Flash-attn version should be selected to satisfy both TE + vLLM requirements (xformers in particular)
     # https://github.com/NVIDIA/TransformerEngine/blob/v2.3/transformer_engine/pytorch/attention/dot_product_attention/utils.py#L108
     # https://github.com/facebookresearch/xformers/blob/8354497deb2c04c67fbb2e2ad911e86530da0e90/xformers/ops/fmha/flash.py#L76
-    "vllm==0.11.0", # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/811 resolved
+    "vllm==0.11.2", # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/811 resolved
     "flash-attn==2.8.1",
     "mamba-ssm",
     "causal-conv1d",
@@ -69,7 +69,7 @@ vllm = [
     # sudo apt-get update
     # sudo apt-get install libibverbs-dev
     "deep_ep @ git+https://github.com/deepseek-ai/DeepEP.git@e3908bf5bd0cc6265bcb225d15cd8c996d4759ef",
-    "vllm==0.11.0",
+    "vllm==0.11.2",
     "num2words>=0.5.14",
     # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved
     "flash-attn==2.8.1",
@@ -92,7 +92,7 @@ mcore = [
     "megatron-core",
     "megatron-bridge",
     # Remove this once https://github.com/NVIDIA-NeMo/RL/issues/501 resolved
-    "vllm==0.11.0",
+    "vllm==0.11.2",
     # Flash-attn version should be selected to satisfy both TE + vLLM requirements (xformers in particular)
     # https://github.com/NVIDIA/TransformerEngine/blob/v2.3/transformer_engine/pytorch/attention/dot_product_attention/utils.py#L108
     # https://github.com/facebookresearch/xformers/blob/8354497deb2c04c67fbb2e2ad911e86530da0e90/xformers/ops/fmha/flash.py#L76
@@ -105,7 +105,7 @@ penguin = ["penguin"]
 # This is a default group so that we install these even with bare `uv sync`
 build = [
     # Build requirement for TE
-    "torch==2.8.0",
+    "torch==2.9.0",
     # Build requirement for TE
     "setuptools",
     "packaging",
0 commit comments