From 4b7fb624fe196b548a26feba620d3d24689e25cc Mon Sep 17 00:00:00 2001
From: Terry Kong
Date: Tue, 6 Jan 2026 08:40:50 +0000
Subject: [PATCH] fix: remove seq_parallel + tp restriction in dtensor v2

Signed-off-by: Terry Kong
---
 nemo_rl/models/policy/workers/dtensor_policy_worker_v2.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/nemo_rl/models/policy/workers/dtensor_policy_worker_v2.py b/nemo_rl/models/policy/workers/dtensor_policy_worker_v2.py
index 738146a7e2..47d6765780 100644
--- a/nemo_rl/models/policy/workers/dtensor_policy_worker_v2.py
+++ b/nemo_rl/models/policy/workers/dtensor_policy_worker_v2.py
@@ -324,10 +324,6 @@ def __init__(
             print(
                 "[WARNING]: sequence_parallel=True, but tp_size=1 which has no effect. Enable tp_size > 1 to use sequence parallelism."
             )
-        elif sequence_parallel_enabled and tp_size > 1:
-            raise RuntimeError(
-                "Sequence parallel + tp_size >1 is currently broken in torch==2.8.0. See https://github.com/NVIDIA-NeMo/Automodel/issues/652 for more details."
-            )
         if cp_size > 1:
             assert not isinstance(self.model, Gemma3ForCausalLM), (
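
Note: for reference, below is a minimal sketch of the validation logic that remains after this patch. The standalone helper name validate_sequence_parallel is hypothetical; in dtensor_policy_worker_v2.py this check sits inline in __init__. The point of the change is that sequence_parallel=True with tp_size > 1 is now permitted, and only the no-op combination (sequence parallelism with tp_size == 1) still warns.

    # Hypothetical standalone helper mirroring the post-patch check; in the
    # actual worker this logic is inline in __init__, not a separate function.
    def validate_sequence_parallel(sequence_parallel_enabled: bool, tp_size: int) -> None:
        if sequence_parallel_enabled and tp_size == 1:
            # Still warned: sequence parallelism has no effect without tensor parallelism.
            print(
                "[WARNING]: sequence_parallel=True, but tp_size=1 which has no effect. "
                "Enable tp_size > 1 to use sequence parallelism."
            )
        # The former elif branch raising RuntimeError for sequence_parallel_enabled
        # and tp_size > 1 (a torch==2.8.0 workaround, see
        # https://github.com/NVIDIA-NeMo/Automodel/issues/652) is removed by this
        # patch, so that combination now falls through to normal model setup.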