
Commit cfe2eda

[BUG] fix local_rank in initialize_dp_attention (#7584)
1 parent 2373faa commit cfe2eda

File tree

1 file changed: +1 −3 lines changed


python/sglang/srt/layers/dp_attention.py

Lines changed: 1 addition & 3 deletions
@@ -79,14 +79,12 @@ def initialize_dp_attention(
     )
 
     if enable_dp_attention:
-        local_rank = tp_rank % (tp_size // dp_size)
         _ATTN_DP_SIZE = dp_size
         if moe_dense_tp_size is None:
             _LOCAL_ATTN_DP_SIZE = _ATTN_DP_SIZE
         else:
             _LOCAL_ATTN_DP_SIZE = max(1, dp_size // (tp_size // moe_dense_tp_size))
     else:
-        local_rank = tp_rank
         _ATTN_DP_SIZE = 1
         _LOCAL_ATTN_DP_SIZE = 1
 
@@ -96,7 +94,7 @@ def initialize_dp_attention(
             list(range(head, head + _ATTN_TP_SIZE))
             for head in range(0, pp_size * tp_size, _ATTN_TP_SIZE)
         ],
-        local_rank,
+        tp_group.local_rank,
         torch.distributed.get_backend(tp_group.device_group),
         use_pynccl=SYNC_TOKEN_IDS_ACROSS_TP,
         use_pymscclpp=False,
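
Why the change matters (a sketch, not part of the commit): the deleted lines derived a rank relative to the attention tensor-parallel subgroup, tp_rank % (tp_size // dp_size), and passed it to the group constructor, whose local_rank argument presumably expects the node-local device index that tp_group.local_rank already carries. The toy Python below only illustrates how the two values can diverge; the sizes and the gpus_per_node name are assumptions for illustration, not taken from the diff.

# Illustration only: compare the removed group-relative rank with a node-local rank.
# Assumed setup (not from the commit): a single node with 8 GPUs, tp_size = 8, dp_size = 2.
tp_size, dp_size, gpus_per_node = 8, 2, 8

for tp_rank in range(tp_size):
    group_relative = tp_rank % (tp_size // dp_size)  # value the removed code computed
    node_local = tp_rank % gpus_per_node             # value tp_group.local_rank would hold here
    marker = "" if group_relative == node_local else "  <-- mismatch"
    print(f"tp_rank={tp_rank}: old local_rank={group_relative}, tp_group.local_rank={node_local}{marker}")

Under this assumed layout, ranks 4 through 7 would have been handed device indices 0 through 3 by the removed code, while tp_group.local_rank keeps each process on its own GPU.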
