Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 12 additions & 9 deletions vllm/v1/attention/backends/mla/rocm_aiter_mla.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,16 +88,20 @@ def __init__(
# TODO: we can disambiguate between decode and mixed-prefill decode here
# so we can only use the persistent buffer if a cudagraph is actually
# being used.

# paged_kv_last_page_len is always 1s (kernel block size is always 1),
# so we create it once and reuse slices in both eager and cudagraph modes.
self.paged_kv_last_page_len = torch.ones(
max_num_reqs, dtype=torch.int32, device=device
)

if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
self.paged_kv_indptr = torch.zeros(
max_num_reqs + 1, dtype=torch.int32, device=device
)
self.paged_kv_indices = torch.zeros(
max_num_pages, dtype=torch.int32, device=device
)
self.paged_kv_last_page_len = torch.zeros(
max_num_reqs, dtype=torch.int32, device=device
)

self.qo_indptr = torch.zeros(
max_num_reqs + 1, dtype=torch.int32, device=device
Expand All @@ -122,7 +126,9 @@ def _build_decode(
).unsqueeze(0) < seq_lens_device.unsqueeze(1)
paged_kv_indices = block_table_tensor[mask]

paged_kv_last_page_len = torch.where(seq_lens_device == 0, 1, seq_lens_device)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This line looks incorrect to me. @zq1997, please double-check — is this a mistake?

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

After checking, I can confirm this was indeed a mistake on my part — I'm very sorry.

# kernel block size is always 1, so each page has exactly 1 token.
# last_page_len is always 1 - just slice the pre-initialized buffer.
paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]

paged_kv_indptr = torch.cat(
[
Expand All @@ -148,11 +154,8 @@ def _build_decode(
self.paged_kv_indptr[1 + num_reqs :].fill_(paged_kv_indptr[-1])
paged_kv_indptr = self.paged_kv_indptr[: 1 + num_reqs]

self.paged_kv_last_page_len[:num_reqs].copy_(
paged_kv_last_page_len, non_blocking=True
)
self.paged_kv_last_page_len[num_reqs:].fill_(1)
paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]
# paged_kv_last_page_len already uses the pre-initialized buffer slice
# (set above), so no copy needed - buffer is always 1s.

self.qo_indptr[: 1 + num_reqs].copy_(
query_start_loc_device, non_blocking=True
Expand Down