From 10a53bc686e76753d33f1ca210dda0c5ee3d9033 Mon Sep 17 00:00:00 2001
From: Jiying Dong <87510204+dongjiyingdjy@users.noreply.github.com>
Date: Tue, 29 Jul 2025 00:26:34 -0700
Subject: [PATCH] fix - fix illegal memory access

Signed-off-by: Jiying Dong <87510204+dongjiyingdjy@users.noreply.github.com>
---
 tensorrt_llm/_torch/attention_backend/trtllm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorrt_llm/_torch/attention_backend/trtllm.py b/tensorrt_llm/_torch/attention_backend/trtllm.py
index 143fae88d62..6d4235734a9 100644
--- a/tensorrt_llm/_torch/attention_backend/trtllm.py
+++ b/tensorrt_llm/_torch/attention_backend/trtllm.py
@@ -634,7 +634,7 @@ def __post_init__(self) -> None:
         self.block_ids_per_seq = None
         self.kv_block_ids_per_seq = None
         if self.enable_flash_mla:
-            self.block_ids_per_seq = torch.empty(
+            self.block_ids_per_seq = torch.zeros(
                 [
                     self.kv_cache_manager.max_batch_size,
                     self.kv_cache_manager.max_blocks_per_seq
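
Note on the change: the patch swaps torch.empty for torch.zeros when allocating block_ids_per_seq. A plausible reading (an assumption, not stated in the patch itself) is that torch.empty leaves the tensor uninitialized, so unused slots can hold arbitrary integers that a downstream kernel then treats as KV-cache block indices, reading out of bounds; zero-initialization keeps every slot a valid index. A minimal standalone sketch of the difference, with illustrative shapes and names that are not from the TensorRT-LLM code:

# Sketch only: shows why zero-initializing an index tensor avoids
# out-of-bounds reads. All names and sizes here are made up for illustration.
import torch

max_batch_size, max_blocks_per_seq, num_blocks = 4, 8, 16
kv_cache = torch.randn(num_blocks, 64)  # stand-in for a paged KV cache

# torch.empty returns whatever bytes are already in the allocation, so these
# "block ids" may be arbitrary values far outside the range [0, num_blocks).
bad_ids = torch.empty([max_batch_size, max_blocks_per_seq], dtype=torch.int64)

# torch.zeros guarantees every unused slot holds 0, a valid block index, so a
# kernel that reads past a sequence's real length still stays in bounds.
good_ids = torch.zeros([max_batch_size, max_blocks_per_seq], dtype=torch.int64)

_ = kv_cache[good_ids]  # always in bounds: every id is 0
# kv_cache[bad_ids] may fault (illegal memory access on GPU, IndexError on CPU).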