vllm/model_executor/layers/sampler.py (10 changes: 8 additions & 2 deletions)
@@ -539,10 +539,13 @@ def _get_logprobs(
             prompt_len = sampling_metadata.prompt_lens[i]
             prompt_tokens = sampling_metadata.seq_data[
                 seq_ids[0]].prompt_token_ids
+            # Swapped seqs have output tokens.
+            output_tokens = sampling_metadata.seq_data[
+                seq_ids[0]].output_token_ids
             batched_logprobs_query_seq_indices.extend(
                 sample_idx + j for j in range(prompt_len - 1))
             batched_logprobs_query_token_indices.extend(
-                token_id for token_id in prompt_tokens[1:])
+                token_id for token_id in prompt_tokens[1:] + output_tokens)
             sample_idx += prompt_len - 1
             batched_logprobs_query_seq_indices.extend(
                 [sample_idx + parent_id for parent_id in parent_ids])
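
As a rough illustration of what this hunk changes (toy token ids, not taken from the PR): the query token indices now also cover tokens that were generated before a sequence was swapped out, while an ordinary prefill, whose output_token_ids list is empty, behaves exactly as before.

# Minimal sketch of the concatenation the hunk introduces.
# The token ids below are hypothetical; only the list arithmetic matters.
prompt_tokens = [101, 7592, 2088]   # prompt token ids
output_tokens = [2054, 2003]        # tokens generated before the sequence was swapped out

# Same expression as in the diff: skip the first prompt token, then
# append the previously generated tokens.
query_token_ids = [token_id for token_id in prompt_tokens[1:] + output_tokens]
print(query_token_ids)              # [7592, 2088, 2054, 2003]

# For a normal prefill, output_tokens is empty, so nothing changes:
print([token_id for token_id in prompt_tokens[1:] + []])   # [7592, 2088]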
@@ -586,8 +589,11 @@ def _get_logprobs(
             prompt_len = sampling_metadata.prompt_lens[i]
             prompt_tokens = sampling_metadata.seq_data[
                 seq_ids[0]].prompt_token_ids
+            # Swapped seqs have output tokens.
+            output_tokens = sampling_metadata.seq_data[
+                seq_ids[0]].output_token_ids
             group_prompt_logprobs: PromptLogprobs = [None]
-            for token_id in prompt_tokens[1:]:
+            for token_id in prompt_tokens[1:] + output_tokens:
                 prompt_logprobs_dict = {
                     token_id:
                     batched_logprobs_query_result[query_result_idx].item()

Collaborator comment on lines +592 to +596:

Why is output_tokens used for prompt logprobs?
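
One hedged reading of the question above, going only by the added "Swapped seqs have output tokens" comment: a sequence that is recomputed after being swapped out still carries its previously generated tokens in seq_data, so the prompt-logprob list has to cover those positions as well for the lengths to line up. The sketch below only illustrates that length bookkeeping; the values, the placeholder logprobs, and the assumption that prompt_len spans prompt plus generated tokens for such a sequence are illustrative, not taken from the PR.

# Length bookkeeping sketch under the assumption stated above
# (hypothetical ids; 0.0 stands in for real logprob values).
prompt_tokens = [101, 7592, 2088]        # original prompt ids
output_tokens = [2054, 2003]             # ids generated before the swap
prompt_len = len(prompt_tokens) + len(output_tokens)   # assumed length of the recomputed "prompt"

group_prompt_logprobs = [None]           # the first token never gets a prompt logprob
for token_id in prompt_tokens[1:] + output_tokens:
    group_prompt_logprobs.append({token_id: 0.0})

# One entry per position of the recomputed sequence.
assert len(group_prompt_logprobs) == prompt_len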