Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 15 additions & 1 deletion trl/generation/vllm_generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
"""vLLM-based generation backend for TRL trainers."""

import json
import logging
import math
import os
from collections.abc import Callable
from contextlib import nullcontext
Expand All @@ -34,6 +36,18 @@
from .vllm_client import VLLMClient


logger = logging.getLogger(__name__)


def sanitize_logprob(logprob):
    """Extract the float value from a vLLM ``Logprob`` object, filtering NaNs.

    Args:
        logprob: A vLLM ``Logprob``-like object exposing a float ``.logprob``
            attribute.

    Returns:
        The float logprob value, or ``None`` when the value is NaN so that the
        caller can ignore the corresponding token.
    """
    value = logprob.logprob
    if math.isnan(value):
        # Lazy %-formatting: the (potentially costly) repr of `logprob` is
        # only built if the warning record is actually emitted.
        logger.warning("Generated NaN logprob, token logprob '%s' will be ignored", logprob)
        return None

    return value


if TYPE_CHECKING:
from accelerate import Accelerator
from peft import PeftModel
Expand Down Expand Up @@ -666,7 +680,7 @@ def generate(self, prompts: list, num_generations: int, profiler: ProfilingConte
all_prompt_ids = [output.prompt_token_ids for output in all_outputs]
all_completion_ids = [output.token_ids for outputs in all_outputs for output in outputs.outputs]
all_logprobs = [
[next(iter(lp.values())).logprob for lp in output.logprobs]
[sanitize_logprob(next(iter(lp.values()))) for lp in output.logprobs]
for outputs in all_outputs
for output in outputs.outputs
]
Expand Down
12 changes: 1 addition & 11 deletions trl/scripts/vllm_serve.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from transformers import AutoTokenizer, is_torch_xpu_available, is_vision_available

from trl import TrlParser
from trl.generation.vllm_generation import sanitize_logprob
from trl.import_utils import (
is_fastapi_available,
is_pydantic_available,
Expand Down Expand Up @@ -431,17 +432,6 @@ def chunk_list(lst: list, n: int) -> list[list]:
return [lst[i * k + min(i, r) : (i + 1) * k + min(i + 1, r)] for i in range(n)]


def sanitize_logprob(logprob):
import math

value = logprob.logprob
if math.isnan(value):
logger.warning(f"Generated NaN logprob, token logprob '{logprob}' will be ignored")
return None

return value


def _replace_prefix_tokens(
tokenizer,
model_prefix_token_ids: list[int],
Expand Down
Loading