Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion vllm/entrypoints/openai/api_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
import json
import multiprocessing
import os
import setproctitle
import signal
import socket
import tempfile
Expand Down Expand Up @@ -1803,7 +1804,8 @@ async def run_server_worker(listen_address,
ToolParserManager.import_tool_parser(args.tool_parser_plugin)

server_index = client_config.get("client_index", 0) if client_config else 0

setproctitle.setproctitle(
f"{envs.VLLM_PROCESS_NAME_PREFIX}::APIServer_{server_index}")
# Load logging config for uvicorn if specified
log_config = load_log_config(args.log_config_file)
if log_config is not None:
Expand Down
6 changes: 6 additions & 0 deletions vllm/envs.py
Original file line number Diff line number Diff line change
Expand Up @@ -971,6 +971,12 @@ def get_vllm_port() -> Optional[int]:
# Used to force set up loopback IP
"VLLM_LOOPBACK_IP":
lambda: os.getenv("VLLM_LOOPBACK_IP", ""),

# Used to set the process name prefix for vLLM processes
# (the title shown by tools such as `ps` and `top`), which makes
# vLLM's API server, engine core, and worker processes easy to
# identify when debugging or monitoring a host.
# The default value is "VLLM".
"VLLM_PROCESS_NAME_PREFIX":
lambda: os.getenv("VLLM_PROCESS_NAME_PREFIX", "VLLM"),
}

# --8<-- [end:env-vars-definition]
Expand Down
5 changes: 4 additions & 1 deletion vllm/v1/engine/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import sys
import threading
import time
import setproctitle
from collections import deque
from collections.abc import Generator
from concurrent.futures import Future
Expand All @@ -17,6 +18,7 @@
import msgspec
import zmq

import vllm.envs as envs
from vllm.config import ParallelConfig, VllmConfig
from vllm.distributed import stateless_destroy_torch_distributed_process_group
from vllm.executor.multiproc_worker_utils import _add_prefix
Expand Down Expand Up @@ -572,7 +574,8 @@ def run_engine_core(*args,
local_dp_rank: int = 0,
**kwargs):
"""Launch EngineCore busy loop in background process."""

setproctitle.setproctitle(
f"{envs.VLLM_PROCESS_NAME_PREFIX}::EngineCore_{dp_rank}")
# Signal handler used for graceful termination.
# SystemExit exception is only raised once to allow this and worker
# processes to terminate without error
Expand Down
5 changes: 4 additions & 1 deletion vllm/v1/executor/multiproc_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
import time
import traceback
import weakref
import setproctitle
from concurrent.futures import Future, ThreadPoolExecutor
from dataclasses import dataclass
from enum import Enum, auto
Expand Down Expand Up @@ -365,7 +366,9 @@ def __init__(
}
wrapper.init_worker(all_kwargs)
self.worker = wrapper

setproctitle.setproctitle(
f"{envs.VLLM_PROCESS_NAME_PREFIX}::{
self.worker.worker.__class__.__name__}_{self.rank}")
pid = os.getpid()
_add_prefix(sys.stdout, f"VllmWorker rank={rank}", pid)
_add_prefix(sys.stderr, f"VllmWorker rank={rank}", pid)
Expand Down
Loading