16 changes: 16 additions & 0 deletions vllm/entrypoints/openai/api_server.py
@@ -17,6 +17,7 @@
from collections.abc import AsyncGenerator, AsyncIterator, Awaitable, Callable
from contextlib import asynccontextmanager
from http import HTTPStatus
from multiprocessing import shared_memory
from typing import Annotated, Any, Literal

import model_hosting_container_standards.sagemaker as sagemaker_standards
@@ -132,6 +133,17 @@
_running_tasks: set[asyncio.Task] = set()


def set_sleep_signal(value: int = 1, shared_memory_name: str = "sleep_signal") -> None:
    # Attach to the existing shared-memory segment, or create it on first use.
    try:
        shm = shared_memory.SharedMemory(name=shared_memory_name, create=False, size=4)
    except Exception:
        shm = shared_memory.SharedMemory(name=shared_memory_name, create=True, size=4)

    if shm is not None:
        # Store the flag as a 4-byte little-endian integer, then detach.
        shm.buf[0:4] = value.to_bytes(4, "little")
        shm.close()
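
For reference, the reading side of this flag lives in the scheduler (see vllm/v1/core/sched/utils.py below). Expressed as a standalone helper it would look roughly like this — get_sleep_signal is a hypothetical name for illustration, not part of this PR:

def get_sleep_signal(shared_memory_name: str = "sleep_signal") -> int:
    # Hypothetical helper: return the current flag, or 0 if the segment
    # has not been created yet.
    try:
        shm = shared_memory.SharedMemory(name=shared_memory_name)
    except FileNotFoundError:
        return 0
    try:
        return int.from_bytes(shm.buf[0:4], "little")
    finally:
        shm.close()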


@asynccontextmanager
async def lifespan(app: FastAPI):
try:
@@ -1082,6 +1094,8 @@ async def reset_mm_cache(raw_request: Request):

@router.post("/sleep")
async def sleep(raw_request: Request):
set_sleep_signal(1)

# get POST params
level = raw_request.query_params.get("level", "1")
await engine_client(raw_request).sleep(int(level))
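
Assuming a server on the default localhost:8000, these endpoints can be exercised like this (a usage sketch, not part of the PR):

import requests

# Put the engine to sleep at level 1, then wake it up again.
requests.post("http://localhost:8000/sleep", params={"level": "1"})
requests.post("http://localhost:8000/wake_up")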
@@ -1091,6 +1105,8 @@ async def sleep(raw_request: Request):

@router.post("/wake_up")
async def wake_up(raw_request: Request):
set_sleep_signal(0)

tags = raw_request.query_params.getlist("tags")
if tags == []:
# set to None to wake up all tags if no tags are provided
15 changes: 15 additions & 0 deletions vllm/v1/core/sched/utils.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
from multiprocessing import shared_memory

import torch

@@ -69,4 +70,18 @@ def check_stop(
    ):
        request.status = RequestStatus.FINISHED_LENGTH_CAPPED
        return True

    # Check if the model is sleeping; if so, stop the request early.
    sleep_signal = 0
    shared_memory_name = "sleep_signal"
    try:
        shm = shared_memory.SharedMemory(name=shared_memory_name)
        sleep_signal = int.from_bytes(shm.buf[0:4], "little")
        shm.close()
    except:
        pass
Comment on lines +77 to +82
Severity: high

Using a bare except: is dangerous as it can hide unexpected errors, such as issues with buffer access or data conversion. It's better to catch only the specific exception you expect, in this case FileNotFoundError. Additionally, using a try...finally block ensures that shm.close() is called even if an error occurs while reading from the shared memory, preventing resource leaks.

Suggested change
-    try:
-        shm = shared_memory.SharedMemory(name=shared_memory_name)
-        sleep_signal = int.from_bytes(shm.buf[0:4], 'little')
-        shm.close()
-    except:
-        pass
+    shm = None
+    try:
+        shm = shared_memory.SharedMemory(name=shared_memory_name)
+        sleep_signal = int.from_bytes(shm.buf[0:4], 'little')
+    except FileNotFoundError:
+        pass
+    finally:
+        if shm is not None:
+            shm.close()
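
Since utils.py already imports contextlib (see the hunk above), an equivalent and slightly more compact form of this fix could use contextlib.suppress — a sketch using the same variable names, not an official suggestion:

sleep_signal = 0
with contextlib.suppress(FileNotFoundError):
    shm = shared_memory.SharedMemory(name=shared_memory_name)
    try:
        sleep_signal = int.from_bytes(shm.buf[0:4], "little")
    finally:
        # Always detach, even if reading the buffer fails.
        shm.close()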

    if sleep_signal == 1:
        request.status = RequestStatus.FINISHED_STOPPED
        return True

    return False
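
Note that neither side of this diff ever unlinks the segment, so it outlives the server process. A best-effort cleanup helper might look like this — cleanup_sleep_signal is a hypothetical name, not part of the PR:

def cleanup_sleep_signal(shared_memory_name: str = "sleep_signal") -> None:
    # Hypothetical helper: detach and remove the backing segment if it exists.
    try:
        shm = shared_memory.SharedMemory(name=shared_memory_name)
    except FileNotFoundError:
        return
    shm.close()
    shm.unlink()  # frees the OS resources once all handles are closed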