Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions vllm/engine/async_llm_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -1187,6 +1187,12 @@ async def stop_profile(self) -> None:
async def reset_prefix_cache(self) -> None:
self.engine.reset_prefix_cache()

async def sleep(self) -> None:
    """Put the wrapped engine to sleep."""
    engine = self.engine
    engine.sleep()

async def wake_up(self) -> None:
    """Wake the wrapped engine back up."""
    engine = self.engine
    engine.wake_up()

async def add_lora(self, lora_request: LoRARequest) -> None:
self.engine.add_lora(lora_request)

Expand Down
9 changes: 9 additions & 0 deletions vllm/engine/multiprocessing/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,14 @@ class RPCResetPrefixCacheRequest(Enum):
RESET_PREFIX_CACHE = 1


class RPCSleepRequest(Enum):
    """RPC message asking the engine process to put the model to sleep."""
    SLEEP = 1


class RPCWakeUpRequest(Enum):
    """RPC message asking the engine process to wake the model up."""
    WAKE_UP = 1


@dataclass
class RPCLoadAdapterRequest:
lora_request: LoRARequest
Expand All @@ -141,6 +149,7 @@ class RPCAdapterLoadedResponse:

RPC_REQUEST_T = Union[RPCProcessRequest, RPCAbortRequest, RPCStartupRequest,
RPCUProfileRequest, RPCLoadAdapterRequest,
RPCSleepRequest, RPCWakeUpRequest,
RPCResetPrefixCacheRequest]

REQUEST_OUTPUTS_T = Union[List[RequestOutput], RPCAdapterLoadedResponse,
Expand Down
17 changes: 15 additions & 2 deletions vllm/engine/multiprocessing/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,9 @@
RPCLoadAdapterRequest,
RPCProcessRequest,
RPCResetPrefixCacheRequest,
RPCStartupRequest, RPCStartupResponse,
RPCUProfileRequest)
RPCSleepRequest, RPCStartupRequest,
RPCStartupResponse,
RPCUProfileRequest, RPCWakeUpRequest)
from vllm.engine.protocol import EngineClient
# yapf: enable
from vllm.envs import VLLM_RPC_TIMEOUT
Expand Down Expand Up @@ -685,6 +686,18 @@ async def reset_prefix_cache(self) -> None:
request=RPCResetPrefixCacheRequest.RESET_PREFIX_CACHE,
socket=self.input_socket)

async def sleep(self) -> None:
    """Ask the engine process to put the model to sleep."""
    await self._send_one_way_rpc_request(
        request=RPCSleepRequest.SLEEP, socket=self.input_socket)

async def wake_up(self) -> None:
    """Ask the engine process to wake the model up."""
    await self._send_one_way_rpc_request(
        request=RPCWakeUpRequest.WAKE_UP, socket=self.input_socket)

async def add_lora(self, lora_request: LoRARequest) -> None:
"""Load a new LoRA adapter into the engine for future requests."""
# Uses the same I/O as generate requests
Expand Down
15 changes: 13 additions & 2 deletions vllm/engine/multiprocessing/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,9 @@
RPCLoadAdapterRequest,
RPCProcessRequest,
RPCResetPrefixCacheRequest,
RPCStartupRequest, RPCStartupResponse,
RPCUProfileRequest)
RPCSleepRequest, RPCStartupRequest,
RPCStartupResponse,
RPCUProfileRequest, RPCWakeUpRequest)
# yapf: enable
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
Expand Down Expand Up @@ -242,6 +243,10 @@ def handle_new_input(self):
self._handle_load_adapter_request(request)
elif isinstance(request, RPCResetPrefixCacheRequest):
self.reset_prefix_cache()
elif isinstance(request, RPCSleepRequest):
self.sleep()
elif isinstance(request, RPCWakeUpRequest):
self.wake_up()
else:
raise ValueError("Unknown RPCRequest Type: "
f"{type(request)}")
Expand Down Expand Up @@ -369,6 +374,12 @@ def stop_profile(self) -> None:
def reset_prefix_cache(self) -> bool:
return self.engine.reset_prefix_cache()

def sleep(self) -> None:
    """Forward a sleep request to the underlying engine."""
    target = self.engine
    target.sleep()

def wake_up(self) -> None:
    """Forward a wake-up request to the underlying engine."""
    target = self.engine
    target.wake_up()


def signal_handler(*_) -> None:
raise KeyboardInterrupt("MQLLMEngine terminated")
Expand Down
10 changes: 10 additions & 0 deletions vllm/engine/protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,16 @@ async def reset_prefix_cache(self) -> None:
"""Reset the prefix cache"""
...

@abstractmethod
async def sleep(self) -> None:
    """Put the model to sleep; concrete clients forward this to their engine."""
    ...

@abstractmethod
async def wake_up(self) -> None:
    """Wake the model up from sleep; concrete clients forward this to their engine."""
    ...

@abstractmethod
async def add_lora(self, lora_request: LoRARequest) -> None:
"""Load a new LoRA adapter into the engine for future requests."""
Expand Down
20 changes: 20 additions & 0 deletions vllm/entrypoints/openai/api_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,6 +582,26 @@ async def reset_prefix_cache(raw_request: Request):
await engine_client(raw_request).reset_prefix_cache()
return Response(status_code=200)

@router.post("/sleep")
async def sleep(raw_request: Request):
    """Put the model to sleep.

    NOTE: whether the model was actually put to sleep is not verified
    here in the API server.
    """
    logger.info("Setting the model to sleep mode...")
    client = engine_client(raw_request)
    await client.sleep()
    return Response(status_code=200)

@router.post("/wake_up")
async def wake_up(raw_request: Request):
    """Wake the model up.

    NOTE: whether the model actually woke up is not verified here in
    the API server.
    """
    logger.info("Waking up the model...")
    client = engine_client(raw_request)
    await client.wake_up()
    return Response(status_code=200)


@router.post("/invocations")
async def invocations(raw_request: Request):
Expand Down
2 changes: 2 additions & 0 deletions vllm/v1/engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,3 +103,5 @@ class EngineCoreRequestType(enum.Enum):
ABORT = b'\x01'
PROFILE = b'\x02'
RESET_PREFIX_CACHE = b'\x03'
SLEEP = b'\x04'
WAKE_UP = b'\x05'
6 changes: 6 additions & 0 deletions vllm/v1/engine/async_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,6 +357,12 @@ async def stop_profile(self) -> None:
async def reset_prefix_cache(self) -> None:
await self.engine_core.reset_prefix_cache_async()

async def sleep(self) -> None:
    """Relay a sleep request to the engine core."""
    core = self.engine_core
    await core.sleep_async()

async def wake_up(self) -> None:
    """Relay a wake-up request to the engine core."""
    core = self.engine_core
    await core.wake_up_async()

@property
def is_running(self) -> bool:
return True
Expand Down
6 changes: 6 additions & 0 deletions vllm/v1/engine/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,12 @@ def profile(self, is_start: bool = True):
def reset_prefix_cache(self):
self.scheduler.reset_prefix_cache()

def sleep(self) -> None:
    """Put the model executor to sleep."""
    self.model_executor.sleep()

def wake_up(self) -> None:
    """Wake the model executor up from sleep.

    BUG FIX: this previously called ``self.model_executor.sleep()``
    (copy-paste error), so a slept engine could never be woken up.
    """
    self.model_executor.wake_up()


class EngineCoreProc(EngineCore):
"""ZMQ-wrapper for running EngineCore in background process."""
Expand Down
30 changes: 30 additions & 0 deletions vllm/v1/engine/core_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,12 @@ def profile(self, is_start: bool = True) -> None:
def reset_prefix_cache(self) -> None:
raise NotImplementedError

def sleep(self) -> None:
    # Abstract: concrete client subclasses override this.
    raise NotImplementedError

def wake_up(self) -> None:
    # Abstract: concrete client subclasses override this.
    raise NotImplementedError

def abort_requests(self, request_ids: List[str]) -> None:
raise NotImplementedError

Expand All @@ -88,6 +94,12 @@ async def profile_async(self, is_start: bool = True) -> None:
async def reset_prefix_cache_async(self) -> None:
raise NotImplementedError

async def sleep_async(self) -> None:
    # Abstract: overridden by async clients (e.g. AsyncMPClient).
    raise NotImplementedError

async def wake_up_async(self) -> None:
    # Abstract: overridden by async clients (e.g. AsyncMPClient).
    raise NotImplementedError

async def abort_requests_async(self, request_ids: List[str]) -> None:
raise NotImplementedError

Expand Down Expand Up @@ -124,6 +136,12 @@ def profile(self, is_start: bool = True) -> None:
def reset_prefix_cache(self) -> None:
self.engine_core.reset_prefix_cache()

def sleep(self) -> None:
    """Put the in-process engine core to sleep."""
    core = self.engine_core
    core.sleep()

def wake_up(self) -> None:
    """Wake the in-process engine core up."""
    core = self.engine_core
    core.wake_up()


class MPClient(EngineCoreClient):
"""
Expand Down Expand Up @@ -241,6 +259,12 @@ def profile(self, is_start: bool = True) -> None:
def reset_prefix_cache(self) -> None:
self._send_input(EngineCoreRequestType.RESET_PREFIX_CACHE, None)

def sleep(self) -> None:
    """Send a SLEEP request to the engine-core process."""
    request_type = EngineCoreRequestType.SLEEP
    self._send_input(request_type, None)

def wake_up(self) -> None:
    """Send a WAKE_UP request to the engine-core process."""
    request_type = EngineCoreRequestType.WAKE_UP
    self._send_input(request_type, None)


class AsyncMPClient(MPClient):
"""Asyncio-compatible client for multi-proc EngineCore."""
Expand Down Expand Up @@ -294,3 +318,9 @@ async def profile_async(self, is_start: bool = True) -> None:

async def reset_prefix_cache_async(self) -> None:
await self._send_input(EngineCoreRequestType.RESET_PREFIX_CACHE, None)

async def sleep_async(self) -> None:
    """Send a SLEEP request to the engine-core process."""
    request_type = EngineCoreRequestType.SLEEP
    await self._send_input(request_type, None)

async def wake_up_async(self) -> None:
    """Send a WAKE_UP request to the engine-core process."""
    request_type = EngineCoreRequestType.WAKE_UP
    await self._send_input(request_type, None)
6 changes: 6 additions & 0 deletions vllm/v1/engine/llm_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,12 @@ def stop_profile(self):
def reset_prefix_cache(self):
self.engine_core.reset_prefix_cache()

def sleep(self):
    """Forward a sleep request to the engine core."""
    core = self.engine_core
    core.sleep()

def wake_up(self):
    """Forward a wake-up request to the engine core."""
    core = self.engine_core
    core.wake_up()

def get_tokenizer_group(
self,
group_type: Type[_G] = BaseTokenizerGroup,
Expand Down