vllm/v1/engine/core.py (8 additions & 1 deletion)

@@ -386,6 +386,9 @@ def is_sleeping(self) -> bool:
     def execute_dummy_batch(self):
         self.model_executor.execute_dummy_batch()

+    def skip_dummy_batch(self):
+        self.model_executor.skip_dummy_batch()
+
     def add_lora(self, lora_request: LoRARequest) -> bool:
         return self.model_executor.add_lora(lora_request)

@@ -959,6 +962,7 @@ def __init__(
         self.step_counter = 0
         self.current_wave = 0
         self.last_counts = (0, 0)
+        self.use_ep = vllm_config.parallel_config.enable_expert_parallel

         # Initialize the engine.
         dp_rank = vllm_config.parallel_config.data_parallel_rank
@@ -1053,7 +1057,10 @@ def run_busy_loop(self):

                 # We are in a running state and so must execute a dummy pass
                 # if the model didn't execute any ready requests.
-                self.execute_dummy_batch()
+                if self.use_ep:
+                    self.execute_dummy_batch()
+                else:
+                    self.skip_dummy_batch()

             # 3) All-reduce operation to determine global unfinished reqs.
             self.engines_running = self._has_global_unfinished_reqs(
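The branch above only changes what an idle DP rank does when it has no ready requests: with expert parallelism enabled it still runs a real dummy forward pass (its MoE layers take part in cross-rank communication, so the other ranks need it to step), otherwise the forward pass can be skipped. A minimal, self-contained sketch of that decision follows; DummyEngine, idle_step and the return strings are illustrative stand-ins, not vLLM code.

# Sketch (hypothetical names) of the idle-step decision added to the DP
# busy loop: EP ranks still execute a dummy batch, other ranks skip it.

class DummyEngine:
    def __init__(self, enable_expert_parallel: bool) -> None:
        # Mirrors `self.use_ep = ...enable_expert_parallel` from the diff.
        self.use_ep = enable_expert_parallel

    def execute_dummy_batch(self) -> str:
        return "ran dummy forward pass"

    def skip_dummy_batch(self) -> str:
        return "skipped forward pass (still joins the DP padding sync)"

    def idle_step(self) -> str:
        # Same shape as the branch added to run_busy_loop.
        if self.use_ep:
            return self.execute_dummy_batch()
        return self.skip_dummy_batch()


if __name__ == "__main__":
    print(DummyEngine(enable_expert_parallel=True).idle_step())
    print(DummyEngine(enable_expert_parallel=False).idle_step())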
vllm/v1/executor/abstract.py (3 additions & 0 deletions)

@@ -108,6 +108,9 @@ def execute_model(
     def execute_dummy_batch(self) -> None:
         self.collective_rpc("execute_dummy_batch")

+    def skip_dummy_batch(self) -> None:
+        self.collective_rpc("skip_dummy_batch")
+
     def take_draft_token_ids(self) -> Optional[DraftTokenIds]:
         output = self.collective_rpc("take_draft_token_ids")
         return output[0]
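At the executor layer the new method is pure plumbing: collective_rpc broadcasts the method name to every worker and invokes it there. A minimal sketch of that dispatch pattern, with ToyExecutor and ToyWorker as illustrative stand-ins for the real executor and worker classes:

# Sketch of the collective_rpc pattern used by the executor: the method
# name is fanned out to all workers and invoked on each of them by name.

class ToyWorker:
    def __init__(self, rank: int) -> None:
        self.rank = rank

    def execute_dummy_batch(self) -> str:
        return f"worker {self.rank}: dummy batch"

    def skip_dummy_batch(self) -> str:
        return f"worker {self.rank}: skipped"


class ToyExecutor:
    def __init__(self, num_workers: int) -> None:
        self.workers = [ToyWorker(r) for r in range(num_workers)]

    def collective_rpc(self, method: str) -> list[str]:
        # Call `method` on every worker and collect the results.
        return [getattr(w, method)() for w in self.workers]

    def skip_dummy_batch(self) -> None:
        # Mirrors the new method in vllm/v1/executor/abstract.py.
        self.collective_rpc("skip_dummy_batch")


if __name__ == "__main__":
    print(ToyExecutor(2).collective_rpc("skip_dummy_batch"))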
vllm/v1/worker/gpu_model_runner.py (7 additions & 0 deletions)

@@ -2826,6 +2826,7 @@ def _dummy_run(
         is_profile: bool = False,
         create_mixed_batch: bool = False,
         remove_lora: bool = True,
+        skip_run: bool = False,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
         Run a dummy forward pass to warm up/profile run or capture the
@@ -2873,6 +2874,12 @@
         num_pad, num_tokens_across_dp = self.get_dp_padding(num_tokens)
         num_tokens += num_pad

+        # If self.get_dp_padding is not executed, non-MoE models with
+        # dp > 1 will block forever, so this early return comes after it.
+        if skip_run:
+            return torch.tensor(0, device=self.device), torch.tensor(
+                0, device=self.device)
+
         # If cudagraph_mode.decode_mode() == FULL and
         # cudagraph_mode.separate_routine(). This means that we are using
         # different graphs and/or modes for mixed prefill-decode batches vs.
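The placement of the new early return is deliberate: get_dp_padding is a collective across DP ranks that agrees on the padded token count, so a rank that skips the forward pass must still reach it, or the remaining ranks would wait forever (this is what the comment in the diff warns about). A toy, single-process sketch of that ordering constraint follows; fake_all_reduce_max, dummy_run and RANK_TOKEN_COUNTS are illustrative stand-ins, not vLLM APIs.

# Toy illustration (no real distributed backend) of why the skip_run early
# return sits *after* the DP padding sync: the sync is a collective, so
# every rank must call it before any rank may return early.

RANK_TOKEN_COUNTS = {0: 5, 1: 0}  # rank 1 has nothing to do this step


def fake_all_reduce_max(values: dict[int, int]) -> int:
    # Stand-in for the collective hidden inside get_dp_padding(); in the
    # real code a rank that never calls it leaves the others waiting.
    return max(values.values())


def dummy_run(rank: int, skip_run: bool) -> int:
    padded = fake_all_reduce_max(RANK_TOKEN_COUNTS)  # every rank participates
    if skip_run:
        return 0        # skip the forward pass, but only after the sync
    return padded       # pretend to run the padded dummy batch


if __name__ == "__main__":
    for rank in RANK_TOKEN_COUNTS:
        print(rank, dummy_run(rank, skip_run=(rank == 1)))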
vllm/v1/worker/gpu_worker.py (3 additions & 0 deletions)

@@ -489,6 +489,9 @@ def profile(self, is_start: bool = True):
     def execute_dummy_batch(self) -> None:
         self.model_runner._dummy_run(1)

+    def skip_dummy_batch(self) -> None:
+        self.model_runner._dummy_run(1, skip_run=True)
+
     def add_lora(self, lora_request: LoRARequest) -> bool:
         return self.model_runner.add_lora(lora_request)
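Putting the four files together, the path taken when an idle DP rank is not using expert parallelism is roughly: the DP engine core's run_busy_loop calls self.skip_dummy_batch(), which forwards through the executor's collective_rpc("skip_dummy_batch"); each worker then calls self.model_runner._dummy_run(1, skip_run=True), which returns right after the DP padding sync instead of running a forward pass.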