293 changes: 156 additions & 137 deletions vllm_ascend/worker/model_runner_v1.py
@@ -486,11 +486,8 @@ def get_model(self) -> nn.Module:
def _prepare_inputs(
self,
scheduler_output: "SchedulerOutput",
intermediate_tensors: Optional[IntermediateTensors] = None,
) -> tuple[dict[str, Any], torch.Tensor, np.ndarray, int, torch.Tensor,
int, torch.Tensor, SpecDecodeMetadata, Optional[torch.Tensor],
Optional[torch.Tensor], Optional[torch.Tensor], int, dict[str,
Any]]:
) -> tuple[dict[str, Any], np.ndarray, int, Optional[torch.Tensor],
torch.Tensor, Optional[SpecDecodeMetadata], int]:
total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
assert total_num_scheduled_tokens > 0
num_reqs = self.input_batch.num_reqs
@@ -589,15 +586,10 @@ def _prepare_inputs(
self.query_lens = torch.from_numpy(num_scheduled_tokens)

# Get info across DP ranks.
# NOTE: maybe_padded_num_tokens is only used when using TorchAir with DP,
# Otherwise, it's just max_tokens_across_dp_cpu
(maybe_padded_num_tokens, num_tokens_across_dp,
(num_input_tokens, num_tokens_across_dp,
with_prefill) = self._sync_metadata_across_dp(num_input_tokens,
with_prefill)
self.with_prefill = with_prefill
# TODO: Now that num_input_tokens is basically identical with maybe_padded_num_tokens
# We should consider removing maybe_padded_num_tokens later
num_input_tokens = maybe_padded_num_tokens

# Hot-Swap lora model
if self.lora_config:
@@ -732,111 +724,6 @@ def _prepare_inputs(
discard_request_indices)
self.discard_request_indices.copy_to_gpu(self.num_discarded_requests)

# _prepare_inputs may reorder the batch, so we must gather
# multi-modal outputs after that to ensure the correct order
if vllm_version_is('0.13.0'):
model_kwargs = self._init_model_kwargs(num_input_tokens)
else:
model_kwargs = self._init_model_kwargs()
if self.is_multimodal_model and not self.model_config.is_encoder_decoder:
self.multimodal_cpu_fields = ["grid_thw"]
self._prepare_multimodal_fields()
with self.maybe_get_ec_connector_output(
scheduler_output,
encoder_cache=self.encoder_cache,
):
# Run the multimodal encoder if any.
self._execute_mm_encoder(scheduler_output)

# NOTE(woosuk): To unify token ids and soft tokens (vision
# embeddings), we always use embeddings (rather than token ids)
# as input to the multimodal model, even when the input is text.
input_ids = self.input_ids.gpu[:total_num_scheduled_tokens]
mm_embeds, is_mm_embed = self._gather_mm_embeddings(
scheduler_output)

inputs_embeds = self.model.embed_input_ids(
input_ids,
multimodal_embeddings=mm_embeds,
is_multimodal=is_mm_embed,
)

# TODO(woosuk): Avoid the copy. Optimize.
self.inputs_embeds.gpu[:total_num_scheduled_tokens].copy_(
inputs_embeds)
inputs_embeds = self.inputs_embeds.gpu[:num_input_tokens]
input_ids = None
elif self.enable_prompt_embeds and get_pp_group().is_first_rank:
# Get the input embeddings for the tokens that are not input embeds,
# then put them into the appropriate positions.
# TODO(qthequartermasterman): Since even when prompt embeds are
# enabled, (a) not all requests will use prompt embeds, and (b)
# after the initial prompt is processed, the rest of the generated
# tokens will be token ids, it is not desirable to have the
# embedding layer outside of the acl graph all the time. The v0
# engine avoids this by "double compiling" the acl graph, once
# with input_ids and again with inputs_embeds, for all num_tokens.
# If a batch only has token ids, then including the embedding layer
# in the acl graph will be more performant (like in the else case
# below).
token_ids_idx = self.is_token_ids.gpu[:total_num_scheduled_tokens] \
.nonzero(as_tuple=False) \
.squeeze(1)
# Some tokens ids may need to become embeds
if token_ids_idx.numel() > 0:
token_ids = self.input_ids.gpu[token_ids_idx]
tokens_to_embeds = self.model.embed_input_ids(
input_ids=token_ids)
self.inputs_embeds.gpu[token_ids_idx] = tokens_to_embeds

inputs_embeds = self.inputs_embeds.gpu[:num_input_tokens]
input_ids = None
else:
# For text-only models, we use token ids as input.
# While it is possible to use embeddings as input just like the
# multimodal models, it is not desirable for performance since
# then the embedding layer is not included in the ACL graph.
input_ids = self.input_ids.gpu[:num_input_tokens]
inputs_embeds = None
if self.uses_mrope:
positions = self.mrope_positions.gpu[:, :num_input_tokens]
elif self.uses_xdrope_dim > 0:
positions = self.xdrope_positions.gpu[:, :num_input_tokens]
else:
positions = self.positions.gpu[:num_input_tokens]

# Run the encoder, just like we do with other multimodal inputs.
if self.model_config.is_encoder_decoder and scheduler_output.scheduled_encoder_inputs:
input_ids = self.input_ids.gpu[:total_num_scheduled_tokens]
positions = self.positions.gpu[:total_num_scheduled_tokens]
encoder_outputs = self._execute_mm_encoder(scheduler_output)
model_kwargs.update({"encoder_outputs": encoder_outputs})

# type: ignore
if get_pp_group().is_first_rank:
intermediate_tensors = None
else:
assert intermediate_tensors is not None
assert self.intermediate_tensors is not None
# If both flashcomm1 and pp are used simultaneously,
# the shape of the received data and the shape of the space to be copied to will not match,
# requiring a recalculation of the incoming data's shape.
tp_size = get_tensor_model_parallel_world_size()
num_input_tokens_with_flashcomm1 = num_input_tokens
if enable_sp():
num_input_tokens_with_flashcomm1 = (num_input_tokens +
tp_size - 1) // tp_size
for k, v in intermediate_tensors.items():
self.intermediate_tensors[
k][:num_input_tokens_with_flashcomm1].copy_(
v[:num_input_tokens_with_flashcomm1],
non_blocking=True)
intermediate_tensors = IntermediateTensors({
k:
v[:num_input_tokens_with_flashcomm1]
for k, v in self.intermediate_tensors.items()
})

use_spec_decode = len(
scheduler_output.scheduled_spec_decode_tokens) > 0
if not use_spec_decode:
@@ -1068,20 +955,15 @@ def _prepare_inputs(
for layer_name in attn_group.layer_names:
attn_metadata[layer_name] = attn_metadata_i

# update global cos, sin
update_cos_sin(positions)

if lmhead_tp_enable():
max_num_reqs_across_dp = self.max_num_reqs * self.uniform_decode_query_len
logits_indices = nn.functional.pad(
logits_indices,
(0, max_num_reqs_across_dp - logits_indices.shape[0]))

return (attn_metadata, positions, num_scheduled_tokens,
num_input_tokens, num_tokens_across_dp,
maybe_padded_num_tokens, logits_indices, spec_decode_metadata,
input_ids, inputs_embeds, intermediate_tensors,
max_num_scheduled_tokens, model_kwargs)
return (attn_metadata, num_scheduled_tokens, num_input_tokens,
num_tokens_across_dp, logits_indices, spec_decode_metadata,
max_num_scheduled_tokens)

# all-gather one hidden-states in sp scene
@staticmethod
@@ -1110,7 +992,7 @@ def _all_gather_hidden_states_and_aux(hidden_states):
hidden_states[1]))
return NPUModelRunner._all_gather_hidden_states(hidden_states)

def _generate_process_reqs_hidden_states(self, maybe_padded_num_tokens,
def _generate_process_reqs_hidden_states(self, num_input_tokens,
input_ids, positions,
intermediate_tensors,
inputs_embeds, model_kwargs):
@@ -1124,26 +1006,25 @@ def _generate_process_reqs_hidden_states(self, maybe_padded_num_tokens,
forward_context = get_forward_context()
if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL \
and not self.use_sparse:
# TODO: maybe_padded_num_tokens will be removed, use num_input_tokens instead
if self.vllm_config.model_config.use_mla:
if self.pcp_size * self.dcp_size > 1:
# FIXME: Try using `auto_dispatch_capture=True`
update_mla_attn_dcp_pcp_params(self.update_stream,
forward_context,
maybe_padded_num_tokens)
num_input_tokens)
else:
# FIXME: Try using `auto_dispatch_capture=True`
update_mla_attn_params(self.update_stream, forward_context,
maybe_padded_num_tokens,
num_input_tokens,
self.speculative_config)
else:
if self.pcp_size * self.dcp_size > 1:
update_attn_dcp_pcp_params(self.update_stream,
forward_context,
maybe_padded_num_tokens)
num_input_tokens)
else:
update_attn_params(self.update_stream, forward_context,
maybe_padded_num_tokens,
num_input_tokens,
self.vllm_config)

if get_forward_context().sp_enabled and not isinstance(
@@ -1153,6 +1034,142 @@ def _generate_process_reqs_hidden_states(self, maybe_padded_num_tokens,
return hidden_states if self.pcp_size == 1 else self.pcp_manager.get_restore_hidden_states(
hidden_states)

def _preprocess(
self,
scheduler_output: "SchedulerOutput",
num_input_tokens: int, # Padded
intermediate_tensors: Optional[IntermediateTensors] = None,
) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor], torch.Tensor,
Optional[IntermediateTensors], dict[str, Any]]:
num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
is_first_rank = get_pp_group().is_first_rank

# _prepare_inputs may reorder the batch, so we must gather
# multi-modal outputs after that to ensure the correct order

if self.is_multimodal_model and not self.model_config.is_encoder_decoder:
self.multimodal_cpu_fields = ["grid_thw"]
self._prepare_multimodal_fields()
with self.maybe_get_ec_connector_output(
scheduler_output,
encoder_cache=self.encoder_cache,
):
# Run the multimodal encoder if any.
self._execute_mm_encoder(scheduler_output)

# NOTE(woosuk): To unify token ids and soft tokens (vision
# embeddings), we always use embeddings (rather than token ids)
# as input to the multimodal model, even when the input is text.
input_ids = self.input_ids.gpu[:num_scheduled_tokens]
mm_embeds, is_mm_embed = self._gather_mm_embeddings(
scheduler_output)

inputs_embeds = self.model.embed_input_ids(
input_ids,
multimodal_embeddings=mm_embeds,
is_multimodal=is_mm_embed,
)

# TODO(woosuk): Avoid the copy. Optimize.
self.inputs_embeds.gpu[:num_scheduled_tokens].copy_(inputs_embeds)

input_ids, inputs_embeds = self._prepare_mm_inputs(num_input_tokens)
if vllm_version_is('0.13.0'):
model_kwargs = {
**self._init_model_kwargs(num_input_tokens),
**self._extract_mm_kwargs(scheduler_output),
[Review comment (Collaborator Author): Qwen-Omni needs mm_kwargs]

}
else:
model_kwargs = {
**self._init_model_kwargs(),
**self._extract_mm_kwargs(scheduler_output),
}

elif self.enable_prompt_embeds and is_first_rank:
# Get the input embeddings for the tokens that are not input embeds,
# then put them into the appropriate positions.
# TODO(qthequartermasterman): Since even when prompt embeds are
# enabled, (a) not all requests will use prompt embeds, and (b)
# after the initial prompt is processed, the rest of the generated
# tokens will be token ids, it is not desirable to have the
# embedding layer outside of the acl graph all the time. The v0
# engine avoids this by "double compiling" the acl graph, once
# with input_ids and again with inputs_embeds, for all num_tokens.
# If a batch only has token ids, then including the embedding layer
# in the acl graph will be more performant (like in the else case
# below).
token_ids_idx = self.is_token_ids.gpu[:num_scheduled_tokens] \
.nonzero(as_tuple=False) \
.squeeze(1)
# Some tokens ids may need to become embeds
if token_ids_idx.numel() > 0:
token_ids = self.input_ids.gpu[token_ids_idx]
tokens_to_embeds = self.model.embed_input_ids(
input_ids=token_ids)
self.inputs_embeds.gpu[token_ids_idx] = tokens_to_embeds

inputs_embeds = self.inputs_embeds.gpu[:num_input_tokens]
if vllm_version_is('0.13.0'):
model_kwargs = self._init_model_kwargs(num_input_tokens)
else:
model_kwargs = self._init_model_kwargs()
input_ids = None
else:
# For text-only models, we use token ids as input.
# While it is possible to use embeddings as input just like the
# multimodal models, it is not desirable for performance since
# then the embedding layer is not included in the ACL graph.
input_ids = self.input_ids.gpu[:num_input_tokens]
inputs_embeds = None
if vllm_version_is('0.13.0'):
model_kwargs = self._init_model_kwargs(num_input_tokens)
else:
model_kwargs = self._init_model_kwargs()

if self.uses_mrope:
positions = self.mrope_positions.gpu[:, :num_input_tokens]
elif self.uses_xdrope_dim > 0:
positions = self.xdrope_positions.gpu[:, :num_input_tokens]
else:
positions = self.positions.gpu[:num_input_tokens]

# Run the encoder, just like we do with other multimodal inputs.
if self.model_config.is_encoder_decoder and scheduler_output.scheduled_encoder_inputs:
input_ids = self.input_ids.gpu[:num_scheduled_tokens]
positions = self.positions.gpu[:num_scheduled_tokens]
encoder_outputs = self._execute_mm_encoder(scheduler_output)
model_kwargs.update({"encoder_outputs": encoder_outputs})

if is_first_rank:
intermediate_tensors = None
else:
assert intermediate_tensors is not None
assert self.intermediate_tensors is not None
# If both flashcomm1 and pp are used simultaneously,
# the shape of the received data and the shape of the space to be copied to will not match,
# requiring a recalculation of the incoming data's shape.
tp_size = get_tensor_model_parallel_world_size()
num_input_tokens_with_flashcomm1 = num_input_tokens
if enable_sp():
num_input_tokens_with_flashcomm1 = (num_input_tokens +
tp_size - 1) // tp_size
for k, v in intermediate_tensors.items():
self.intermediate_tensors[
k][:num_input_tokens_with_flashcomm1].copy_(
v[:num_input_tokens_with_flashcomm1],
non_blocking=True)
intermediate_tensors = IntermediateTensors({
k:
v[:num_input_tokens_with_flashcomm1]
for k, v in self.intermediate_tensors.items()
})

# update global cos, sin
update_cos_sin(positions)

return (input_ids, inputs_embeds, positions, intermediate_tensors,
model_kwargs)

def _build_attn_state(self, num_reqs, num_scheduled_tokens,
num_valid_tokens):
if np.all(self.input_batch.num_computed_tokens_cpu[:num_reqs] == 0):
@@ -1475,12 +1492,14 @@ def execute_model(
if self.dynamic_eplb:
self.eplb_updator.forward_before()

(attn_metadata, positions, num_scheduled_tokens_np,
num_input_tokens, num_tokens_across_dp, maybe_padded_num_tokens,
logits_indices, spec_decode_metadata, input_ids, inputs_embeds,
intermediate_tensors, max_query_len,
model_kwargs) = (self._prepare_inputs(scheduler_output,
intermediate_tensors))
(attn_metadata, num_scheduled_tokens_np, num_input_tokens,
num_tokens_across_dp, logits_indices, spec_decode_metadata,
max_query_len) = self._prepare_inputs(scheduler_output)

(input_ids, inputs_embeds, positions, intermediate_tensors,
model_kwargs) = self._preprocess(scheduler_output,
num_input_tokens,
intermediate_tensors)

if self.dynamic_eplb:
self.eplb_updator.take_update_info_from_eplb_process()
@@ -1524,7 +1543,7 @@ def _prepare_inputs(
self.maybe_setup_kv_connector(scheduler_output)

hidden_states = self._generate_process_reqs_hidden_states(
maybe_padded_num_tokens, input_ids, positions,
num_input_tokens, input_ids, positions,
intermediate_tensors, inputs_embeds, model_kwargs)

self.maybe_wait_for_kv_save()
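
The flashcomm1/pipeline-parallel branch in the new _preprocess above recomputes how many rows of the received intermediate tensors each rank actually copies. Below is a minimal, self-contained sketch of that ceiling-division recalculation; it is not part of the diff, the function name is hypothetical, and in the real code the values come from the vLLM config, enable_sp(), and get_tensor_model_parallel_world_size().

def flashcomm1_rows(num_input_tokens: int, tp_size: int, sp_enabled: bool) -> int:
    """Rows each rank copies from the incoming intermediate tensors.

    When flashcomm1 (sequence parallelism) is combined with pipeline
    parallelism, tokens are sharded across tensor-parallel ranks, so the
    copy length is ceil(num_input_tokens / tp_size) rather than the full
    padded token count.
    """
    if not sp_enabled:
        return num_input_tokens
    # Ceiling division without floating point.
    return (num_input_tokens + tp_size - 1) // tp_size


# e.g. 1000 padded input tokens sharded over 8 TP ranks -> 125 rows per rank;
# without sequence parallelism every rank keeps all 1000 rows.
print(flashcomm1_rows(1000, 8, sp_enabled=True))   # 125
print(flashcomm1_rows(1000, 8, sp_enabled=False))  # 1000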
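
Similarly, the lmhead_tp_enable() branch in _prepare_inputs right-pads logits_indices so every data-parallel rank presents the same number of indices to the tensor-parallel LM head. A small runnable sketch of that padding, with hypothetical tensor values; in the real code max_num_reqs_across_dp is max_num_reqs * uniform_decode_query_len.

import torch
import torch.nn.functional as F

def pad_logits_indices(logits_indices: torch.Tensor,
                       max_num_reqs_across_dp: int) -> torch.Tensor:
    # Right-pad with zeros up to the DP-wide maximum so all ranks agree on shape.
    return F.pad(logits_indices,
                 (0, max_num_reqs_across_dp - logits_indices.shape[0]))

print(pad_logits_indices(torch.tensor([0, 3, 7]), 6))  # tensor([0, 3, 7, 0, 0, 0])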