Commit 4e15d77

fix qwenvl without pad

Signed-off-by: wangli <[email protected]>

1 parent 8e1d58b

File tree

1 file changed, 8 insertions(+), 2 deletions(-)


vllm_ascend/models/qwen2_5_vl_without_padding.py

Lines changed: 8 additions & 2 deletions
@@ -30,7 +30,8 @@
 from vllm.config import VllmConfig
 from vllm.distributed import parallel_state
 from vllm.distributed import utils as dist_utils
-from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY
+from vllm.model_executor.layers.activation import (_ACTIVATION_REGISTRY,
+                                                   get_act_and_mul_fn)
 from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.models.qwen2_5_vl import (
@@ -42,6 +43,7 @@
 from vllm.multimodal import MULTIMODAL_REGISTRY
 
 from vllm_ascend.models.qwen2_5_vl import AscendQwen2_5_VisionRotaryEmbedding
+from vllm_ascend.utils import vllm_version_is
 
 
 class AscendQwen2_5_VisionAttention_Without_Padding(Qwen2_5_VisionAttention):
@@ -171,12 +173,16 @@ def __init__(
             in_channels=vision_config.in_channels,
             hidden_size=self.hidden_size,
         )
+
+        act_fn = get_act_and_mul_fn(vision_config.hidden_act)
+        if vllm_version_is("0.10.0"):
+            act_fn = _ACTIVATION_REGISTRY[vision_config.hidden_act]
         self.blocks = nn.ModuleList([
             AscendQwen2_5_VisionBlock_Without_Padding(
                 dim=self.hidden_size,
                 num_heads=self.num_heads,
                 mlp_hidden_dim=vision_config.intermediate_size,
-                act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act],
+                act_fn=act_fn,
                 norm_layer=norm_layer,
                 quant_config=quant_config,
                 prefix=f"{prefix}.blocks.{layer_idx}")
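
Why the version gate: in vLLM 0.10.0 the vision block's activation comes straight from _ACTIVATION_REGISTRY, a plain elementwise activation such as SiLU, while later vLLM resolves it via get_act_and_mul_fn, which returns a fused act-and-mul module (e.g. SiluAndMul) that expects the gate and up projections concatenated on the last dimension. Below is a minimal sketch of that shape contract in plain PyTorch with no vLLM dependency; PlainSilu and SiluAndMulSketch are illustrative stand-ins, not the vLLM classes.

import torch
import torch.nn as nn


class PlainSilu(nn.Module):
    """Elementwise activation: output width equals input width."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return nn.functional.silu(x)


class SiluAndMulSketch(nn.Module):
    """Fused act-and-mul: input is [gate | up] concatenated on the last
    dim; returns silu(gate) * up, so output width is input width // 2."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate, up = x.chunk(2, dim=-1)
        return nn.functional.silu(gate) * up


x = torch.randn(4, 8)
print(PlainSilu()(x).shape)         # torch.Size([4, 8])
print(SiluAndMulSketch()(x).shape)  # torch.Size([4, 4])

Because the two modules disagree on input/output widths, passing one where the other is expected breaks the vision MLP, hence selecting act_fn once up front and keeping the registry lookup only for the 0.10.0 code path.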
