diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py
index 303f04b64dcc..3b0a6a492412 100644
--- a/vllm/model_executor/models/gemma2.py
+++ b/vllm/model_executor/models/gemma2.py
@@ -63,7 +63,6 @@ def __init__(
         self,
         hidden_size: int,
         intermediate_size: int,
-        hidden_act: str,
         hidden_activation: str,
         quant_config: QuantizationConfig | None = None,
         prefix: str = "",
@@ -83,11 +82,10 @@ def __init__(
             quant_config=quant_config,
             prefix=f"{prefix}.down_proj",
         )
-        if not (hidden_act == hidden_activation == "gelu_pytorch_tanh"):
+        if not (hidden_activation == "gelu_pytorch_tanh"):
             raise ValueError(
                 "Gemma2 uses `gelu_pytorch_tanh` as the hidden activation "
-                "function. Please set `hidden_act` and `hidden_activation` to "
-                "`gelu_pytorch_tanh`."
+                "function. Please set `hidden_activation` to `gelu_pytorch_tanh`."
             )
         self.act_fn = GeluAndMul(approximate="tanh")
 
@@ -212,7 +210,6 @@ def __init__(
         self.mlp = Gemma2MLP(
             hidden_size=self.hidden_size,
             intermediate_size=config.intermediate_size,
-            hidden_act=config.hidden_act,
             hidden_activation=config.hidden_activation,
             quant_config=quant_config,
             prefix=f"{prefix}.mlp",