From 8a68f1c65ebd4ee907428befe6ce0c6f1111f746 Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Mon, 26 May 2025 14:40:48 +0000
Subject: [PATCH] [Bugfix] Fix Llama GGUF initialization

Signed-off-by: DarkLight1337
---
 vllm/model_executor/models/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index 6584980f6dc2..d36b6466c0bb 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -208,7 +208,7 @@ def _init_rotary_emb(self, config: LlamaConfig,
                          quant_config: Optional[QuantizationConfig]) -> None:
         is_neox_style = True
         is_gguf = quant_config and quant_config.get_name() == "gguf"
-        if is_gguf and self.config.model_type == "llama":
+        if is_gguf and config.model_type == "llama":
             is_neox_style = False
 
         self.rotary_emb = get_rope(