Skip to content

Commit a869bac

Browse files
[Bugfix] Fix Llama GGUF initialization (#18717)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent 82e2339 commit a869bac

File tree

1 file changed

+1
-1
lines changed

1 file changed

+1
-1
lines changed

vllm/model_executor/models/llama.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -208,7 +208,7 @@ def _init_rotary_emb(self, config: LlamaConfig,
208208
quant_config: Optional[QuantizationConfig]) -> None:
209209
is_neox_style = True
210210
is_gguf = quant_config and quant_config.get_name() == "gguf"
211-
if is_gguf and self.config.model_type == "llama":
211+
if is_gguf and config.model_type == "llama":
212212
is_neox_style = False
213213

214214
self.rotary_emb = get_rope(

0 commit comments

Comments (0)