From c1e3ab7848cef082979a775567569e3f232f4253 Mon Sep 17 00:00:00 2001
From: Willow <523814299@qq.com>
Date: Mon, 28 Oct 2024 14:20:26 +0800
Subject: [PATCH] bugfix: llava-hf/llava-interleave-qwen-7b-hf (#2657)

- fix init raise exception because tie_word_embeddings config
---
 lmdeploy/vl/model/llava_hf.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lmdeploy/vl/model/llava_hf.py b/lmdeploy/vl/model/llava_hf.py
index 66faf4f467..c2a0e4afa0 100644
--- a/lmdeploy/vl/model/llava_hf.py
+++ b/lmdeploy/vl/model/llava_hf.py
@@ -31,13 +31,15 @@ def build_model(self):
         else:
             self.vl_model = model
 
+        # fix for llava-hf/llava-interleave-qwen-7b-hf
+        setattr(model.config, "tie_word_embeddings", False)
         with disable_logging():
             load_checkpoint_and_dispatch(
                 model=model,
                 max_memory=self.max_memory,
                 checkpoint=self.model_path,
                 device_map='auto' if not self.with_llm else {'': 'cpu'},
-                no_split_module_classes=['CLIPEncoderLayer'],
+                no_split_module_classes=['CLIPEncoderLayer', 'SiglipEncoderLayer'],
                 dtype=torch.half)
         model.eval()
         self.model = model