bugfix: llava-hf/llava-interleave-qwen-7b-hf (InternLM#2657)
- fix: init raised an exception because of the tie_word_embeddings config
deepindeed2022 authored and AllentDan committed Nov 13, 2024
1 parent 8502f70 commit c1e3ab7
Showing 1 changed file with 3 additions and 1 deletion.
lmdeploy/vl/model/llava_hf.py (3 additions, 1 deletion)
@@ -31,13 +31,15 @@ def build_model(self):
         else:
             self.vl_model = model
 
+        # fix for llava-hf/llava-interleave-qwen-7b-hf
+        setattr(model.config, "tie_word_embeddings", False)
         with disable_logging():
             load_checkpoint_and_dispatch(
                 model=model,
                 max_memory=self.max_memory,
                 checkpoint=self.model_path,
                 device_map='auto' if not self.with_llm else {'': 'cpu'},
-                no_split_module_classes=['CLIPEncoderLayer'],
+                no_split_module_classes=['CLIPEncoderLayer', 'SiglipEncoderLayer'],
                 dtype=torch.half)
         model.eval()
         self.model = model
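For context, here is a minimal sketch of the pattern this commit applies: build the model with empty weights, disable weight tying on the config, then dispatch the checkpoint with accelerate's load_checkpoint_and_dispatch. The flow is illustrative rather than lmdeploy's exact code, and model_path is assumed to point at a local snapshot of llava-hf/llava-interleave-qwen-7b-hf.

import torch
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, LlavaForConditionalGeneration

# Assumption: a local directory containing the downloaded checkpoint.
model_path = '/path/to/llava-interleave-qwen-7b-hf'

config = AutoConfig.from_pretrained(model_path)

# Build the model skeleton on the meta device, without allocating weights.
with init_empty_weights():
    model = LlavaForConditionalGeneration(config)

# The fix from this commit: per the commit message, leaving
# tie_word_embeddings enabled made initialization raise for this model,
# so tying is switched off before the real weights are dispatched.
setattr(model.config, 'tie_word_embeddings', False)

# Load and shard the weights; keep each vision encoder layer on one device.
model = load_checkpoint_and_dispatch(
    model=model,
    checkpoint=model_path,
    device_map='auto',
    no_split_module_classes=['CLIPEncoderLayer', 'SiglipEncoderLayer'],
    dtype=torch.half)
model.eval()

The SiglipEncoderLayer entry in no_split_module_classes mirrors the diff above: llava-interleave models use a SigLIP vision tower rather than CLIP, so its encoder layers also need to stay unsplit when device_map='auto' shards the model.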
