-
-
Notifications
You must be signed in to change notification settings - Fork 16.3k
Fix LoRA adapter silently failing on Pixtral/Ministral-3 models #34964
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -130,6 +130,24 @@ def _load_adapter(self, lora_request: LoRARequest) -> LoRAModel: | |
| skip_prefixes=lora_skip_prefixes, | ||
| ) | ||
|
|
||
| # Check that at least some loaded LoRA modules will match | ||
| # the model's module names. When hf_to_vllm_mapper is | ||
| # missing, LoRA weights may load successfully but with | ||
| # wrong module paths, causing them to be silently ignored. | ||
| model_module_names = {name for name, _ in model.named_modules()} | ||
| matched = any( | ||
| module_name in model_module_names for module_name in lora.loras | ||
| ) | ||
|
Comment on lines
+137
to
+140
Contributor
| There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This check is redundant because a more precise check is performed in |
||
| if lora.loras and not matched: | ||
| logger.warning( | ||
| "None of the LoRA modules in adapter '%s' matched " | ||
| "any module in %s. The adapter weights will have no " | ||
| "effect. This is usually caused by a missing " | ||
| "hf_to_vllm_mapper on the model class.", | ||
| lora_request.lora_name, | ||
| model.__class__.__name__, | ||
| ) | ||
|
|
||
| except FileNotFoundError as e: | ||
| # FileNotFoundError should be raised if both | ||
| # - No adapter found to download from huggingface (or in | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -40,6 +40,7 @@ | |||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||
| from vllm.model_executor.layers.quantization import QuantizationConfig | ||||||||||||||||||||||||||||||||||
| from vllm.model_executor.model_loader.weight_utils import default_weight_loader | ||||||||||||||||||||||||||||||||||
| from vllm.model_executor.models.utils import WeightsMapper | ||||||||||||||||||||||||||||||||||
| from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargsItems | ||||||||||||||||||||||||||||||||||
| from vllm.multimodal.inputs import ( | ||||||||||||||||||||||||||||||||||
| MultiModalDataDict, | ||||||||||||||||||||||||||||||||||
|
|
@@ -372,6 +373,15 @@ def _cached_apply_hf_processor( | |||||||||||||||||||||||||||||||||
| class PixtralForConditionalGeneration( | ||||||||||||||||||||||||||||||||||
| nn.Module, SupportsLoRA, SupportsMultiModal, SupportsPP | ||||||||||||||||||||||||||||||||||
| ): | ||||||||||||||||||||||||||||||||||
| hf_to_vllm_mapper = WeightsMapper( | ||||||||||||||||||||||||||||||||||
| orig_to_new_prefix={ | ||||||||||||||||||||||||||||||||||
| "model.language_model.": "language_model.model.", | ||||||||||||||||||||||||||||||||||
| "model.vision_tower.": "vision_tower.", | ||||||||||||||||||||||||||||||||||
| "model.multi_modal_projector.": "multi_modal_projector.", | ||||||||||||||||||||||||||||||||||
| "lm_head.": "language_model.lm_head.", | ||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||
|
Comment on lines
+376
to
+383
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The target prefixes in the
Suggested change
|
||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||
| @classmethod | ||||||||||||||||||||||||||||||||||
| def get_placeholder_str(cls, modality: str, i: int) -> str | None: | ||||||||||||||||||||||||||||||||||
| if modality.startswith("image"): | ||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
These warnings may be misleading, please delete them