From 2b95850d3cdec1a95fb0212322e862d36c0f876e Mon Sep 17 00:00:00 2001
From: ltd0924 <32387785+ltd0924@users.noreply.github.com>
Date: Tue, 2 Sep 2025 18:41:10 +0800
Subject: [PATCH] Update qwen_vl_processor.py

---
 fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py b/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
index ab249b1f091..dc85b78c04c 100644
--- a/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
+++ b/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
@@ -231,6 +231,15 @@ def process_request_dict(self, request, max_model_len=None):
         elif request.get("messages"):
             messages = request["messages"]
             self._check_mm_limits(messages)
+            chat_template_kwargs = request.get("chat_template_kwargs")
+            if chat_template_kwargs:
+                if isinstance(chat_template_kwargs, dict):
+                    for k, v in chat_template_kwargs.items():
+                        if k not in request:
+                            request[k] = v
+                else:
+                    raise ValueError("Invalid input: chat_template_kwargs must be a dict")
+            request.setdefault("enable_thinking", True)
             outputs = self.processor.request2ids(request)
         else: