We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent cd87420 commit ed17129 — Copy full SHA for ed17129
lmms_eval/models/qwen_vl.py
@@ -239,13 +239,14 @@ def _collate(x):
239
# Similar to llava: if visual_paths has len 0,
240
# Then nothing will be executed
241
query = []
242
- for visual_path, context in zip(visual_paths, contexts):
243
- query.append({"image": visual_path})
244
- query.append({"text": context})
245
-
246
if len(visual_paths) == 0:
247
for context in contexts:
248
query.append({"text": context})
+ else:
+ for visual_path, context in zip(visual_paths, contexts):
+ query.append({"image": visual_path})
+ query.append({"text": context})
249
+
250
251
questions = self.tokenizer.from_list_format(query)
252
input_ids = self.tokenizer(questions, return_tensors="pt", padding="longest")
0 commit comments