3 files changed (+14, -4 lines)

@@ -1495,6 +1495,12 @@ def _parse_chat_message_content(
     role = message["role"]
     content = message.get("content")
     reasoning = message.get("reasoning") or message.get("reasoning_content")
+    # TODO: get this from reasoning_content instead?
+
+    # HACK: always dict-wrap tool message content (see wrap_dicts below)
+    if role == "tool":
+        content_format = "openai"
+
     if content is None:
         content = []
     elif isinstance(content, str):
@@ -1503,7 +1509,9 @@ def _parse_chat_message_content(
         role,
         content,  # type: ignore
         mm_tracker,
-        wrap_dicts=(content_format == "openai"),
+        wrap_dicts=(
+            content_format == "openai"
+        ),  # Kimi K2 treats this content as a string and breaks on tool messages
         interleave_strings=interleave_strings,
     )

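Taken together, these two hunks force tool-role messages through the
dict-wrapping path. A minimal sketch of what that wrapping amounts to,
assuming the OpenAI-style content-parts format (wrap_tool_content is
illustrative, not vLLM's actual helper):

# Illustrative only: a plain string content on a tool message is wrapped
# into a list of typed part dicts, matching the "openai" content format.
def wrap_tool_content(message: dict) -> dict:
    content = message.get("content")
    if message["role"] == "tool" and isinstance(content, str):
        return {**message, "content": [{"type": "text", "text": content}]}
    return message

print(wrap_tool_content({"role": "tool", "content": "42"}))
# -> {'role': 'tool', 'content': [{'type': 'text', 'text': '42'}]}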

@@ -263,7 +263,7 @@ def need_builtin_tool_call(self) -> bool:
         last_message = self.parser.chat_completion_messages[-1]["content"][-1]
         if isinstance(last_message, FunctionCall):
             # HACK: figure out which tools are MCP tools
-            if last_message.name == "code_interpreter":
+            if last_message.name in ("code_interpreter", "python"):
                 return True

         return False
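The name comparison above is an acknowledged stopgap for spotting MCP-backed
tools. A sketch of how an allowlist could generalize it (BUILTIN_TOOL_NAMES
and is_builtin_tool are hypothetical, not part of this change):

# Hypothetical allowlist: one place to register which tool-call names
# should be routed to built-in (MCP) tool sessions.
BUILTIN_TOOL_NAMES = frozenset({"code_interpreter", "python"})

def is_builtin_tool(name: str) -> bool:
    return name in BUILTIN_TOOL_NAMES

assert is_builtin_tool("python") and not is_builtin_tool("browser")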
@@ -276,7 +276,7 @@ async def call_python_tool(
             return await tool_session.get_result(self)
         args = json.loads(last_msg.arguments)
         param = {
-            "code": args['code'],
+            "code": args["code"],
         }
         result = await tool_session.call_tool("python", param)
         result_str = result.content[0].text
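For context, a self-contained sketch of the call path above with a stubbed
session object (StubToolSession mimics only the result shape used here; it
is not vLLM's MCP client):

import asyncio
import json
from types import SimpleNamespace

class StubToolSession:
    # Returns an object whose .content[0].text holds the tool output,
    # matching how result_str is read above.
    async def call_tool(self, name: str, params: dict):
        output = f"would run {params['code']!r} via the {name} tool"
        return SimpleNamespace(content=[SimpleNamespace(text=output)])

async def main():
    arguments = json.dumps({"code": "print(1 + 1)"})  # like last_msg.arguments
    args = json.loads(arguments)
    param = {"code": args["code"]}
    result = await StubToolSession().call_tool("python", param)
    print(result.content[0].text)

asyncio.run(main())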

@@ -1334,7 +1334,9 @@ async def _generate_with_builtin_tools(
         # engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)
         # request_prompt = prompt_token_ids
         # Update the sampling params.
-        sampling_params.max_tokens = self.max_model_len - len(engine_prompt)
+        sampling_params.max_tokens = self.max_model_len - len(
+            engine_prompt["prompt_token_ids"]
+        )
         # OPTIMIZATION: lower priority value = scheduled earlier on the next turn
         priority = orig_priority - 1

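The fix matters because engine_prompt is a TokensPrompt-style dict:
len(engine_prompt) counts its keys, not the prompt tokens, so max_tokens was
barely reduced at all. A minimal sketch of the difference:

engine_prompt = {"prompt_token_ids": [101, 2023, 2003, 1037, 3231, 102]}
max_model_len = 4096

wrong = max_model_len - len(engine_prompt)                      # subtracts 1 (key count)
right = max_model_len - len(engine_prompt["prompt_token_ids"])  # subtracts 6 (token count)
print(wrong, right)  # 4095 4090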