From ea20c612ad4589faf218bbee273c33224e5783cd Mon Sep 17 00:00:00 2001 From: Alde Rojas Date: Tue, 2 Dec 2025 22:33:33 -0600 Subject: [PATCH 1/5] common : add parser for ministral/mistral 3 --- common/chat.cpp | 119 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/common/chat.cpp b/common/chat.cpp index 41a5bb42d5..8a9fa368e6 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1,5 +1,6 @@ #include "chat.h" #include "chat-parser.h" +#include "chat-peg-parser.h" #include "common.h" #include "json-partial.h" #include "json-schema-to-grammar.h" @@ -987,6 +988,117 @@ static common_chat_params common_chat_params_init_lfm2(const common_chat_templat return data; } +static common_chat_params common_chat_params_init_mistral_3(const common_chat_template & tmpl, const struct templates_params & inputs) { + common_chat_params data; + + // Build up messages to follow the format: https://huggingface.co/mistralai/Ministral-3-14B-Reasoning-2512/blob/main/chat_template.jinja + auto adjusted_messages = json::array(); + for (const auto & msg : inputs.messages) { + auto role = msg.value("role", ""); + if (role != "system" && role != "assistant") { + // Only adjust system and assistant messages. Interestingly, the system message may contain thinking. 
+ adjusted_messages.push_back(msg); + continue; + } + + auto content = json::array(); + + // If message contains `reasoning_content`, add it as a block of type `thinking` + if (msg.contains("reasoning_content") && msg.at("reasoning_content").is_string()) { + content.push_back({ + {"type", "thinking"}, + {"thinking", msg.at("reasoning_content").get<std::string>()}, + }); + } + + // If message contains `content`, add it as a block of type `text` + if (msg.contains("content")) { + if (msg.at("content").is_string()) { + content.push_back({ + {"type", "text"}, + {"text", msg.at("content").get<std::string>()}, + }); + } else if (msg.at("content").is_array()) { + auto blocks = msg.at("content"); + content.insert(content.end(), blocks.begin(), blocks.end()); + } + } + + auto adjusted = msg; + adjusted["content"] = content; + adjusted.erase("reasoning_content"); + adjusted_messages.push_back(adjusted); + } + + auto has_tools = inputs.tools.is_array() && !inputs.tools.empty(); + auto include_grammar = true; + + data.prompt = apply(tmpl, inputs, /* messages_override = */ adjusted_messages); + data.format = COMMON_CHAT_FORMAT_PEG_NATIVE; + data.preserved_tokens = { + "[THINK]", + "[/THINK]", + "[TOOL_CALLS]", + "[ARGS]", + }; + + auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder & p) { + auto reasoning = p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]"); + + // Response format parser + if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) { + // Ministral wants to emit json surrounded code fences + return reasoning << "```json" << p.content(p.schema(p.json(), "response-format", inputs.json_schema)) << "```"; + } + + // Tool call parser + if (has_tools && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) { + auto tool_choice = p.choice(); + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool.at("function"); + std::string name = function.at("name"); + const auto & schema = function.at("parameters"); + + 
tool_choice |= p.rule("tool-" + name, + p.tool_open(p.tool_name(p.literal(name)) + "[ARGS]") + + p.tool_args(p.schema(p.json(), "tool-" + name + "-schema", schema)) + ); + }); + + auto min_calls = inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_REQUIRED ? 1 : 0; + auto max_calls = inputs.parallel_tool_calls ? -1 : 1; + auto tool_calls = p.trigger_rule("tool-call", p.repeat("[TOOL_CALLS]" + tool_choice, min_calls, max_calls)); + + return reasoning << p.content(p.until("[TOOL_CALLS]")) << tool_calls; + } + + // Content only parser + include_grammar = false; + return reasoning << p.content(p.rest()); + }); + + data.parser = parser.save(); + + if (include_grammar) { + data.grammar_lazy = has_tools && inputs.tool_choice == COMMON_CHAT_TOOL_CHOICE_AUTO; + + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool.at("function"); + auto schema = function.at("parameters"); + builder.resolve_refs(schema); + }); + parser.build_grammar(builder, data.grammar_lazy); + }); + + data.grammar_triggers = { + {COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "[TOOL_CALLS]"} + }; + } + + return data; +} + static common_chat_params common_chat_params_init_magistral(const common_chat_template & tmpl, const struct templates_params & inputs) { common_chat_params data; data.prompt = apply(tmpl, inputs); @@ -2504,6 +2616,13 @@ static common_chat_params common_chat_templates_apply_jinja( return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools); } + // Ministral/Mistral 3 + if (src.find("[SYSTEM_PROMPT]") != std::string::npos && + src.find("[TOOL_CALLS]") != std::string::npos && + src.find("[ARGS]") != std::string::npos) { + return common_chat_params_init_mistral_3(tmpl, params); + } + if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) { return common_chat_params_init_magistral(tmpl, params); } From 
d601b07d2a87dc0e17c9c0b9e54cfdf2be853822 Mon Sep 17 00:00:00 2001 From: Alde Rojas Date: Wed, 3 Dec 2025 21:21:44 -0600 Subject: [PATCH 2/5] common : expose reasoning_format during chat param init --- common/chat.cpp | 5 ++++- tools/server/server-common.cpp | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/common/chat.cpp b/common/chat.cpp index 8a9fa368e6..f1758050e6 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -151,6 +151,7 @@ struct templates_params { common_chat_tool_choice tool_choice; json json_schema; bool parallel_tool_calls; + common_reasoning_format reasoning_format; bool stream; std::string grammar; bool add_generation_prompt = true; @@ -1031,6 +1032,7 @@ static common_chat_params common_chat_params_init_mistral_3(const common_chat_te } auto has_tools = inputs.tools.is_array() && !inputs.tools.empty(); + auto extract_reasoning = inputs.reasoning_format != COMMON_REASONING_FORMAT_NONE; auto include_grammar = true; data.prompt = apply(tmpl, inputs, /* messages_override = */ adjusted_messages); @@ -1043,7 +1045,7 @@ static common_chat_params common_chat_params_init_mistral_3(const common_chat_te }; auto parser = build_chat_peg_native_parser([&](common_chat_peg_native_builder & p) { - auto reasoning = p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]"); + auto reasoning = extract_reasoning ? 
p.optional("[THINK]" + p.reasoning(p.until("[/THINK]")) + "[/THINK]") : p.eps(); // Response format parser if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) { @@ -2453,6 +2455,7 @@ static common_chat_params common_chat_templates_apply_jinja( params.messages = common_chat_msgs_to_json_oaicompat(inputs.messages, /* concat_text= */ !tmpl.original_caps().requires_typed_content); params.add_generation_prompt = inputs.add_generation_prompt; params.tool_choice = inputs.tool_choice; + params.reasoning_format = inputs.reasoning_format; params.enable_thinking = inputs.enable_thinking; params.grammar = inputs.grammar; params.now = inputs.now; diff --git a/tools/server/server-common.cpp b/tools/server/server-common.cpp index b403864e0e..ab6b3aa7ce 100644 --- a/tools/server/server-common.cpp +++ b/tools/server/server-common.cpp @@ -972,6 +972,9 @@ json oaicompat_chat_params_parse( inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true); inputs.reasoning_format = opt.reasoning_format; + if (body.contains("reasoning_format")) { + inputs.reasoning_format = common_reasoning_format_from_name(body.at("reasoning_format").get<std::string>()); + } inputs.enable_thinking = opt.enable_thinking; if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) { if (body.contains("grammar")) { From 86a52f43d7ca12a302f353fcb76cf6e5871c3b7c Mon Sep 17 00:00:00 2001 From: Alde Rojas Date: Thu, 4 Dec 2025 00:59:25 -0600 Subject: [PATCH 3/5] add tests and unsloth template --- common/chat.cpp | 8 +- ...stral-Ministral-3-14B-Reasoning-2512.jinja | 126 ++++++++++++ tests/test-chat.cpp | 179 +++++++++++++++++- 3 files changed, 306 insertions(+), 7 deletions(-) create mode 100644 models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja diff --git a/common/chat.cpp b/common/chat.cpp index f1758050e6..9a121c995d 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -989,7 
+989,7 @@ static common_chat_params common_chat_params_init_lfm2(const common_chat_templat return data; } -static common_chat_params common_chat_params_init_mistral_3(const common_chat_template & tmpl, const struct templates_params & inputs) { +static common_chat_params common_chat_params_init_ministral_3(const common_chat_template & tmpl, const struct templates_params & inputs) { common_chat_params data; // Build up messages to follow the format: https://huggingface.co/mistralai/Ministral-3-14B-Reasoning-2512/blob/main/chat_template.jinja @@ -1049,7 +1049,7 @@ static common_chat_params common_chat_params_init_mistral_3(const common_chat_te // Response format parser if (inputs.json_schema.is_object() && !inputs.json_schema.empty()) { - // Ministral wants to emit json surrounded code fences + // Ministral wants to emit json surrounded by code fences return reasoning << "```json" << p.content(p.schema(p.json(), "response-format", inputs.json_schema)) << "```"; } @@ -2619,11 +2619,11 @@ static common_chat_params common_chat_templates_apply_jinja( return common_chat_params_init_llama_3_x(tmpl, params, allow_python_tag_builtin_tools); } - // Ministral/Mistral 3 + // Ministral/Mistral Large 3 if (src.find("[SYSTEM_PROMPT]") != std::string::npos && src.find("[TOOL_CALLS]") != std::string::npos && src.find("[ARGS]") != std::string::npos) { - return common_chat_params_init_mistral_3(tmpl, params); + return common_chat_params_init_ministral_3(tmpl, params); } if (src.find("[THINK]") != std::string::npos && src.find("[/THINK]") != std::string::npos) { diff --git a/models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja b/models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja new file mode 100644 index 0000000000..613c8e9767 --- /dev/null +++ b/models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja @@ -0,0 +1,126 @@ +{#- Unsloth template fixes #} +{#- Default system message if no system prompt is passed. 
#} +{%- set default_system_message = '# HOW YOU SHOULD THINK AND ANSWER\n\nFirst draft your thinking process (inner monologue) until you arrive at a response. Format your response using Markdown, and use LaTeX for any mathematical equations. Write both your thoughts and the response in the same language as the input.\n\nYour thinking process must follow the template below:[THINK]Your thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate the response to the user.[/THINK]Here, provide a self-contained response.' %} + +{#- Begin of sequence token. #} +{{- bos_token }} + +{#- Handle system prompt if it exists. #} +{#- System prompt supports text content or text and thinking chunks. #} +{%- if messages[0]['role'] == 'system' %} + {{- '[SYSTEM_PROMPT]' -}} + {%- if messages[0]['content'] is string %} + {{- messages[0]['content'] -}} + {%- else %} + {%- for block in messages[0]['content'] %} + {%- if block['type'] == 'text' %} + {{- block['text'] }} + {%- elif block['type'] == 'thinking' %} + {{- '[THINK]' + block['thinking'] + '[/THINK]' }} + {%- else %} + {{- raise_exception('Only text and thinking chunks are supported in system message contents.') }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '[/SYSTEM_PROMPT]' -}} + {%- set loop_messages = messages[1:] %} +{%- else %} + {%- set loop_messages = messages %} + {%- if default_system_message != '' %} + {{- '[SYSTEM_PROMPT]' + default_system_message + '[/SYSTEM_PROMPT]' }} + {%- endif %} +{%- endif %} + + +{#- Tools definition #} +{%- set tools_definition = '' %} +{%- set has_tools = false %} +{%- if tools is defined and tools is not none and tools|length > 0 %} + {%- set has_tools = true %} + {%- set tools_definition = '[AVAILABLE_TOOLS]' + (tools| tojson) + '[/AVAILABLE_TOOLS]' %} + {{- tools_definition }} +{%- endif %} + +{#- Checks for alternating user/assistant messages. 
#} +{%- set ns = namespace(index=0) %} +{%- for message in loop_messages %} + {%- if message.role == 'user' or (message.role == 'assistant' and (message.tool_calls is not defined or message.tool_calls is none or message.tool_calls | length == 0)) %} + {%- if (message['role'] == 'user') != (ns.index % 2 == 0) %} + {{- raise_exception('After the optional system message, conversation roles must alternate user and assistant roles except for tool calls and results.') }} + {%- endif %} + {%- set ns.index = ns.index + 1 %} + {%- endif %} +{%- endfor %} + +{#- Handle conversation messages. #} +{%- for message in loop_messages %} + + {#- User messages supports text content or text and image chunks. #} + {%- if message['role'] == 'user' %} + {%- if message['content'] is string %} + {{- '[INST]' + message['content'] + '[/INST]' }} + {%- elif message['content'] | length > 0 %} + {{- '[INST]' }} + {%- if message['content'] | length == 2 %} + {%- set blocks = message['content'] | sort(attribute='type') %} + {%- else %} + {%- set blocks = message['content'] %} + {%- endif %} + {%- for block in blocks %} + {%- if block['type'] == 'text' %} + {{- block['text'] }} + {%- elif block['type'] in ['image', 'image_url'] %} + {{- '[IMG]' }} + {%- else %} + {{- raise_exception('Only text, image and image_url chunks are supported in user message content.') }} + {%- endif %} + {%- endfor %} + {{- '[/INST]' }} + {%- else %} + {{- raise_exception('User message must have a string or a list of chunks in content') }} + {%- endif %} + + {#- Assistant messages supports text content or text, image and thinking chunks. 
#} + {%- elif message['role'] == 'assistant' %} + + {%- if message['content'] is string and message['content'] != '' %} + {{- message['content'] }} + {%- elif message['content'] is iterable and message['content'] | length > 0 %} + {%- for block in message['content'] %} + {%- if block['type'] == 'text' %} + {{- block['text'] }} + {%- elif block['type'] == 'thinking' %} + {{- '[THINK]' + block['thinking'] + '[/THINK]' }} + {%- else %} + {{- raise_exception('Only text and thinking chunks are supported in assistant message contents.') }} + {%- endif %} + {%- endfor %} + {%- endif %} + + {%- if message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls']|length > 0 %} + {%- for tool in message['tool_calls'] %} + {{- '[TOOL_CALLS]' }} + {%- set name = tool['function']['name'] %} + {%- set arguments = tool['function']['arguments'] %} + {%- if arguments is not string %} + {%- set arguments = arguments|tojson|safe %} + {%- elif arguments == '' %} + {%- set arguments = '{}' %} + {%- endif %} + {{- name + '[ARGS]' + arguments }} + {%- endfor %} + {%- endif %} + + {{- eos_token }} + + {#- Tool messages only supports text content. #} + {%- elif message['role'] == 'tool' %} + {{- '[TOOL_RESULTS]' + message['content']|string + '[/TOOL_RESULTS]' }} + + {#- Raise exception for unsupported roles. #} + {%- else %} + {{- raise_exception('Only user, assistant and tool roles are supported, got ' + message['role'] + '.') }} + {%- endif %} +{%- endfor %} + +{#- Copyright 2025-present Unsloth. Apache 2.0 License. 
#} \ No newline at end of file diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 62dd1583fa..a048c3e6a9 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -150,6 +150,22 @@ static std::string renormalize_json(const std::string & json_str) { return json_str; } } + +// Use for PEG parser implementations +struct make_peg_parser { + common_chat_params params_; + common_peg_arena arena_; + + make_peg_parser(common_chat_templates * tmpls, const common_chat_templates_inputs & inputs) { + params_ = common_chat_templates_apply(tmpls, inputs); + arena_.load(params_.parser); + } + + common_chat_msg operator()(const std::string & msg, bool is_partial) { + return common_chat_peg_parse(arena_, msg, is_partial, /* syntax = */ {params_.format}); + } +}; + static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual, bool ignore_whitespace_differences = false) { assert_equals(expected.role, actual.role); if (ignore_whitespace_differences) { @@ -429,9 +445,16 @@ static void test_templates(const struct common_chat_templates * tmpls, const std template static void test_parser_with_streaming(const common_chat_msg & expected, const std::string & raw_message, T parse_msg) { auto merged = simple_assist_msg(""); - auto last_msg = parse_msg(""); + common_chat_msg last_msg; + last_msg.role = "assistant"; for (size_t i = 1; i <= raw_message.size(); ++i) { - auto curr_msg = parse_msg(raw_message.substr(0, i)); + auto is_partial = i < raw_message.size(); + common_chat_msg curr_msg; + if constexpr (std::is_invocable_v) { + curr_msg = parse_msg(raw_message.substr(0, i), is_partial); + } else { + curr_msg = parse_msg(raw_message.substr(0, i)); + } if (curr_msg == simple_assist_msg("")) continue; LOG_INF("Streaming msg: %s\n", common_chat_msgs_to_json_oaicompat({curr_msg}).dump().c_str()); for (auto diff: common_chat_msg_diff::compute_diffs(last_msg, curr_msg)) { @@ -456,7 +479,11 @@ static void test_parser_with_streaming(const common_chat_msg 
& expected, const s assert_msg_equals(curr_msg, merged, true); last_msg = curr_msg; } - assert_msg_equals(expected, parse_msg(raw_message), true); + if constexpr (std::is_invocable_v) { + assert_msg_equals(expected, parse_msg(raw_message, false), true); + } else { + assert_msg_equals(expected, parse_msg(raw_message), true); + } assert_msg_equals(expected, merged, true); } @@ -3302,6 +3329,152 @@ Hey there!<|im_end|> GGML_ASSERT(grammar && "Failed to build Qwen3-Coder grammar with union types"); } + { + // Ministral-3-14B-Reasoning-2512 + auto tmpls = read_templates("models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja"); + common_chat_msg msg; + msg.role = "user"; + msg.content = "hello"; + + { + // Test basic message + common_chat_msg expected; + expected.role = "assistant"; + expected.content = "Hello world"; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + + test_parser_with_streaming(expected, + "Hello world", + make_peg_parser(tmpls.get(), inputs) + ); + } + { + // Test basic message and reasoning with reasoning_format = none + common_chat_msg expected; + expected.role = "assistant"; + expected.content = "[THINK]I am thinking[/THINK]Hello world"; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + + test_parser_with_streaming(expected, + "[THINK]I am thinking[/THINK]Hello world", + make_peg_parser(tmpls.get(), inputs) + ); + } + { + // Test basic message and reasoning with reasoning_format = auto + common_chat_msg expected; + expected.role = "assistant"; + expected.content = "Hello world"; + expected.reasoning_content = "I am thinking"; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + + test_parser_with_streaming(expected, + "[THINK]I am thinking[/THINK]Hello world", + make_peg_parser(tmpls.get(), inputs) + ); + } + { + // Test basic tool call + common_chat_msg expected; + expected.role = "assistant"; + 
expected.reasoning_content = "I need to get the weather in New York City"; + expected.tool_calls = {{ + /* .name = */ "get_weather", + /* .arguments = */ R"({"location": "New York City, NY"})", + /* .id = */ {}, + }}; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + inputs.tools = {{ + /* .name = */ "get_weather", + /* .description = */ "get the weather", + /* .parameters = */ R"({ + "type": "object", + "properties": { + "location": {"type": "string"} + } + })" + }}; + + test_parser_with_streaming(expected, + "[THINK]I need to get the weather in New York City[/THINK]" + R"([TOOL_CALLS]get_weather[ARGS]{"location": "New York City, NY"})", + make_peg_parser(tmpls.get(), inputs) + ); + } + { + // Test basic tool call with parallel_tool_calls = true + common_chat_msg expected; + expected.role = "assistant"; + expected.reasoning_content = "I need to get the weather in New York City and Los Angeles"; + expected.tool_calls = {{ + /* .name = */ "get_weather", + /* .arguments = */ R"({"location": "New York City, NY"})", + /* .id = */ {}, + }, { + /* .name = */ "get_weather", + /* .arguments = */ R"({"location": "Los Angeles, CA"})", + /* .id = */ {}, + }}; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + inputs.parallel_tool_calls = true; + inputs.tools = {{ + /* .name = */ "get_weather", + /* .description = */ "get the weather", + /* .parameters = */ R"({ + "type": "object", + "properties": { + "location": {"type": "string"} + } + })" + }}; + + test_parser_with_streaming(expected, + "[THINK]I need to get the weather in New York City and Los Angeles[/THINK]" + R"([TOOL_CALLS]get_weather[ARGS]{"location": "New York City, NY"})" + R"([TOOL_CALLS]get_weather[ARGS]{"location": "Los Angeles, CA"})", + make_peg_parser(tmpls.get(), inputs) + ); + } + { + // Test response format + common_chat_msg expected; + expected.role = 
"assistant"; + expected.reasoning_content = "I need to output the invoice details in JSON"; + expected.content = R"({"amount": 123.45, "date": "2025-12-03"})"; + + common_chat_templates_inputs inputs; + inputs.messages = {msg}; + inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + inputs.json_schema = R"({ + "type": "object", + "properties": { + "amount": {"type": "number"}, + "date": {"type": "string"} + } + })"; + + test_parser_with_streaming(expected, + "[THINK]I need to output the invoice details in JSON[/THINK]" + "```json\n" + R"({"amount": 123.45, "date": "2025-12-03"})" + "\n```", + make_peg_parser(tmpls.get(), inputs) + ); + } + } } static void test_msg_diffs_compute() { From ad8901d57cee743032532f4603a6f28af9fd6704 Mon Sep 17 00:00:00 2001 From: Alde Rojas Date: Sat, 6 Dec 2025 18:12:32 -0600 Subject: [PATCH 4/5] common : refine test cases for ministral --- tests/test-chat.cpp | 248 +++++++++++++++++++------------------------- 1 file changed, 105 insertions(+), 143 deletions(-) diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index a048c3e6a9..ca29ab9d4d 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -151,21 +151,6 @@ static std::string renormalize_json(const std::string & json_str) { } } -// Use for PEG parser implementations -struct make_peg_parser { - common_chat_params params_; - common_peg_arena arena_; - - make_peg_parser(common_chat_templates * tmpls, const common_chat_templates_inputs & inputs) { - params_ = common_chat_templates_apply(tmpls, inputs); - arena_.load(params_.parser); - } - - common_chat_msg operator()(const std::string & msg, bool is_partial) { - return common_chat_peg_parse(arena_, msg, is_partial, /* syntax = */ {params_.format}); - } -}; - static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual, bool ignore_whitespace_differences = false) { assert_equals(expected.role, actual.role); if (ignore_whitespace_differences) { @@ -538,6 +523,40 @@ const common_chat_msg 
message_assist_call_python_lines = simple_assist const common_chat_msg message_assist_call_python_lines_unclosed = simple_assist_msg("", "", "python", "{\"code\":\"# This is a program:\\nprint('hey')"); const common_chat_msg message_assist_call_code_interpreter = simple_assist_msg("", "", "code_interpreter", "{\"code\":\"print('hey')\"}"); +// Use for PEG parser implementations +struct peg_test_case { + common_chat_templates_inputs params; + std::string input; + common_chat_msg expect; +}; + +struct make_peg_parser { + common_chat_params params_; + common_peg_arena arena_; + + make_peg_parser(common_chat_templates * tmpls, const common_chat_templates_inputs & inputs) { + params_ = common_chat_templates_apply(tmpls, inputs); + arena_.load(params_.parser); + } + + common_chat_msg operator()(const std::string & msg, bool is_partial) { + return common_chat_peg_parse(arena_, msg, is_partial, /* syntax = */ {params_.format}); + } +}; + +static void test_peg_parser(common_chat_templates * tmpls, const std::function & init) { + peg_test_case tc; + init(tc); + if (tc.params.messages.empty()) { + tc.params.messages = {message_user}; + } + if (tc.expect.role.empty()) { + tc.expect.role = "assistant"; + } + auto parser = make_peg_parser(tmpls, tc.params); + test_parser_with_streaming(tc.expect, tc.input, parser); +} + static void test_msgs_oaicompat_json_conversion() { printf("[%s]\n", __func__); std::vector msgs{ @@ -3328,152 +3347,94 @@ Hey there!<|im_end|> auto grammar = build_grammar(params.grammar); GGML_ASSERT(grammar && "Failed to build Qwen3-Coder grammar with union types"); } +} + +static void test_template_output_peg_parsers() { + printf("[%s]\n", __func__); + + // JSON schemas + const char * invoice_schema = R"({ + "type": "object", + "properties": { + "amount": {"type": "number"}, + "date": {"type": "string"} + } + })"; { // Ministral-3-14B-Reasoning-2512 auto tmpls = read_templates("models/templates/unsloth-mistral-Ministral-3-14B-Reasoning-2512.jinja"); - 
common_chat_msg msg; - msg.role = "user"; - msg.content = "hello"; - { - // Test basic message - common_chat_msg expected; - expected.role = "assistant"; - expected.content = "Hello world"; + // Test basic message + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = "Hello, world!\nWhat's up?"; + t.expect = message_assist; + }); - common_chat_templates_inputs inputs; - inputs.messages = {msg}; + // Test basic message and reasoning with reasoning_format = none + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?"; + t.expect.content = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?"; + }); - test_parser_with_streaming(expected, - "Hello world", - make_peg_parser(tmpls.get(), inputs) - ); - } - { - // Test basic message and reasoning with reasoning_format = none - common_chat_msg expected; - expected.role = "assistant"; - expected.content = "[THINK]I am thinking[/THINK]Hello world"; + // Test basic message and reasoning with reasoning_format = auto + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = "[THINK]I'm\nthinking[/THINK]Hello, world!\nWhat's up?"; + t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; - common_chat_templates_inputs inputs; - inputs.messages = {msg}; + t.expect = message_assist_thoughts; + }); - test_parser_with_streaming(expected, - "[THINK]I am thinking[/THINK]Hello world", - make_peg_parser(tmpls.get(), inputs) - ); - } - { - // Test basic message and reasoning with reasoning_format = auto - common_chat_msg expected; - expected.role = "assistant"; - expected.content = "Hello world"; - expected.reasoning_content = "I am thinking"; + // Test tool call + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = R"([TOOL_CALLS]special_function[ARGS]{"arg1":1})"; + t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + t.params.tools = {special_function_tool}; - common_chat_templates_inputs inputs; - inputs.messages = 
{msg}; - inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + t.expect = message_assist_call; + }); - test_parser_with_streaming(expected, - "[THINK]I am thinking[/THINK]Hello world", - make_peg_parser(tmpls.get(), inputs) - ); - } - { - // Test basic tool call - common_chat_msg expected; - expected.role = "assistant"; - expected.reasoning_content = "I need to get the weather in New York City"; - expected.tool_calls = {{ - /* .name = */ "get_weather", - /* .arguments = */ R"({"location": "New York City, NY"})", - /* .id = */ {}, - }}; + // Test tool call with reasoning + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = "[THINK]I'm\nthinking[/THINK]" + R"([TOOL_CALLS]special_function[ARGS]{"arg1":1})"; + t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + t.params.tools = {special_function_tool}; - common_chat_templates_inputs inputs; - inputs.messages = {msg}; - inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; - inputs.tools = {{ - /* .name = */ "get_weather", - /* .description = */ "get the weather", - /* .parameters = */ R"({ - "type": "object", - "properties": { - "location": {"type": "string"} - } - })" - }}; + t.expect = message_assist_call_thoughts; + }); - test_parser_with_streaming(expected, - "[THINK]I need to get the weather in New York City[/THINK]" - R"([TOOL_CALLS]get_weather[ARGS]{"location": "New York City, NY"})", - make_peg_parser(tmpls.get(), inputs) - ); - } - { - // Test basic tool call with parallel_tool_calls = true - common_chat_msg expected; - expected.role = "assistant"; - expected.reasoning_content = "I need to get the weather in New York City and Los Angeles"; - expected.tool_calls = {{ - /* .name = */ "get_weather", - /* .arguments = */ R"({"location": "New York City, NY"})", + // Test parallel tool calls + test_peg_parser(tmpls.get(), [&](peg_test_case & t) { + t.input = R"([TOOL_CALLS]special_function[ARGS]{"arg1": 1})" + R"([TOOL_CALLS]special_function_with_opt[ARGS]{"arg1": 1, "arg2": 2})"; + 
t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + t.params.parallel_tool_calls = true; + t.params.tools = {special_function_tool, special_function_tool_with_optional_param}; + + t.expect.tool_calls = {{ + /* .name = */ "special_function", + /* .arguments = */ R"({"arg1": 1})", /* .id = */ {}, }, { - /* .name = */ "get_weather", - /* .arguments = */ R"({"location": "Los Angeles, CA"})", + /* .name = */ "special_function_with_opt", + /* .arguments = */ R"({"arg1": 1, "arg2": 2})", /* .id = */ {}, }}; + }); - common_chat_templates_inputs inputs; - inputs.messages = {msg}; - inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; - inputs.parallel_tool_calls = true; - inputs.tools = {{ - /* .name = */ "get_weather", - /* .description = */ "get the weather", - /* .parameters = */ R"({ - "type": "object", - "properties": { - "location": {"type": "string"} - } - })" - }}; - - test_parser_with_streaming(expected, - "[THINK]I need to get the weather in New York City and Los Angeles[/THINK]" - R"([TOOL_CALLS]get_weather[ARGS]{"location": "New York City, NY"})" - R"([TOOL_CALLS]get_weather[ARGS]{"location": "Los Angeles, CA"})", - make_peg_parser(tmpls.get(), inputs) - ); - } - { - // Test response format - common_chat_msg expected; - expected.role = "assistant"; - expected.reasoning_content = "I need to output the invoice details in JSON"; - expected.content = R"({"amount": 123.45, "date": "2025-12-03"})"; - - common_chat_templates_inputs inputs; - inputs.messages = {msg}; - inputs.reasoning_format = COMMON_REASONING_FORMAT_AUTO; - inputs.json_schema = R"({ - "type": "object", - "properties": { - "amount": {"type": "number"}, - "date": {"type": "string"} - } - })"; - - test_parser_with_streaming(expected, - "[THINK]I need to output the invoice details in JSON[/THINK]" - "```json\n" - R"({"amount": 123.45, "date": "2025-12-03"})" - "\n```", - make_peg_parser(tmpls.get(), inputs) - ); - } + // Test response format + test_peg_parser(tmpls.get(), [&](peg_test_case & t) 
{ + t.input = "[THINK]I need to output the invoice details in JSON[/THINK]" + "```json\n" + R"({"amount": 123.45, "date": "2025-12-03"})" + "\n```"; + t.params.reasoning_format = COMMON_REASONING_FORMAT_AUTO; + t.params.json_schema = invoice_schema; + + t.expect.reasoning_content = "I need to output the invoice details in JSON"; + t.expect.content =R"({"amount": 123.45, "date": "2025-12-03"})"; + }); } } @@ -3600,6 +3561,7 @@ int main(int argc, char ** argv) { test_msgs_oaicompat_json_conversion(); test_tools_oaicompat_json_conversion(); test_template_output_parsers(); + test_template_output_peg_parsers(); std::cout << "\n[chat] All tests passed!" << '\n'; } return 0; From 86cb4345f1d6717949d26ce70bdad4a51158cc98 Mon Sep 17 00:00:00 2001 From: Alde Rojas Date: Sat, 6 Dec 2025 18:14:37 -0600 Subject: [PATCH 5/5] remove extraneous change --- tests/test-chat.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index ca29ab9d4d..9cad67fe2c 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -150,7 +150,6 @@ static std::string renormalize_json(const std::string & json_str) { return json_str; } } - static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual, bool ignore_whitespace_differences = false) { assert_equals(expected.role, actual.role); if (ignore_whitespace_differences) {