@@ -282,6 +282,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -331,6 +332,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -429,6 +434,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -481,6 +487,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -575,6 +585,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -627,6 +638,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -720,6 +735,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -747,6 +763,7 @@ def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
@@ -997,6 +1014,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1046,6 +1064,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1144,6 +1166,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1196,6 +1219,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1290,6 +1317,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1342,6 +1370,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.

+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1435,6 +1467,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1462,6 +1495,7 @@ async def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
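The change threads a new `parallel_tool_calls` keyword through the sync and async `create_and_run` overloads and into the request body. Below is a minimal usage sketch; only the `parallel_tool_calls` argument itself comes from this diff, while the assistant ID, thread contents, and printed field are illustrative placeholders.

```python
# Minimal sketch of calling create_and_run with the new parameter.
# Assumes OPENAI_API_KEY is set and "asst_123" is a placeholder assistant ID.
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_123",  # hypothetical assistant with function tools attached
    thread={
        "messages": [
            {"role": "user", "content": "What is the weather in Paris and in Tokyo?"}
        ]
    },
    # New in this change: set False to disable parallel function calling so the
    # model emits at most one tool call per step; omit it to keep the default.
    parallel_tool_calls=False,
)
print(run.status)
```

The async client mirrors this: `await client.beta.threads.create_and_run(...)` accepts the same `parallel_tool_calls` argument, since the diff applies the identical signature and body change to the `AsyncThreads` overloads.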