@@ -291,6 +291,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -340,6 +341,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -438,6 +443,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -490,6 +496,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -584,6 +594,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -636,6 +647,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -729,6 +744,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -756,6 +772,7 @@ def create_and_run(
756772 "max_prompt_tokens" : max_prompt_tokens ,
757773 "metadata" : metadata ,
758774 "model" : model ,
775+ "parallel_tool_calls" : parallel_tool_calls ,
759776 "response_format" : response_format ,
760777 "stream" : stream ,
761778 "temperature" : temperature ,
@@ -1284,6 +1301,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1333,6 +1351,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1431,6 +1453,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1483,6 +1506,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1577,6 +1604,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1629,6 +1657,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1722,6 +1754,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1749,6 +1782,7 @@ async def create_and_run(
17491782 "max_prompt_tokens" : max_prompt_tokens ,
17501783 "metadata" : metadata ,
17511784 "model" : model ,
1785+ "parallel_tool_calls" : parallel_tool_calls ,
17521786 "response_format" : response_format ,
17531787 "stream" : stream ,
17541788 "temperature" : temperature ,