@@ -96,6 +96,7 @@ def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -164,6 +165,13 @@ def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -239,6 +247,7 @@ def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -310,6 +319,13 @@ def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -381,6 +397,7 @@ def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -452,6 +469,13 @@ def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -522,6 +546,7 @@ def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -552,6 +577,7 @@ def create(
552577 "metadata" : metadata ,
553578 "model" : model ,
554579 "parallel_tool_calls" : parallel_tool_calls ,
580+ "reasoning_effort" : reasoning_effort ,
555581 "response_format" : response_format ,
556582 "stream" : stream ,
557583 "temperature" : temperature ,
@@ -1469,6 +1495,7 @@ async def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1537,6 +1564,13 @@ async def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1612,6 +1646,7 @@ async def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1683,6 +1718,13 @@ async def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1754,6 +1796,7 @@ async def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1825,6 +1868,13 @@ async def create(
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
+          reasoning_effort: **o1 and o3-mini models only**
+
+              Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
+              result in faster responses and fewer tokens used on reasoning in a response.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1895,6 +1945,7 @@ async def create(
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1925,6 +1976,7 @@ async def create(
19251976 "metadata" : metadata ,
19261977 "model" : model ,
19271978 "parallel_tool_calls" : parallel_tool_calls ,
1979+ "reasoning_effort" : reasoning_effort ,
19281980 "response_format" : response_format ,
19291981 "stream" : stream ,
19301982 "temperature" : temperature ,