@@ -340,7 +340,7 @@ def create_and_run(
340340
341341 response_format: Specifies the format that the model must output. Compatible with
342342 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
343- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
343+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
344344
345345 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
346346 message the model generates is valid JSON.
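For context, a minimal sketch of opting into JSON mode on `create_and_run` (the assistant ID and message text are placeholders; JSON mode also expects the prompt itself to ask for JSON):

```python
from openai import OpenAI

client = OpenAI()

# Placeholder assistant ID; JSON mode also expects the prompt to request JSON output.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Describe today's weather as a JSON object."}]},
    response_format={"type": "json_object"},
)
print(run.id, run.status)
```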
@@ -366,7 +366,7 @@ def create_and_run(
366366 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
367367 not call any tools and instead generates a message. `auto` is the default value
368368 and means the model can pick between generating a message or calling a tool.
369- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
369+ Specifying a particular tool like `{"type": "file_search "}` or
370370 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
371371 call that tool.
372372
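A sketch of forcing a specific tool with the new `file_search` wording (IDs and message text are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Force the file_search tool for this run; "none" disables tools, "auto" is the default.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "What does the attached report say about Q3 revenue?"}]},
    tool_choice={"type": "file_search"},
)

# To force a particular function tool instead:
# tool_choice={"type": "function", "function": {"name": "my_function"}}
```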
@@ -382,6 +382,11 @@ def create_and_run(
382382 model considers the results of the tokens with top_p probability mass. So 0.1
383383 means only the tokens comprising the top 10% probability mass are considered.
384384
385+ We generally recommend altering this or temperature but not both.
386+
387+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
388+ control the initial context window of the run.
389+
385390 extra_headers: Send extra headers
386391
387392 extra_query: Add additional query parameters to the request
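A sketch of the newly documented `truncation_strategy` parameter, assuming the `{"type": "last_messages", "last_messages": N}` and `{"type": "auto"}` shapes from the Assistants API (placeholders as above):

```python
from openai import OpenAI

client = OpenAI()

# Keep only the 10 most recent thread messages in the run's initial context window;
# {"type": "auto"} leaves truncation to the server. Assistant ID is a placeholder.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Summarize our discussion so far."}]},
    truncation_strategy={"type": "last_messages", "last_messages": 10},
    top_p=0.9,  # adjust either top_p or temperature, not both
)
```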
@@ -481,7 +486,7 @@ def create_and_run(
481486
482487 response_format: Specifies the format that the model must output. Compatible with
483488 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
484- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
489+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
485490
486491 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
487492 message the model generates is valid JSON.
@@ -503,7 +508,7 @@ def create_and_run(
503508 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
504509 not call any tools and instead generates a message. `auto` is the default value
505510 and means the model can pick between generating a message or calling a tool.
506- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
511+ Specifying a particular tool like `{"type": "file_search "}` or
507512 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
508513 call that tool.
509514
@@ -519,6 +524,11 @@ def create_and_run(
519524 model considers the results of the tokens with top_p probability mass. So 0.1
520525 means only the tokens comprising the top 10% probability mass are considered.
521526
527+ We generally recommend altering this or temperature but not both.
528+
529+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
530+ control the initial context window of the run.
531+
522532 extra_headers: Send extra headers
523533
524534 extra_query: Add additional query parameters to the request
@@ -618,7 +628,7 @@ def create_and_run(
618628
619629 response_format: Specifies the format that the model must output. Compatible with
620630 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
621- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
631+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
622632
623633 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
624634 message the model generates is valid JSON.
@@ -640,7 +650,7 @@ def create_and_run(
640650 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
641651 not call any tools and instead generates a message. `auto` is the default value
642652 and means the model can pick between generating a message or calling a tool.
643- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
653+ Specifying a particular tool like `{"type": "file_search "}` or
644654 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
645655 call that tool.
646656
@@ -656,6 +666,11 @@ def create_and_run(
656666 model considers the results of the tokens with top_p probability mass. So 0.1
657667 means only the tokens comprising the top 10% probability mass are considered.
658668
669+ We generally recommend altering this or temperature but not both.
670+
671+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
672+ control the initial context window of the run.
673+
659674 extra_headers: Send extra headers
660675
661676 extra_query: Add additional query parameters to the request
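These sync overloads differ mainly in the `stream` argument; a sketch of the streaming form, assuming the usual event-iterator return (placeholders as above):

```python
from openai import OpenAI

client = OpenAI()

# stream=True returns an iterator of assistant stream events rather than a finished Run.
stream = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Tell me a short story."}]},
    stream=True,
)
for event in stream:
    print(event.event)  # e.g. "thread.message.delta"
```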
@@ -1296,7 +1311,7 @@ async def create_and_run(
12961311
12971312 response_format: Specifies the format that the model must output. Compatible with
12981313 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1299- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1314+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
13001315
13011316 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
13021317 message the model generates is valid JSON.
@@ -1322,7 +1337,7 @@ async def create_and_run(
13221337 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
13231338 not call any tools and instead generates a message. `auto` is the default value
13241339 and means the model can pick between generating a message or calling a tool.
1325- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
1340+ Specifying a particular tool like `{"type": "file_search "}` or
13261341 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
13271342 call that tool.
13281343
@@ -1338,6 +1353,11 @@ async def create_and_run(
13381353 model considers the results of the tokens with top_p probability mass. So 0.1
13391354 means only the tokens comprising the top 10% probability mass are considered.
13401355
1356+ We generally recommend altering this or temperature but not both.
1357+
1358+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
1359+ control the initial context window of the run.
1360+
13411361 extra_headers: Send extra headers
13421362
13431363 extra_query: Add additional query parameters to the request
@@ -1437,7 +1457,7 @@ async def create_and_run(
14371457
14381458 response_format: Specifies the format that the model must output. Compatible with
14391459 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1440- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1460+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
14411461
14421462 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
14431463 message the model generates is valid JSON.
@@ -1459,7 +1479,7 @@ async def create_and_run(
14591479 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
14601480 not call any tools and instead generates a message. `auto` is the default value
14611481 and means the model can pick between generating a message or calling a tool.
1462- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
1482+ Specifying a particular tool like `{"type": "file_search "}` or
14631483 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
14641484 call that tool.
14651485
@@ -1475,6 +1495,11 @@ async def create_and_run(
14751495 model considers the results of the tokens with top_p probability mass. So 0.1
14761496 means only the tokens comprising the top 10% probability mass are considered.
14771497
1498+ We generally recommend altering this or temperature but not both.
1499+
1500+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
1501+ control the initial context window of the run.
1502+
14781503 extra_headers: Send extra headers
14791504
14801505 extra_query: Add additional query parameters to the request
@@ -1574,7 +1599,7 @@ async def create_and_run(
15741599
15751600 response_format: Specifies the format that the model must output. Compatible with
15761601 [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1577- all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1602+ all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
15781603
15791604 Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
15801605 message the model generates is valid JSON.
@@ -1596,7 +1621,7 @@ async def create_and_run(
15961621 tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
15971622 not call any tools and instead generates a message. `auto` is the default value
15981623 and means the model can pick between generating a message or calling a tool.
1599- Specifying a particular tool like `{"type": "TOOL_TYPE "}` or
1624+ Specifying a particular tool like `{"type": "file_search "}` or
16001625 `{"type": "function", "function": {"name": "my_function"}}` forces the model to
16011626 call that tool.
16021627
@@ -1612,6 +1637,11 @@ async def create_and_run(
16121637 model considers the results of the tokens with top_p probability mass. So 0.1
16131638 means only the tokens comprising the top 10% probability mass are considered.
16141639
1640+ We generally recommend altering this or temperature but not both.
1641+
1642+ truncation_strategy: Controls how a thread will be truncated prior to the run. Use this to
1643+ control the initial context window of the run.
1644+
16151645 extra_headers: Send extra headers
16161646
16171647 extra_query: Add additional query parameters to the request
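The async overloads mirror the sync ones; a minimal sketch with `AsyncOpenAI` (placeholders as above):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # Same parameters as the sync client; assistant ID and message are placeholders.
    run = await client.beta.threads.create_and_run(
        assistant_id="asst_abc123",
        thread={"messages": [{"role": "user", "content": "Hello!"}]},
        truncation_strategy={"type": "auto"},
    )
    print(run.id, run.status)


asyncio.run(main())
```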