
Commit c93e77b

feat: update default (#4076)
Changes:
- Default model and docs now use gpt-5-codex.
- Disables the GPT-5 Codex NUX by default.
- Keeps presets available for API key users.
1 parent c415827 commit c93e77b

14 files changed: +136 −52 lines changed

codex-rs/common/src/model_presets.rs

Lines changed: 2 additions & 10 deletions
```diff
@@ -1,4 +1,3 @@
-use codex_core::config::GPT_5_CODEX_MEDIUM_MODEL;
 use codex_core::protocol_config_types::ReasoningEffort;
 use codex_protocol::mcp_protocol::AuthMode;
 
@@ -69,13 +68,6 @@ const PRESETS: &[ModelPreset] = &[
     },
 ];
 
-pub fn builtin_model_presets(auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
-    match auth_mode {
-        Some(AuthMode::ApiKey) => PRESETS
-            .iter()
-            .copied()
-            .filter(|p| p.model != GPT_5_CODEX_MEDIUM_MODEL)
-            .collect(),
-        _ => PRESETS.to_vec(),
-    }
+pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
+    PRESETS.to_vec()
 }
```
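The removed branch used to hide the gpt-5-codex preset from API-key users; after this change every auth mode sees the full preset list. A minimal sketch of the new behavior (the import paths are assumptions based on this diff, and `ModelPreset` is assumed to expose the `model` field the removed filter compared against):

```rust
use codex_common::model_presets::builtin_model_presets; // module path is an assumption
use codex_protocol::mcp_protocol::AuthMode;

fn main() {
    // Both auth modes now resolve to the same preset list.
    let api_key = builtin_model_presets(Some(AuthMode::ApiKey));
    let chatgpt = builtin_model_presets(None);
    assert_eq!(api_key.len(), chatgpt.len());

    // The gpt-5-codex preset is no longer filtered out for API-key auth.
    assert!(api_key.iter().any(|p| p.model == "gpt-5-codex"));
}
```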

codex-rs/core/src/config.rs

Lines changed: 5 additions & 5 deletions
```diff
@@ -37,7 +37,7 @@ use toml_edit::DocumentMut;
 use toml_edit::Item as TomlItem;
 use toml_edit::Table as TomlTable;
 
-const OPENAI_DEFAULT_MODEL: &str = "gpt-5";
+const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";
 const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex";
 pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex";
 
@@ -54,7 +54,7 @@ pub struct Config {
     /// Optional override of model selection.
     pub model: String,
 
-    /// Model used specifically for review sessions. Defaults to "gpt-5".
+    /// Model used specifically for review sessions. Defaults to "gpt-5-codex".
     pub review_model: String,
 
     pub model_family: ModelFamily,
@@ -1366,7 +1366,7 @@ startup_timeout_ms = 2500
     tokio::fs::write(
         &config_path,
         r#"
-model = "gpt-5"
+model = "gpt-5-codex"
 model_reasoning_effort = "medium"
 
 [profiles.dev]
@@ -1441,7 +1441,7 @@ model = "gpt-4"
 model_reasoning_effort = "medium"
 
 [profiles.prod]
-model = "gpt-5"
+model = "gpt-5-codex"
 "#,
     )
     .await?;
@@ -1472,7 +1472,7 @@ model = "gpt-5"
         .profiles
         .get("prod")
         .and_then(|profile| profile.model.as_deref()),
-        Some("gpt-5"),
+        Some("gpt-5-codex"),
     );
 
     Ok(())
```
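Only the fallback string changes here; as the test above shows, an explicit `model` in config.toml or a profile still wins. A standalone sketch of that precedence (the `effective_model` helper is hypothetical, not the crate's actual code):

```rust
const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex";

// Hypothetical helper: an explicit override from config.toml, a profile,
// or the CLI takes precedence; the constant is only the last-resort fallback.
fn effective_model(explicit: Option<&str>) -> String {
    explicit.unwrap_or(OPENAI_DEFAULT_MODEL).to_string()
}

fn main() {
    assert_eq!(effective_model(None), "gpt-5-codex");
    assert_eq!(effective_model(Some("gpt-5")), "gpt-5"); // user override still honored
}
```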

codex-rs/core/src/config_edit.rs

Lines changed: 13 additions & 13 deletions
```diff
@@ -228,15 +228,15 @@ mod tests {
         codex_home,
         None,
         &[
-            (&[CONFIG_KEY_MODEL], "gpt-5"),
+            (&[CONFIG_KEY_MODEL], "gpt-5-codex"),
             (&[CONFIG_KEY_EFFORT], "high"),
         ],
     )
     .await
     .expect("persist");
 
     let contents = read_config(codex_home).await;
-    let expected = r#"model = "gpt-5"
+    let expected = r#"model = "gpt-5-codex"
 model_reasoning_effort = "high"
 "#;
     assert_eq!(contents, expected);
@@ -348,7 +348,7 @@ model_reasoning_effort = "high"
         &[
             (&["a", "b", "c"], "v"),
             (&["x"], "y"),
-            (&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5"),
+            (&["profiles", "p1", CONFIG_KEY_MODEL], "gpt-5-codex"),
         ],
     )
     .await
@@ -361,7 +361,7 @@ model_reasoning_effort = "high"
 c = "v"
 
 [profiles.p1]
-model = "gpt-5"
+model = "gpt-5-codex"
 "#;
     assert_eq!(contents, expected);
 }
@@ -454,7 +454,7 @@ existing = "keep"
         codex_home,
         None,
         &[
-            (&[CONFIG_KEY_MODEL], "gpt-5"),
+            (&[CONFIG_KEY_MODEL], "gpt-5-codex"),
             (&[CONFIG_KEY_EFFORT], "minimal"),
         ],
     )
@@ -466,7 +466,7 @@ existing = "keep"
 # should be preserved
 
 existing = "keep"
-model = "gpt-5"
+model = "gpt-5-codex"
 model_reasoning_effort = "minimal"
 "#;
     assert_eq!(contents, expected);
@@ -524,7 +524,7 @@ model = "o3"
     let codex_home = tmpdir.path();
 
     // Seed with a model value only
-    let seed = "model = \"gpt-5\"\n";
+    let seed = "model = \"gpt-5-codex\"\n";
     tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
         .await
         .expect("seed write");
@@ -535,7 +535,7 @@ model = "o3"
     .expect("persist");
 
     let contents = read_config(codex_home).await;
-    let expected = r#"model = "gpt-5"
+    let expected = r#"model = "gpt-5-codex"
 model_reasoning_effort = "high"
 "#;
     assert_eq!(contents, expected);
@@ -579,7 +579,7 @@ model = "o4-mini"
 
     // No active profile key; we'll target an explicit override
     let seed = r#"[profiles.team]
-model = "gpt-5"
+model = "gpt-5-codex"
 "#;
     tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
         .await
@@ -595,7 +595,7 @@ model = "gpt-5"
 
     let contents = read_config(codex_home).await;
     let expected = r#"[profiles.team]
-model = "gpt-5"
+model = "gpt-5-codex"
 model_reasoning_effort = "minimal"
 "#;
     assert_eq!(contents, expected);
@@ -611,15 +611,15 @@ model_reasoning_effort = "minimal"
         codex_home,
         None,
         &[
-            (&[CONFIG_KEY_MODEL], Some("gpt-5")),
+            (&[CONFIG_KEY_MODEL], Some("gpt-5-codex")),
             (&[CONFIG_KEY_EFFORT], None),
         ],
     )
     .await
     .expect("persist");
 
     let contents = read_config(codex_home).await;
-    let expected = "model = \"gpt-5\"\n";
+    let expected = "model = \"gpt-5-codex\"\n";
     assert_eq!(contents, expected);
 }
 
@@ -670,7 +670,7 @@ model = "o3"
     let tmpdir = tempdir().expect("tmp");
     let codex_home = tmpdir.path();
 
-    let seed = r#"model = "gpt-5"
+    let seed = r#"model = "gpt-5-codex"
 model_reasoning_effort = "medium"
 "#;
     tokio::fs::write(codex_home.join(CONFIG_TOML_FILE), seed)
```
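These tests exercise a persist helper that writes dotted key paths (top-level keys and `profiles.<name>` entries) into config.toml while preserving unrelated content such as comments. A self-contained sketch of that pattern with `toml_edit` (the seed and write sequence are illustrative, not the helper's exact code):

```rust
use toml_edit::{value, DocumentMut};

fn main() {
    // Seed mirrors the tests above: an existing model plus a comment to preserve.
    let seed = "# should be preserved\nmodel = \"o3\"\n";
    let mut doc: DocumentMut = seed.parse().expect("valid TOML");

    // Overwrite the top-level model and add a nested profile entry.
    doc["model"] = value("gpt-5-codex");
    doc["profiles"]["p1"]["model"] = value("gpt-5-codex");

    let out = doc.to_string();
    assert!(out.contains("# should be preserved")); // comments survive the edit
    assert!(out.contains("model = \"gpt-5-codex\""));
    println!("{out}");
}
```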

codex-rs/core/src/internal_storage.rs

Lines changed: 15 additions & 2 deletions
```diff
@@ -7,14 +7,27 @@ use std::path::PathBuf;
 
 pub(crate) const INTERNAL_STORAGE_FILE: &str = "internal_storage.json";
 
-#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct InternalStorage {
     #[serde(skip)]
     storage_path: PathBuf,
-    #[serde(default)]
+    #[serde(default = "default_gpt_5_codex_model_prompt_seen")]
     pub gpt_5_codex_model_prompt_seen: bool,
 }
 
+const fn default_gpt_5_codex_model_prompt_seen() -> bool {
+    true
+}
+
+impl Default for InternalStorage {
+    fn default() -> Self {
+        Self {
+            storage_path: PathBuf::new(),
+            gpt_5_codex_model_prompt_seen: default_gpt_5_codex_model_prompt_seen(),
+        }
+    }
+}
+
 // TODO(jif) generalise all the file writers and build proper async channel inserters.
 impl InternalStorage {
     pub fn load(codex_home: &Path) -> Self {
```
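This is the change that disables the NUX by default: with `#[serde(default = "...")]` pointing at a function that returns `true`, an existing internal_storage.json that lacks the field now deserializes with the prompt marked as already seen. A self-contained sketch of the serde pattern (struct and field names here are illustrative):

```rust
use serde::{Deserialize, Serialize};

const fn default_seen() -> bool {
    true
}

#[derive(Debug, Serialize, Deserialize)]
struct Storage {
    // When the field is missing from the JSON, serde calls default_seen()
    // instead of bool::default() (which would yield false).
    #[serde(default = "default_seen")]
    seen: bool,
}

fn main() {
    // A file written before the field existed: the flag comes back true,
    // so the one-time prompt is skipped.
    let old: Storage = serde_json::from_str("{}").unwrap();
    assert!(old.seen);

    // An explicit value still round-trips as written.
    let explicit: Storage = serde_json::from_str(r#"{"seen": false}"#).unwrap();
    assert!(!explicit.seen);
}
```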

codex-rs/core/tests/suite/client.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -822,7 +822,7 @@ async fn token_count_includes_rate_limits_snapshot() {
             "reasoning_output_tokens": 0,
             "total_tokens": 123
         },
-        // Default model is gpt-5 in tests → 272000 context window
+        // Default model is gpt-5-codex in tests → 272000 context window
         "model_context_window": 272000
     },
     "rate_limits": {
```

codex-rs/core/tests/suite/compact_resume_fork.rs

Lines changed: 5 additions & 5 deletions
```diff
@@ -133,7 +133,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
         .to_string();
     let user_turn_1 = json!(
     {
-        "model": "gpt-5",
+        "model": "gpt-5-codex",
         "instructions": prompt,
         "input": [
         {
@@ -182,7 +182,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let compact_1 = json!(
     {
-        "model": "gpt-5",
+        "model": "gpt-5-codex",
         "instructions": "You have exceeded the maximum number of tokens, please stop coding and instead write a short memento message for the next agent. Your note should:
 - Summarize what you finished and what still needs work. If there was a recent update_plan call, repeat its steps verbatim.
 - List outstanding TODOs with file paths / line numbers so they're easy to find.
@@ -255,7 +255,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() {
     });
     let user_turn_2_after_compact = json!(
     {
-        "model": "gpt-5",
+        "model": "gpt-5-codex",
         "instructions": prompt,
         "input": [
         {
@@ -320,7 +320,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let usert_turn_3_after_resume = json!(
     {
-        "model": "gpt-5",
+        "model": "gpt-5-codex",
         "instructions": prompt,
         "input": [
         {
@@ -405,7 +405,7 @@ SUMMARY_ONLY_CONTEXT"
     });
     let user_turn_3_after_fork = json!(
     {
-        "model": "gpt-5",
+        "model": "gpt-5-codex",
         "instructions": prompt,
         "input": [
         {
```

codex-rs/core/tests/suite/prompt_caching.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -184,6 +184,7 @@ async fn prompt_tools_are_consistent_across_requests() {
 
     let conversation_manager =
         ConversationManager::with_auth(CodexAuth::from_api_key("Test API Key"));
+    let expected_instructions = config.model_family.base_instructions.clone();
     let codex = conversation_manager
         .new_conversation(config)
         .await
@@ -213,7 +214,6 @@ async fn prompt_tools_are_consistent_across_requests() {
     let requests = server.received_requests().await.unwrap();
     assert_eq!(requests.len(), 2, "expected two POST requests");
 
-    let expected_instructions: &str = include_str!("../../prompt.md");
     // our internal implementation is responsible for keeping tools in sync
     // with the OpenAI schema, so we just verify the tool presence here
     let expected_tools_names: &[&str] = &["shell", "update_plan", "apply_patch", "view_image"];
```
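The expected instructions now come from the configured model family instead of a hard-coded `include_str!("../../prompt.md")`, which matters once the default family is gpt-5-codex with its own base instructions. The clone also has to happen before `config` is moved into `new_conversation`; a tiny standalone sketch of that clone-before-move ordering (types here are illustrative):

```rust
#[derive(Clone)]
struct Config {
    base_instructions: String,
}

// Stand-in for new_conversation(config): takes ownership of the config.
fn new_conversation(config: Config) -> String {
    config.base_instructions
}

fn main() {
    let config = Config { base_instructions: "base prompt for the model family".into() };
    // Capture what the later assertion needs *before* config is moved.
    let expected_instructions = config.base_instructions.clone();
    let actual = new_conversation(config);
    assert_eq!(expected_instructions, actual);
}
```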

codex-rs/docs/codex_mcp_interface.md

Lines changed: 1 addition & 2 deletions
```diff
@@ -51,7 +51,7 @@ Start a new session with optional overrides:
 
 Request `newConversation` params (subset):
 
-- `model`: string model id (e.g. "o3", "gpt-5")
+- `model`: string model id (e.g. "o3", "gpt-5", "gpt-5-codex")
 - `profile`: optional named profile
 - `cwd`: optional working directory
 - `approvalPolicy`: `untrusted` | `on-request` | `on-failure` | `never`
@@ -120,4 +120,3 @@ While processing, the server emits `codex/event` notifications containing agent
 ## Compatibility and stability
 
 This interface is experimental. Method names, fields, and event shapes may evolve. For the authoritative schema, consult `protocol/src/mcp_protocol.rs` and the corresponding server wiring in `mcp-server/`.
-
```
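For illustration, `newConversation` params selecting the new default model might be built like this (a sketch in the `json!` style used by the tests in this commit; only fields documented above appear, and the values are examples, not requirements):

```rust
use serde_json::json;

fn main() {
    // Hypothetical params for a newConversation request over MCP.
    let params = json!({
        "model": "gpt-5-codex",
        "approvalPolicy": "on-request",
        "cwd": "/path/to/project"
    });
    println!("{params}");
}
```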
codex-rs/mcp-server/tests/suite/config.rs

Lines changed: 2 additions & 2 deletions
```diff
@@ -26,7 +26,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "gpt-5"
+model = "gpt-5-codex"
 approval_policy = "on-request"
 sandbox_mode = "workspace-write"
 model_reasoning_summary = "detailed"
@@ -92,7 +92,7 @@ async fn get_config_toml_parses_all_fields() {
             exclude_tmpdir_env_var: Some(true),
             exclude_slash_tmp: Some(true),
         }),
-        model: Some("gpt-5".into()),
+        model: Some("gpt-5-codex".into()),
         model_reasoning_effort: Some(ReasoningEffort::High),
         model_reasoning_summary: Some(ReasoningSummary::Detailed),
         model_verbosity: Some(Verbosity::Medium),
```

codex-rs/mcp-server/tests/suite/set_default_model.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -69,7 +69,7 @@ fn create_config_toml(codex_home: &Path) -> std::io::Result<()> {
     std::fs::write(
         config_toml,
         r#"
-model = "gpt-5"
+model = "gpt-5-codex"
 model_reasoning_effort = "medium"
 "#,
     )
```
