Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions crates/goose-cli/src/commands/configure.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ use goose::config::{
use goose::model::ModelConfig;
use goose::posthog::{get_telemetry_choice, TELEMETRY_ENABLED_KEY};
use goose::providers::base::ConfigKey;
use goose::providers::chatgpt_codex::reasoning_levels_for_model;
use goose::providers::formats::anthropic::supports_adaptive_thinking;
use goose::providers::provider_test::test_provider_configuration;
use goose::providers::{create, providers, retry_operation, RetryConfig};
Expand Down Expand Up @@ -809,6 +810,26 @@ pub async fn configure_provider_dialog() -> anyhow::Result<bool> {
}
}

if provider_name == "chatgpt_codex" {
    let valid_levels = reasoning_levels_for_model(&model);
    if !valid_levels.is_empty() {
        // Let the user pick a reasoning effort from the levels the chosen
        // model actually supports.
        let mut select = cliclack::select("Select reasoning effort level:");
        for &level in valid_levels {
            let description = match level {
                "low" => "Low - Fast responses with lighter reasoning",
                "medium" => "Medium - Balances speed and reasoning depth for everyday tasks",
                "high" => "High - Greater reasoning depth for complex problems",
                "xhigh" => "Extra High - Extra high reasoning depth for complex problems",
                _ => "",
            };
            select = select.item(level, description, "");
        }
        // Preselect "medium" only when the model supports it; otherwise fall
        // back to the first supported level so the initial value is always
        // one of the listed items (valid_levels is non-empty here).
        let initial = if valid_levels.contains(&"medium") {
            "medium"
        } else {
            valid_levels[0]
        };
        select = select.initial_value(initial);
        let effort: &str = select.interact()?;
        config.set_chatgpt_codex_reasoning_effort(effort.to_string())?;
    }
}

// Test the configuration
let spin = spinner();
spin.start("Checking your configuration...");
Expand Down
1 change: 1 addition & 0 deletions crates/goose/src/config/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1012,6 +1012,7 @@ config_value!(CODEX_COMMAND, String, "codex");
config_value!(CODEX_REASONING_EFFORT, String, "high");
config_value!(CODEX_ENABLE_SKILLS, String, "true");
config_value!(CODEX_SKIP_GIT_CHECK, String, "false");
config_value!(CHATGPT_CODEX_REASONING_EFFORT, String, "medium");

config_value!(GOOSE_SEARCH_PATHS, Vec<String>);
config_value!(GOOSE_MODE, GooseMode);
Expand Down
104 changes: 92 additions & 12 deletions crates/goose/src/providers/chatgpt_codex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,17 +46,56 @@ const HTML_AUTO_CLOSE_TIMEOUT_MS: u64 = 2000;

const CHATGPT_CODEX_PROVIDER_NAME: &str = "chatgpt_codex";
pub const CHATGPT_CODEX_DEFAULT_MODEL: &str = "gpt-5.3-codex";
pub const CHATGPT_CODEX_KNOWN_MODELS: &[&str] = &[
"gpt-5.4",
"gpt-5.3-codex",
"gpt-5.2-codex",
"gpt-5.1-codex",
"gpt-5.1-codex-mini",
"gpt-5.1-codex-max",

/// Static metadata for a ChatGPT Codex model: its API name and the reasoning
/// effort levels it accepts.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ChatGptCodexModelAttrs {
    // Model identifier exactly as sent to the API.
    pub name: &'static str,
    // Reasoning effort levels the model supports, in ascending order of depth.
    pub reasoning_levels: &'static [&'static str],
}

// Most models support the full range of effort levels; share one literal
// instead of repeating it per entry.
const ALL_REASONING_LEVELS: &[&str] = &["low", "medium", "high", "xhigh"];

/// Known ChatGPT Codex models together with the reasoning levels each supports.
pub const CHATGPT_CODEX_KNOWN_MODELS: &[ChatGptCodexModelAttrs] = &[
    ChatGptCodexModelAttrs {
        name: "gpt-5.4",
        reasoning_levels: ALL_REASONING_LEVELS,
    },
    ChatGptCodexModelAttrs {
        name: "gpt-5.3-codex",
        reasoning_levels: ALL_REASONING_LEVELS,
    },
    ChatGptCodexModelAttrs {
        name: "gpt-5.2-codex",
        reasoning_levels: ALL_REASONING_LEVELS,
    },
    ChatGptCodexModelAttrs {
        name: "gpt-5.1-codex",
        reasoning_levels: ALL_REASONING_LEVELS,
    },
    ChatGptCodexModelAttrs {
        name: "gpt-5.1-codex-mini",
        // The mini model exposes a reduced effort range.
        reasoning_levels: &["medium", "high"],
    },
    ChatGptCodexModelAttrs {
        name: "gpt-5.1-codex-max",
        reasoning_levels: ALL_REASONING_LEVELS,
    },
];

const CHATGPT_CODEX_DOC_URL: &str = "https://openai.com/chatgpt";

// Conservative fallback for models not in the known list.
const DEFAULT_REASONING_LEVELS: &[&str] = &["medium", "high"];

/// Returns the reasoning effort levels supported by `model_name`.
///
/// Unknown models fall back to [`DEFAULT_REASONING_LEVELS`] rather than the
/// full range, so we never offer an effort the API might reject.
pub fn reasoning_levels_for_model(model_name: &str) -> &'static [&'static str] {
    CHATGPT_CODEX_KNOWN_MODELS
        .iter()
        .find(|m| m.name == model_name)
        .map(|m| m.reasoning_levels)
        .unwrap_or(DEFAULT_REASONING_LEVELS)
}

/// Names of all known models, in declaration order.
fn known_model_names() -> Vec<&'static str> {
    CHATGPT_CODEX_KNOWN_MODELS.iter().map(|m| m.name).collect()
}

const GPT_53_CODEX_TOOL_PREAMBLE: &str = "\
You are a coding agent. You have access to tools to accomplish tasks. \
Always use your tools to fulfill requests - do not just describe what you would do. \
Expand Down Expand Up @@ -182,13 +221,34 @@ fn build_input_items(messages: &[Message]) -> Result<Vec<Value>> {
Ok(items)
}

/// Resolves the reasoning effort to send for `model_name`.
///
/// Reads `CHATGPT_CODEX_REASONING_EFFORT` from the global config (defaulting
/// to "medium" when unset or unreadable) and validates it against the model's
/// supported levels. An unsupported value is logged and replaced with a level
/// the model actually accepts, so the request never carries an effort the API
/// would reject.
fn get_reasoning_effort(model_name: &str) -> String {
    let config = crate::config::Config::global();
    let effort = config
        .get_chatgpt_codex_reasoning_effort()
        .map(String::from)
        .unwrap_or_else(|_| "medium".to_string());

    let valid_levels = reasoning_levels_for_model(model_name);
    if valid_levels.contains(&effort.as_str()) {
        return effort;
    }

    // Prefer "medium" when the model supports it (every known model does
    // today); otherwise fall back to the model's first supported level.
    let fallback = if valid_levels.contains(&"medium") {
        "medium"
    } else {
        valid_levels.first().copied().unwrap_or("medium")
    };
    tracing::warn!(
        "Invalid CHATGPT_CODEX_REASONING_EFFORT '{}' for model '{}', using '{}'",
        effort,
        model_name,
        fallback
    );
    fallback.to_string()
}

fn create_codex_request(
model_config: &ModelConfig,
system: &str,
messages: &[Message],
tools: &[Tool],
) -> Result<Value> {
let input_items = build_input_items(messages)?;
let reasoning_effort = get_reasoning_effort(&model_config.model_name);

let instructions = match model_config.model_name.as_str() {
"gpt-5.3-codex" => format!("{GPT_53_CODEX_TOOL_PREAMBLE}\n\n{system}"),
Expand All @@ -199,6 +259,7 @@ fn create_codex_request(
"model": model_config.model_name,
"input": input_items,
"store": false,
"reasoning": {"effort": reasoning_effort},
"instructions": instructions,
});

Expand Down Expand Up @@ -871,7 +932,7 @@ impl ProviderDef for ChatGptCodexProvider {
"ChatGPT Codex",
"Use your ChatGPT Plus/Pro subscription for GPT-5 Codex models via OAuth",
CHATGPT_CODEX_DEFAULT_MODEL,
CHATGPT_CODEX_KNOWN_MODELS.to_vec(),
known_model_names(),
CHATGPT_CODEX_DOC_URL,
vec![ConfigKey::new_oauth(
"CHATGPT_CODEX_TOKEN",
Expand Down Expand Up @@ -944,10 +1005,7 @@ impl Provider for ChatGptCodexProvider {
}

/// Returns the static list of known Codex model names; this provider does
/// not query a remote model catalogue.
async fn fetch_supported_models(&self) -> Result<Vec<String>, ProviderError> {
    Ok(known_model_names().into_iter().map(String::from).collect())
}
}

Expand Down Expand Up @@ -1232,4 +1290,26 @@ mod tests {

assert_eq!(claims.chatgpt_account_id.as_deref(), Some("account-1"));
}

// Covers the fallback path plus representative known models (full range and
// the reduced mini range).
#[test_case("unknown-model", &["medium", "high"]; "unknown model gets default reasoning levels")]
#[test_case("gpt-5.1-codex-mini", &["medium", "high"]; "mini model has reduced reasoning levels")]
#[test_case("gpt-5.3-codex", &["low", "medium", "high", "xhigh"]; "codex model has full reasoning levels")]
fn test_reasoning_levels_for_model(model: &str, expected: &[&str]) {
    assert_eq!(reasoning_levels_for_model(model), expected);
}

// The gpt-5.3-codex model must get the tool-use preamble prepended to the
// caller-supplied system prompt.
#[test]
fn test_gpt53_preamble_injected() {
    let cfg = ModelConfig::new("gpt-5.3-codex").unwrap();
    let request = create_codex_request(&cfg, "system prompt", &[], &[]).unwrap();
    let text = request["instructions"].as_str().unwrap();
    assert!(text.contains(GPT_53_CODEX_TOOL_PREAMBLE));
    assert!(text.contains("system prompt"));
}

// Models other than gpt-5.3-codex must pass the system prompt through
// untouched — no preamble injection.
#[test]
fn test_other_models_no_preamble() {
    let cfg = ModelConfig::new("gpt-5.4").unwrap();
    let request = create_codex_request(&cfg, "system prompt", &[], &[]).unwrap();
    assert_eq!(request["instructions"].as_str().unwrap(), "system prompt");
}
}
Loading