diff --git a/packages/types/src/providers/openai.ts b/packages/types/src/providers/openai.ts
index 722b57677cc..264c5aa40f5 100644
--- a/packages/types/src/providers/openai.ts
+++ b/packages/types/src/providers/openai.ts
@@ -3,7 +3,7 @@ import type { ModelInfo } from "../model.js"
 // https://openai.com/api/pricing/
 
 export type OpenAiNativeModelId = keyof typeof openAiNativeModels
-export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5.1"
+export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5.1-codex-max"
 
 export const openAiNativeModels = {
 	"gpt-5.1-codex-max": {
@@ -16,7 +16,7 @@ export const openAiNativeModels = {
 		supportsPromptCache: true,
 		promptCacheRetention: "24h",
 		supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
-		reasoningEffort: "medium",
+		reasoningEffort: "xhigh",
 		inputPrice: 1.25,
 		outputPrice: 10.0,
 		cacheReadsPrice: 0.125,
@@ -25,6 +25,41 @@ export const openAiNativeModels = {
 		description:
 			"GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks",
 	},
+	"gpt-5.2": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.75,
+		outputPrice: 14.0,
+		cacheReadsPrice: 0.175,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		tiers: [
+			{ name: "flex", contextWindow: 400000, inputPrice: 0.875, outputPrice: 7.0, cacheReadsPrice: 0.0875 },
+			{ name: "priority", contextWindow: 400000, inputPrice: 3.5, outputPrice: 28.0, cacheReadsPrice: 0.35 },
+		],
+		description: "GPT-5.2: Our flagship model for coding and agentic tasks across industries",
+	},
+	"gpt-5.2-chat-latest": {
+		maxTokens: 16_384,
+		contextWindow: 128_000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		inputPrice: 1.75,
+		outputPrice: 14.0,
+		cacheReadsPrice: 0.175,
+		description: "GPT-5.2 Chat: Optimized for conversational AI and chat use cases",
+	},
 	"gpt-5.1": {
 		maxTokens: 128000,
 		contextWindow: 400000,
diff --git a/src/api/providers/__tests__/openai-native.spec.ts b/src/api/providers/__tests__/openai-native.spec.ts
index 0482b8893b8..3196316f639 100644
--- a/src/api/providers/__tests__/openai-native.spec.ts
+++ b/src/api/providers/__tests__/openai-native.spec.ts
@@ -205,7 +205,7 @@ describe("OpenAiNativeHandler", () => {
 			openAiNativeApiKey: "test-api-key",
 		})
 		const modelInfo = handlerWithoutModel.getModel()
-		expect(modelInfo.id).toBe("gpt-5.1") // Default model
+		expect(modelInfo.id).toBe("gpt-5.1-codex-max") // Default model
 		expect(modelInfo.info).toBeDefined()
 	})
 })
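
For reviewers, a minimal sketch (not part of the patch) of how the updated default and the new table entries would be consumed. The "@roo-code/types" import path and the resolveOpenAiNativeModel helper are assumptions for illustration only, inferred from the packages/types location; they are not defined by this diff.

// Hypothetical usage sketch; the import path is assumed, not taken from this patch.
import { openAiNativeDefaultModelId, openAiNativeModels } from "@roo-code/types"

// Fall back to the new default ("gpt-5.1-codex-max") when the requested id is unknown.
function resolveOpenAiNativeModel(requestedId?: string) {
	const id =
		requestedId && requestedId in openAiNativeModels
			? (requestedId as keyof typeof openAiNativeModels)
			: openAiNativeDefaultModelId
	return { id, info: openAiNativeModels[id] }
}

// "gpt-5.2" resolves to the newly added 400k-context entry; an unknown id falls back to the default.
console.log(resolveOpenAiNativeModel("gpt-5.2").id) // "gpt-5.2"
console.log(resolveOpenAiNativeModel("not-a-model").id) // "gpt-5.1-codex-max"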