diff --git a/packages/types/src/providers/openai.ts b/packages/types/src/providers/openai.ts
index 722b57677cc..8687710d165 100644
--- a/packages/types/src/providers/openai.ts
+++ b/packages/types/src/providers/openai.ts
@@ -3,9 +3,58 @@ import type { ModelInfo } from "../model.js"
 // https://openai.com/api/pricing/
 export type OpenAiNativeModelId = keyof typeof openAiNativeModels
 
-export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5.1"
+export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5.2"
 
 export const openAiNativeModels = {
+	"gpt-5.2": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.75,
+		outputPrice: 14.0,
+		cacheReadsPrice: 0.175,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5.2: The best model for coding and agentic tasks across industries",
+	},
+	"gpt-5.2-chat-latest": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		inputPrice: 1.75,
+		outputPrice: 14.0,
+		cacheReadsPrice: 0.175,
+		description: "GPT-5.2 Chat: Most up-to-date chat experience for conversational tasks",
+	},
+	"gpt-5.2-pro": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["medium", "high", "xhigh"],
+		reasoningEffort: "high",
+		inputPrice: 21.0,
+		outputPrice: 168.0,
+		cacheReadsPrice: 2.1,
+		supportsTemperature: false,
+		description: "GPT-5.2 Pro: Highest-quality model with stronger reasoning capabilities",
+	},
 	"gpt-5.1-codex-max": {
 		maxTokens: 128000,
 		contextWindow: 400000,
@@ -414,6 +463,41 @@ export const openAiNativeModels = {
 			"Codex Mini: Cloud-based software engineering agent powered by codex-1, a version of o3 optimized for coding tasks. Trained with reinforcement learning to generate human-style code, adhere to instructions, and iteratively run tests.",
 	},
 	// Dated clones (snapshots) preserved for backward compatibility
+	"gpt-5.2-2025-12-11": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["none", "low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 1.75,
+		outputPrice: 14.0,
+		cacheReadsPrice: 0.175,
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		description: "GPT-5.2 snapshot (2025-12-11): The best model for coding and agentic tasks across industries",
+	},
+	"gpt-5.2-pro-2025-12-11": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: true,
+		supportsPromptCache: true,
+		promptCacheRetention: "24h",
+		supportsReasoningEffort: ["medium", "high", "xhigh"],
+		reasoningEffort: "high",
+		inputPrice: 21.0,
+		outputPrice: 168.0,
+		cacheReadsPrice: 2.1,
+		supportsTemperature: false,
+		description: "GPT-5.2 Pro snapshot (2025-12-11): Highest-quality model with stronger reasoning capabilities",
+	},
 	"gpt-5-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
diff --git a/src/api/providers/__tests__/openai-native-usage.spec.ts b/src/api/providers/__tests__/openai-native-usage.spec.ts
index 48e1c26877b..7d8c5a1c366 100644
--- a/src/api/providers/__tests__/openai-native-usage.spec.ts
+++ b/src/api/providers/__tests__/openai-native-usage.spec.ts
@@ -361,7 +361,7 @@ describe("OpenAiNativeHandler - normalizeUsage", () => {
 		return (handler as any).buildRequestBody(model, [], "", model.verbosity, undefined, undefined)
 	}
 
-	it("should set prompt_cache_retention=24h for gpt-5.1 models that support prompt caching", () => {
+	it("should set prompt_cache_retention=24h for gpt-5.x models that support prompt caching", () => {
 		const body = buildRequestBodyForModel("gpt-5.1")
 		expect(body.prompt_cache_retention).toBe("24h")
 
@@ -370,6 +370,15 @@ describe("OpenAiNativeHandler - normalizeUsage", () => {
 
 		const codexMiniBody = buildRequestBodyForModel("gpt-5.1-codex-mini")
 		expect(codexMiniBody.prompt_cache_retention).toBe("24h")
+
+		const gpt52Body = buildRequestBodyForModel("gpt-5.2")
+		expect(gpt52Body.prompt_cache_retention).toBe("24h")
+
+		const gpt52ChatBody = buildRequestBodyForModel("gpt-5.2-chat-latest")
+		expect(gpt52ChatBody.prompt_cache_retention).toBe("24h")
+
+		const gpt52ProBody = buildRequestBodyForModel("gpt-5.2-pro")
+		expect(gpt52ProBody.prompt_cache_retention).toBe("24h")
 	})
 
 	it("should not set prompt_cache_retention for non-gpt-5.1 models even if they support prompt caching", () => {
diff --git a/src/api/providers/__tests__/openai-native.spec.ts b/src/api/providers/__tests__/openai-native.spec.ts
index 0482b8893b8..33e7bfe0d40 100644
--- a/src/api/providers/__tests__/openai-native.spec.ts
+++ b/src/api/providers/__tests__/openai-native.spec.ts
@@ -205,7 +205,7 @@ describe("OpenAiNativeHandler", () => {
 				openAiNativeApiKey: "test-api-key",
 			})
 			const modelInfo = handlerWithoutModel.getModel()
-			expect(modelInfo.id).toBe("gpt-5.1") // Default model
+			expect(modelInfo.id).toBe("gpt-5.2") // Default model
 			expect(modelInfo.info).toBeDefined()
 		})
 	})