Skip to content

Commit abe3252

Browse files
committed
feat: add verbosity setting for GPT-5 models
- Add VerbosityLevel type definition to model types
- Add verbosity field to ProviderSettings schema
- Create Verbosity UI component for settings
- Add verbosity labels to all localization files
- Integrate verbosity handling in model parameters transformation
- Update OpenAI native handler to support verbosity for GPT-5
- Add comprehensive tests for verbosity setting
- Update existing GPT-5 tests to use verbosity from settings
1 parent 3f9bd5d commit abe3252

File tree

26 files changed

+335
-38
lines changed

26 files changed

+335
-38
lines changed

packages/types/src/model.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,16 @@ export const reasoningEffortsSchema = z.enum(reasoningEfforts)
1010

1111
export type ReasoningEffort = z.infer<typeof reasoningEffortsSchema>
1212

13+
/**
14+
* Verbosity
15+
*/
16+
17+
export const verbosityLevels = ["low", "medium", "high"] as const
18+
19+
export const verbosityLevelsSchema = z.enum(verbosityLevels)
20+
21+
export type VerbosityLevel = z.infer<typeof verbosityLevelsSchema>
22+
1323
/**
1424
* ModelParameter
1525
*/

packages/types/src/provider-settings.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import { z } from "zod"
22

3-
import { reasoningEffortsSchema, modelInfoSchema } from "./model.js"
3+
import { reasoningEffortsSchema, verbosityLevelsSchema, modelInfoSchema } from "./model.js"
44
import { codebaseIndexProviderSchema } from "./codebase-index.js"
55

66
/**
@@ -79,6 +79,9 @@ const baseProviderSettingsSchema = z.object({
7979
reasoningEffort: reasoningEffortsSchema.optional(),
8080
modelMaxTokens: z.number().optional(),
8181
modelMaxThinkingTokens: z.number().optional(),
82+
83+
// Model verbosity.
84+
verbosity: verbosityLevelsSchema.optional(),
8285
})
8386

8487
// Several of the providers share common model config properties.

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -538,16 +538,9 @@ describe("OpenAiNativeHandler", () => {
538538
handler = new OpenAiNativeHandler({
539539
...mockOptions,
540540
apiModelId: "gpt-5-2025-08-07",
541+
verbosity: "low", // Set verbosity through options
541542
})
542543

543-
// Test that the handler has verbosity control methods
544-
expect(handler.setGpt5Verbosity).toBeDefined()
545-
expect(handler.getGpt5Verbosity).toBeDefined()
546-
547-
// Set verbosity to low
548-
handler.setGpt5Verbosity("low")
549-
expect(handler.getGpt5Verbosity()).toBe("low")
550-
551544
// Create a message to verify verbosity is passed
552545
const stream = handler.createMessage(systemPrompt, messages)
553546
const chunks: any[] = []
@@ -597,11 +590,10 @@ describe("OpenAiNativeHandler", () => {
597590
handler = new OpenAiNativeHandler({
598591
...mockOptions,
599592
apiModelId: "gpt-5-2025-08-07",
593+
verbosity: "high", // Set verbosity through options
594+
reasoningEffort: "low", // Set reasoning effort
600595
})
601596

602-
// Set both verbosity and reasoning effort
603-
handler.setGpt5Verbosity("high")
604-
605597
const stream = handler.createMessage(systemPrompt, messages)
606598
const chunks: any[] = []
607599
for await (const chunk of stream) {
@@ -615,7 +607,7 @@ describe("OpenAiNativeHandler", () => {
615607
messages: expect.any(Array),
616608
stream: true,
617609
stream_options: { include_usage: true },
618-
reasoning_effort: "minimal", // Default for GPT-5
610+
reasoning_effort: "low",
619611
verbosity: "high",
620612
}),
621613
)

src/api/providers/openai-native.ts

Lines changed: 34 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import {
88
openAiNativeModels,
99
OPENAI_NATIVE_DEFAULT_TEMPERATURE,
1010
type ReasoningEffort,
11+
type VerbosityLevel,
1112
} from "@roo-code/types"
1213

1314
import type { ApiHandlerOptions } from "../../shared/api"
@@ -24,7 +25,6 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
2425
export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>
2526

2627
// GPT-5 specific types for Responses API
27-
type Verbosity = "low" | "medium" | "high"
2828
type ReasoningEffortWithMinimal = ReasoningEffort | "minimal"
2929

3030
interface GPT5ResponsesAPIParams {
@@ -34,7 +34,7 @@ interface GPT5ResponsesAPIParams {
3434
effort: ReasoningEffortWithMinimal
3535
}
3636
text?: {
37-
verbosity: Verbosity
37+
verbosity: VerbosityLevel
3838
}
3939
}
4040

@@ -53,7 +53,6 @@ interface GPT5ResponseChunk {
5353
export class OpenAiNativeHandler extends BaseProvider implements SingleCompletionHandler {
5454
protected options: ApiHandlerOptions
5555
private client: OpenAI
56-
private gpt5Verbosity: Verbosity = "medium" // Default verbosity for GPT-5
5756

5857
constructor(options: ApiHandlerOptions) {
5958
super()
@@ -146,18 +145,35 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
146145
systemPrompt: string,
147146
messages: Anthropic.Messages.MessageParam[],
148147
): ApiStream {
149-
const { reasoning } = this.getModel()
148+
const { reasoning, verbosity } = this.getModel()
150149

151-
const stream = await this.client.chat.completions.create({
150+
// Prepare the request parameters
151+
const params: any = {
152152
model: model.id,
153153
temperature: this.options.modelTemperature ?? OPENAI_NATIVE_DEFAULT_TEMPERATURE,
154154
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
155155
stream: true,
156156
stream_options: { include_usage: true },
157157
...(reasoning && reasoning),
158-
})
158+
}
159159

160-
yield* this.handleStreamResponse(stream, model)
160+
// Add verbosity if supported (for future GPT-5 models)
161+
if (verbosity && model.id.startsWith("gpt-5")) {
162+
params.verbosity = verbosity
163+
}
164+
165+
const stream = await this.client.chat.completions.create(params)
166+
167+
if (typeof (stream as any)[Symbol.asyncIterator] !== "function") {
168+
throw new Error(
169+
"OpenAI SDK did not return an AsyncIterable for streaming response. Please check SDK version and usage.",
170+
)
171+
}
172+
173+
yield* this.handleStreamResponse(
174+
stream as unknown as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
175+
model,
176+
)
161177
}
162178

163179
private async *handleGpt5Message(
@@ -172,6 +188,9 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
172188
// Get reasoning effort, supporting the new "minimal" option for GPT-5
173189
const reasoningEffort = this.getGpt5ReasoningEffort(model)
174190

191+
// Get verbosity from model settings, default to "medium" if not specified
192+
const verbosity = model.verbosity || "medium"
193+
175194
// Prepare the request parameters for Responses API
176195
const params: GPT5ResponsesAPIParams = {
177196
model: model.id,
@@ -182,7 +201,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
182201
},
183202
}),
184203
text: {
185-
verbosity: this.gpt5Verbosity,
204+
verbosity: verbosity,
186205
},
187206
}
188207

@@ -332,16 +351,6 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
332351
}
333352
}
334353

335-
// Method to set verbosity for GPT-5 models
336-
setGpt5Verbosity(verbosity: Verbosity) {
337-
this.gpt5Verbosity = verbosity
338-
}
339-
340-
// Method to get current verbosity setting
341-
getGpt5Verbosity(): Verbosity {
342-
return this.gpt5Verbosity
343-
}
344-
345354
private isGpt5Model(modelId: string): boolean {
346355
return modelId.startsWith("gpt-5")
347356
}
@@ -411,23 +420,25 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
411420

412421
// The o3 models are named like "o3-mini-[reasoning-effort]", which are
413422
// not valid model ids, so we need to strip the suffix.
414-
return { id: id.startsWith("o3-mini") ? "o3-mini" : id, info, ...params }
423+
return { id: id.startsWith("o3-mini") ? "o3-mini" : id, info, ...params, verbosity: params.verbosity }
415424
}
416425

417426
async completePrompt(prompt: string): Promise<string> {
418427
try {
419-
const { id, temperature, reasoning } = this.getModel()
428+
const { id, temperature, reasoning, verbosity } = this.getModel()
420429

421-
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming & { verbosity?: Verbosity } = {
430+
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming & {
431+
verbosity?: VerbosityLevel
432+
} = {
422433
model: id,
423434
messages: [{ role: "user", content: prompt }],
424435
temperature,
425436
...(reasoning && reasoning),
426437
}
427438

428439
// Add verbosity for GPT-5 models
429-
if (this.isGpt5Model(id)) {
430-
params.verbosity = this.gpt5Verbosity
440+
if (this.isGpt5Model(id) && verbosity) {
441+
params.verbosity = verbosity
431442
}
432443

433444
const response = await this.client.chat.completions.create(params as any)

src/api/transform/__tests__/model-params.spec.ts

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -788,4 +788,101 @@ describe("getModelParams", () => {
788788
expect(result.reasoning).toBeUndefined()
789789
})
790790
})
791+
792+
describe("Verbosity settings", () => {
793+
it("should include verbosity when specified in settings", () => {
794+
const model: ModelInfo = {
795+
...baseModel,
796+
}
797+
798+
const result = getModelParams({
799+
...openaiParams,
800+
settings: { verbosity: "low" },
801+
model,
802+
})
803+
804+
expect(result.verbosity).toBe("low")
805+
})
806+
807+
it("should handle medium verbosity", () => {
808+
const model: ModelInfo = {
809+
...baseModel,
810+
}
811+
812+
const result = getModelParams({
813+
...openaiParams,
814+
settings: { verbosity: "medium" },
815+
model,
816+
})
817+
818+
expect(result.verbosity).toBe("medium")
819+
})
820+
821+
it("should handle high verbosity", () => {
822+
const model: ModelInfo = {
823+
...baseModel,
824+
}
825+
826+
const result = getModelParams({
827+
...openaiParams,
828+
settings: { verbosity: "high" },
829+
model,
830+
})
831+
832+
expect(result.verbosity).toBe("high")
833+
})
834+
835+
it("should return undefined verbosity when not specified", () => {
836+
const model: ModelInfo = {
837+
...baseModel,
838+
}
839+
840+
const result = getModelParams({
841+
...openaiParams,
842+
settings: {},
843+
model,
844+
})
845+
846+
expect(result.verbosity).toBeUndefined()
847+
})
848+
849+
it("should include verbosity alongside reasoning settings", () => {
850+
const model: ModelInfo = {
851+
...baseModel,
852+
supportsReasoningEffort: true,
853+
}
854+
855+
const result = getModelParams({
856+
...openaiParams,
857+
settings: {
858+
reasoningEffort: "high",
859+
verbosity: "low",
860+
},
861+
model,
862+
})
863+
864+
expect(result.reasoningEffort).toBe("high")
865+
expect(result.verbosity).toBe("low")
866+
expect(result.reasoning).toEqual({ reasoning_effort: "high" })
867+
})
868+
869+
it("should include verbosity with reasoning budget models", () => {
870+
const model: ModelInfo = {
871+
...baseModel,
872+
supportsReasoningBudget: true,
873+
}
874+
875+
const result = getModelParams({
876+
...anthropicParams,
877+
settings: {
878+
enableReasoningEffort: true,
879+
verbosity: "high",
880+
},
881+
model,
882+
})
883+
884+
expect(result.verbosity).toBe("high")
885+
expect(result.reasoningBudget).toBe(8192) // Default thinking tokens
886+
})
887+
})
791888
})

src/api/transform/model-params.ts

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,9 @@
1-
import { type ModelInfo, type ProviderSettings, ANTHROPIC_DEFAULT_MAX_TOKENS } from "@roo-code/types"
1+
import {
2+
type ModelInfo,
3+
type ProviderSettings,
4+
type VerbosityLevel,
5+
ANTHROPIC_DEFAULT_MAX_TOKENS,
6+
} from "@roo-code/types"
27

38
import {
49
DEFAULT_HYBRID_REASONING_MODEL_MAX_TOKENS,
@@ -35,6 +40,7 @@ type BaseModelParams = {
3540
temperature: number | undefined
3641
reasoningEffort: "low" | "medium" | "high" | undefined
3742
reasoningBudget: number | undefined
43+
verbosity: VerbosityLevel | undefined
3844
}
3945

4046
type AnthropicModelParams = {
@@ -76,6 +82,7 @@ export function getModelParams({
7682
modelMaxThinkingTokens: customMaxThinkingTokens,
7783
modelTemperature: customTemperature,
7884
reasoningEffort: customReasoningEffort,
85+
verbosity: customVerbosity,
7986
} = settings
8087

8188
// Use the centralized logic for computing maxTokens
@@ -89,6 +96,7 @@ export function getModelParams({
8996
let temperature = customTemperature ?? defaultTemperature
9097
let reasoningBudget: ModelParams["reasoningBudget"] = undefined
9198
let reasoningEffort: ModelParams["reasoningEffort"] = undefined
99+
let verbosity: VerbosityLevel | undefined = customVerbosity
92100

93101
if (shouldUseReasoningBudget({ model, settings })) {
94102
// Check if this is a Gemini 2.5 Pro model
@@ -123,7 +131,7 @@ export function getModelParams({
123131
reasoningEffort = customReasoningEffort ?? model.reasoningEffort
124132
}
125133

126-
const params: BaseModelParams = { maxTokens, temperature, reasoningEffort, reasoningBudget }
134+
const params: BaseModelParams = { maxTokens, temperature, reasoningEffort, reasoningBudget, verbosity }
127135

128136
if (format === "anthropic") {
129137
return {

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@ import { inputEventTransform, noTransform } from "./transforms"
9191
import { ModelInfoView } from "./ModelInfoView"
9292
import { ApiErrorMessage } from "./ApiErrorMessage"
9393
import { ThinkingBudget } from "./ThinkingBudget"
94+
import { Verbosity } from "./Verbosity"
9495
import { DiffSettingsControl } from "./DiffSettingsControl"
9596
import { TodoListSettingsControl } from "./TodoListSettingsControl"
9697
import { TemperatureControl } from "./TemperatureControl"
@@ -616,6 +617,12 @@ const ApiOptions = ({
616617
modelInfo={selectedModelInfo}
617618
/>
618619

620+
<Verbosity
621+
apiConfiguration={apiConfiguration}
622+
setApiConfigurationField={setApiConfigurationField}
623+
modelInfo={selectedModelInfo}
624+
/>
625+
619626
{!fromWelcomeView && (
620627
<Collapsible open={isAdvancedSettingsOpen} onOpenChange={setIsAdvancedSettingsOpen}>
621628
<CollapsibleTrigger className="flex items-center gap-1 w-full cursor-pointer hover:opacity-80 mb-2">

0 commit comments

Comments (0)