Commit 6ffdd44
Remove GPT‑5 instructions/reasoning_summary from UI message metadata to prevent ui_messages.json bloat (#8756)
chore(gpt5): stop persisting instructions/reasoning_summary in UI message metadata

Problem: ui_messages.json was getting bloated with unused or duplicated content (system 'instructions' and 'reasoning_summary') that we do not read back.

Root cause: the earlier OpenAI Responses API implementation persisted these fields to per-message metadata; however, 'instructions' are already sent as top-level request instructions and 'reasoning_summary' is surfaced live via streaming events. Neither field is consumed from storage.

Changes:
(1) Task.persistGpt5Metadata now stores only previous_response_id;
(2) removed instructions and reasoning_summary from the types;
(3) updated the Zod schema;
(4) the persistence layer writes messages as-is (no sanitizer);
(5) tests green.

Impact: smaller ui_messages.json, no runtime behavior change for requests.

Migration: old metadata fields will be ignored by the schema.
1 parent f3a505f · commit 6ffdd44
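For context on why only previous_response_id is worth keeping: a minimal sketch, assuming the openai Node SDK's Responses API surface (the model id, prompt text, and variable names below are illustrative, not taken from this repo). The system instructions travel as a top-level request field and the reasoning summary arrives through streaming events, so neither needs to be copied into per-message UI metadata; the next turn only needs the previous response's id.

import OpenAI from "openai"

const client = new OpenAI()

// Turn 1: 'instructions' is a top-level request parameter, not per-message state.
const first = await client.responses.create({
	model: "gpt-5", // illustrative model id
	instructions: "You are a coding assistant.",
	input: "Refactor this function.",
})

// Turn 2: the only carry-over the API needs from turn 1 is its response id,
// which is exactly what persistGpt5Metadata still stores.
const second = await client.responses.create({
	model: "gpt-5",
	previous_response_id: first.id,
	input: "Now add unit tests.",
})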

4 files changed: +77 -20 lines changed

packages/types/src/message.ts

Lines changed: 0 additions & 2 deletions

@@ -221,8 +221,6 @@ export const clineMessageSchema = z.object({
 	gpt5: z
 		.object({
 			previous_response_id: z.string().optional(),
-			instructions: z.string().optional(),
-			reasoning_summary: z.string().optional(),
 		})
 		.optional(),
 })
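To back the migration note ("old metadata fields will be ignored by schema"): a small standalone sketch of the parsing behavior. The reduced schema below mirrors only the gpt5 object from this diff, not the full clineMessageSchema; by default a non-strict z.object() strips unknown keys, so legacy instructions/reasoning_summary entries in existing ui_messages.json files simply fall away on parse.

import { z } from "zod"

// Reduced stand-in for the gpt5 slice of clineMessageSchema shown above.
const gpt5Schema = z
	.object({
		previous_response_id: z.string().optional(),
	})
	.optional()

// Metadata written by the old implementation.
const legacy = {
	previous_response_id: "resp_123",
	instructions: "...long system prompt...",
	reasoning_summary: "...model reasoning...",
}

// Unknown keys are stripped, so only previous_response_id survives.
console.log(gpt5Schema.parse(legacy))
// => { previous_response_id: "resp_123" }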
New test file: 71 additions & 0 deletions

@@ -0,0 +1,71 @@
+import { describe, it, expect, vi, beforeEach } from "vitest"
+import * as os from "os"
+import * as path from "path"
+import * as fs from "fs/promises"
+
+// Mocks (use hoisted to avoid initialization ordering issues)
+const hoisted = vi.hoisted(() => ({
+	safeWriteJsonMock: vi.fn().mockResolvedValue(undefined),
+}))
+vi.mock("../../../utils/safeWriteJson", () => ({
+	safeWriteJson: hoisted.safeWriteJsonMock,
+}))
+
+// Import after mocks
+import { saveTaskMessages } from "../taskMessages"
+
+let tmpBaseDir: string
+
+beforeEach(async () => {
+	hoisted.safeWriteJsonMock.mockClear()
+	// Create a unique, writable temp directory to act as globalStoragePath
+	tmpBaseDir = await fs.mkdtemp(path.join(os.tmpdir(), "roo-test-"))
+})
+
+describe("taskMessages.saveTaskMessages", () => {
+	beforeEach(() => {
+		hoisted.safeWriteJsonMock.mockClear()
+	})
+
+	it("persists messages as-is", async () => {
+		const messages: any[] = [
+			{
+				role: "assistant",
+				content: "Hello",
+				metadata: {
+					gpt5: {
+						previous_response_id: "resp_123",
+					},
+					other: "keep",
+				},
+			},
+			{ role: "user", content: "Do thing" },
+		]
+
+		await saveTaskMessages({
+			messages,
+			taskId: "task-1",
+			globalStoragePath: tmpBaseDir,
+		})
+
+		expect(hoisted.safeWriteJsonMock).toHaveBeenCalledTimes(1)
+		const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0]
+		expect(persisted).toEqual(messages)
+	})
+
+	it("persists messages without modification when no metadata", async () => {
+		const messages: any[] = [
+			{ role: "assistant", content: "Hi" },
+			{ role: "user", content: "Yo" },
+		]
+
+		await saveTaskMessages({
+			messages,
+			taskId: "task-2",
+			globalStoragePath: tmpBaseDir,
+		})
+
+		const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0]
+		expect(persisted).toEqual(messages)
+	})
+})
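The two tests above pin down the "writes messages as-is (no sanitizer)" behavior from the commit message. A minimal sketch of the pass-through shape they assume follows; only the { messages, taskId, globalStoragePath } signature and the safeWriteJson call are taken from the tests, while the import path and the tasks/<taskId>/ui_messages.json layout are assumptions for illustration.

import * as path from "path"
import { safeWriteJson } from "../../utils/safeWriteJson" // assumed relative path

type SaveTaskMessagesArgs = {
	messages: unknown[]
	taskId: string
	globalStoragePath: string
}

// Pass-through persistence: no field stripping or sanitizing step,
// the message array reaches safeWriteJson exactly as received.
export async function saveTaskMessages({ messages, taskId, globalStoragePath }: SaveTaskMessagesArgs) {
	const filePath = path.join(globalStoragePath, "tasks", taskId, "ui_messages.json") // assumed layout
	await safeWriteJson(filePath, messages)
}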

src/core/task/Task.ts

Lines changed: 6 additions & 6 deletions
@@ -2267,7 +2267,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			}
 		}

-		await this.persistGpt5Metadata(reasoningMessage)
+		await this.persistGpt5Metadata()
 		await this.saveClineMessages()
 		await this.providerRef.deref()?.postStateToWebview()

@@ -2853,10 +2853,12 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	/**
-	 * Persist GPT-5 per-turn metadata (previous_response_id, instructions, reasoning_summary)
+	 * Persist GPT-5 per-turn metadata (previous_response_id only)
 	 * onto the last complete assistant say("text") message.
+	 *
+	 * Note: We do not persist system instructions or reasoning summaries.
 	 */
-	private async persistGpt5Metadata(reasoningMessage?: string): Promise<void> {
+	private async persistGpt5Metadata(): Promise<void> {
 		try {
 			const modelId = this.api.getModel().id
 			if (!modelId || !modelId.startsWith("gpt-5")) return

@@ -2875,9 +2877,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 			}
 			const gpt5Metadata: Gpt5Metadata = {
 				...(msg.metadata.gpt5 ?? {}),
-				previous_response_id: lastResponseId,
-				instructions: this.lastUsedInstructions,
-				reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
+				...(lastResponseId ? { previous_response_id: lastResponseId } : {}),
 			}
 			msg.metadata.gpt5 = gpt5Metadata
 		}
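As a usage note, the read path this trimmed metadata has to support is recovering the most recent response id when a conversation is resumed, so the next request can be chained with previous_response_id. A hedged sketch under assumed names: getLastGpt5ResponseId and the StoredMessage shape are illustrative stand-ins, not APIs from this diff (the real message type lives in packages/types).

// Illustrative shape; the real ClineMessage type lives in packages/types/src/message.ts.
interface StoredMessage {
	metadata?: { gpt5?: { previous_response_id?: string } }
}

// Walk backwards over the persisted UI messages and return the most recent
// previous_response_id, if any turn recorded one.
function getLastGpt5ResponseId(messages: StoredMessage[]): string | undefined {
	for (let i = messages.length - 1; i >= 0; i--) {
		const id = messages[i].metadata?.gpt5?.previous_response_id
		if (id) return id
	}
	return undefined
}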

src/core/task/types.ts

Lines changed: 0 additions & 12 deletions
@@ -12,18 +12,6 @@ export interface Gpt5Metadata {
 	 * Used to maintain conversation continuity in subsequent requests
 	 */
 	previous_response_id?: string
-
-	/**
-	 * The system instructions/prompt used for this response
-	 * Stored to track what instructions were active when the response was generated
-	 */
-	instructions?: string
-
-	/**
-	 * The reasoning summary from GPT-5's reasoning process
-	 * Contains the model's internal reasoning if reasoning mode was enabled
-	 */
-	reasoning_summary?: string
 }

 /**
