diff --git a/src/api/providers/__tests__/roo.spec.ts b/src/api/providers/__tests__/roo.spec.ts
index 137b7c7f627..2dab7c78bed 100644
--- a/src/api/providers/__tests__/roo.spec.ts
+++ b/src/api/providers/__tests__/roo.spec.ts
@@ -452,7 +452,7 @@ describe("RooHandler", () => {
 	})
 
 	describe("temperature and model configuration", () => {
-		it("should use default temperature of 0.7", async () => {
+		it("should use default temperature of 0", async () => {
 			handler = new RooHandler(mockOptions)
 			const stream = handler.createMessage(systemPrompt, messages)
 			for await (const _chunk of stream) {
@@ -461,7 +461,7 @@
 
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					temperature: 0.7,
+					temperature: 0,
 				}),
 				expect.objectContaining({
 					headers: expect.objectContaining({
diff --git a/src/api/providers/roo.ts b/src/api/providers/roo.ts
index abab59effea..83eab87ef7e 100644
--- a/src/api/providers/roo.ts
+++ b/src/api/providers/roo.ts
@@ -59,7 +59,6 @@ export class RooHandler extends BaseOpenAiCompatibleProvider {
 			apiKey: sessionToken,
 			defaultProviderModelId: rooDefaultModelId,
 			providerModels: {},
-			defaultTemperature: 0.7,
 		})
 
 		// Load dynamic models asynchronously - strip /v1 from baseURL for fetcher