diff --git a/packages/types/src/providers/fireworks.ts b/packages/types/src/providers/fireworks.ts
index 1918826ca1..3f7b17034e 100644
--- a/packages/types/src/providers/fireworks.ts
+++ b/packages/types/src/providers/fireworks.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from "../model.js"
 export type FireworksModelId =
 	| "accounts/fireworks/models/kimi-k2-instruct"
 	| "accounts/fireworks/models/kimi-k2-instruct-0905"
+	| "accounts/fireworks/models/kimi-k2-thinking"
 	| "accounts/fireworks/models/minimax-m2"
 	| "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507"
 	| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
@@ -43,6 +44,21 @@ export const fireworksModels = {
 		description:
 			"Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities.",
 	},
+	"accounts/fireworks/models/kimi-k2-thinking": {
+		maxTokens: 16000,
+		contextWindow: 256000,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsNativeTools: true,
+		supportsTemperature: true,
+		preserveReasoning: true,
+		defaultTemperature: 1.0,
+		inputPrice: 0.6,
+		outputPrice: 2.5,
+		cacheReadsPrice: 0.15,
+		description:
+			"The kimi-k2-thinking model is a general-purpose agentic reasoning model developed by Moonshot AI. Thanks to its strength in deep reasoning and multi-turn tool use, it can solve even the hardest problems.",
+	},
 	"accounts/fireworks/models/minimax-m2": {
 		maxTokens: 4096,
 		contextWindow: 204800,
diff --git a/src/api/providers/__tests__/fireworks.spec.ts b/src/api/providers/__tests__/fireworks.spec.ts
index 9b837fef60..ac5c4396f1 100644
--- a/src/api/providers/__tests__/fireworks.spec.ts
+++ b/src/api/providers/__tests__/fireworks.spec.ts
@@ -115,6 +115,31 @@ describe("FireworksHandler", () => {
 		)
 	})
 
+	it("should return Kimi K2 Thinking model with correct configuration", () => {
+		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: testModelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(
+			expect.objectContaining({
+				maxTokens: 16000,
+				contextWindow: 256000,
+				supportsImages: false,
+				supportsPromptCache: true,
+				supportsNativeTools: true,
+				supportsTemperature: true,
+				preserveReasoning: true,
+				defaultTemperature: 1.0,
+				inputPrice: 0.6,
+				outputPrice: 2.5,
+				cacheReadsPrice: 0.15,
+			}),
+		)
+	})
+
 	it("should return MiniMax M2 model with correct configuration", () => {
 		const testModelId: FireworksModelId = "accounts/fireworks/models/minimax-m2"
 		const handlerWithModel = new FireworksHandler({
@@ -424,16 +449,85 @@ describe("FireworksHandler", () => {
 		)
 	})
 
-	it("should use default temperature of 0.5", () => {
-		const testModelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
+	it("should use provider default temperature of 0.5 for models without defaultTemperature", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-instruct"
 		const handlerWithModel = new FireworksHandler({
-			apiModelId: testModelId,
+			apiModelId: modelId,
 			fireworksApiKey: "test-fireworks-api-key",
 		})
-		const model = handlerWithModel.getModel()
-		// The temperature is set in the constructor as defaultTemperature: 0.5
-		// This test verifies the handler is configured with the correct default temperature
-		expect(handlerWithModel).toBeDefined()
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.5,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use model defaultTemperature (1.0) over provider default (0.5) for kimi-k2-thinking", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// Model's defaultTemperature (1.0) should take precedence over provider's default (0.5)
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 1.0,
+			}),
+			undefined,
+		)
+	})
+
+	it("should use user-specified temperature over model and provider defaults", async () => {
+		const modelId: FireworksModelId = "accounts/fireworks/models/kimi-k2-thinking"
+		const handlerWithModel = new FireworksHandler({
+			apiModelId: modelId,
+			fireworksApiKey: "test-fireworks-api-key",
+			modelTemperature: 0.7,
+		})
+
+		mockCreate.mockImplementationOnce(() => ({
+			[Symbol.asyncIterator]: () => ({
+				async next() {
+					return { done: true }
+				},
+			}),
+		}))
+
+		const messageGenerator = handlerWithModel.createMessage("system", [])
+		await messageGenerator.next()
+
+		// User-specified temperature should take precedence over everything
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				temperature: 0.7,
+			}),
+			undefined,
+		)
 	})
 
 	it("should handle empty response in completePrompt", async () => {
diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index d832508f63..2c698f9653 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -84,7 +84,7 @@ export abstract class BaseOpenAiCompatibleProvider
 				format: "openai",
 			}) ?? undefined
 
-		const temperature = this.options.modelTemperature ?? this.defaultTemperature
+		const temperature = this.options.modelTemperature ?? info.defaultTemperature ?? this.defaultTemperature
 
 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
diff --git a/src/integrations/terminal/ExecaTerminalProcess.ts b/src/integrations/terminal/ExecaTerminalProcess.ts
index 6de37768e0..c553ea0209 100644
--- a/src/integrations/terminal/ExecaTerminalProcess.ts
+++ b/src/integrations/terminal/ExecaTerminalProcess.ts
@@ -79,6 +79,8 @@ export class ExecaTerminalProcess extends BaseTerminalProcess {
 			})
 		}
 
+		// Check if this is a background command (ends with &)
+		const isBackgroundCommand = /&\s*(#.*)?$/.test(command.trim())
 		const rawStream = this.subprocess.iterable({ from: "all", preserveNewlines: true })
 		const decoder = new TextDecoder("utf-8")
 		const stream = (async function* () {
@@ -92,7 +94,20 @@ export class ExecaTerminalProcess extends BaseTerminalProcess {
 		})()
 
 		await this.terminal.setActiveStream(stream, Promise.resolve(this.pid))
 
+		let outputIndex = 0
+		if (isBackgroundCommand) {
+			delay(10_000).then(() => {
+				if (this.aborted || outputIndex > 0) {
+					return
+				}
+
+				const warning = `background command running [${command.length > 30 ? `${command.slice(0, 30)}...` : command}]\n`
+				this.emit("line", warning)
+				this.startHotTimer(warning)
+			})
+		}
+
 		for await (const line of stream) {
 			if (this.aborted) {
 				break
diff --git a/src/package.json b/src/package.json
index 32c10e1f45..c7a9383083 100644
--- a/src/package.json
+++ b/src/package.json
@@ -802,16 +802,19 @@
 				"zgsm.debugProxy.enabled": {
 					"type": "boolean",
 					"default": false,
+					"description": "%settings.debugProxy.enabled.description%",
 					"markdownDescription": "%settings.debugProxy.enabled.description%"
 				},
 				"zgsm.debugProxy.serverUrl": {
 					"type": "string",
 					"default": "http://127.0.0.1:8888",
+					"description": "%settings.debugProxy.serverUrl.description%",
 					"markdownDescription": "%settings.debugProxy.serverUrl.description%"
 				},
 				"zgsm.debugProxy.tlsInsecure": {
 					"type": "boolean",
 					"default": false,
+					"description": "%settings.debugProxy.tlsInsecure.description%",
 					"markdownDescription": "%settings.debugProxy.tlsInsecure.description%"
 				}
 			}
diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx
index 02986381d6..05f11e408c 100644
--- a/webview-ui/src/components/chat/ChatTextArea.tsx
+++ b/webview-ui/src/components/chat/ChatTextArea.tsx
@@ -1438,7 +1438,7 @@ export const ChatTextArea = forwardRef(
-
+
 {showLabel && }
 {tooltip ? (
diff --git a/webview-ui/src/components/settings/ProviderRenderer.tsx b/webview-ui/src/components/settings/ProviderRenderer.tsx
index 1d8fa1cee6..1cbe75e56e 100644
--- a/webview-ui/src/components/settings/ProviderRenderer.tsx
+++ b/webview-ui/src/components/settings/ProviderRenderer.tsx
@@ -213,6 +213,7 @@ const ProviderRenderer: React.FC = ({
 0 ? "" : "hidden")}>
 {config?.modelIdKey ? (