diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index d079e22a1c43..5d2b9425e7cf 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -10,6 +10,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 type BaseOpenAiCompatibleProviderOptions = ApiHandlerOptions & {
 	providerName: string
@@ -86,7 +87,11 @@ export abstract class BaseOpenAiCompatibleProvider
 			params.temperature = this.options.modelTemperature
 		}
 
-		return this.client.chat.completions.create(params, requestOptions)
+		try {
+			return this.client.chat.completions.create(params, requestOptions)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 	}
 
 	override async *createMessage(
@@ -127,11 +132,7 @@ export abstract class BaseOpenAiCompatibleProvider
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`${this.providerName} completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 
diff --git a/src/api/providers/huggingface.ts b/src/api/providers/huggingface.ts
index aa158654c9af..7b62046b99e7 100644
--- a/src/api/providers/huggingface.ts
+++ b/src/api/providers/huggingface.ts
@@ -8,11 +8,13 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: OpenAI
 	private options: ApiHandlerOptions
 	private modelCache: ModelRecord | null = null
+	private readonly providerName = "HuggingFace"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -64,7 +66,12 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 			params.max_tokens = this.options.modelMaxTokens
 		}
 
-		const stream = await this.client.chat.completions.create(params)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -97,11 +104,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Hugging Face completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 
"./utils/openai-error-handler" export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler { protected options: ApiHandlerOptions private client: OpenAI + private readonly providerName = "LM Studio" constructor(options: ApiHandlerOptions) { super() this.options = options + // LM Studio uses "noop" as a placeholder API key + const apiKey = "noop" + this.client = new OpenAI({ baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1", - apiKey: "noop", + apiKey: apiKey, timeout: getApiRequestTimeout(), }) } @@ -88,7 +93,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan params.draft_model = this.options.lmStudioDraftModelId } - const results = await this.client.chat.completions.create(params) + let results + try { + results = await this.client.chat.completions.create(params) + } catch (error) { + throw handleOpenAIError(error, this.providerName) + } const matcher = new XmlMatcher( "think", @@ -164,7 +174,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan params.draft_model = this.options.lmStudioDraftModelId } - const response = await this.client.chat.completions.create(params) + let response + try { + response = await this.client.chat.completions.create(params) + } catch (error) { + throw handleOpenAIError(error, this.providerName) + } return response.choices[0]?.message.content || "" } catch (error) { throw new Error( diff --git a/src/api/providers/ollama.ts b/src/api/providers/ollama.ts index 75895908e9cf..ab9df116aa84 100644 --- a/src/api/providers/ollama.ts +++ b/src/api/providers/ollama.ts @@ -14,12 +14,14 @@ import { ApiStream } from "../transform/stream" import { BaseProvider } from "./base-provider" import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" import { getApiRequestTimeout } from "./utils/timeout-config" +import { handleOpenAIError } from "./utils/openai-error-handler" type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"] export class OllamaHandler extends BaseProvider implements SingleCompletionHandler { protected options: ApiHandlerOptions private client: OpenAI + private readonly providerName = "Ollama" constructor(options: ApiHandlerOptions) { super() @@ -54,13 +56,18 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl ...(useR1Format ? convertToR1Format(messages) : convertToOpenAiMessages(messages)), ] - const stream = await this.client.chat.completions.create({ - model: this.getModel().id, - messages: openAiMessages, - temperature: this.options.modelTemperature ?? 0, - stream: true, - stream_options: { include_usage: true }, - }) + let stream + try { + stream = await this.client.chat.completions.create({ + model: this.getModel().id, + messages: openAiMessages, + temperature: this.options.modelTemperature ?? 0, + stream: true, + stream_options: { include_usage: true }, + }) + } catch (error) { + throw handleOpenAIError(error, this.providerName) + } const matcher = new XmlMatcher( "think", (chunk) => @@ -106,14 +113,19 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl try { const modelId = this.getModel().id const useR1Format = modelId.toLowerCase().includes("deepseek-r1") - const response = await this.client.chat.completions.create({ - model: this.getModel().id, - messages: useR1Format - ? convertToR1Format([{ role: "user", content: prompt }]) - : [{ role: "user", content: prompt }], - temperature: this.options.modelTemperature ?? 
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 36158d770c17..501ff3d5f556 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -24,6 +24,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -31,6 +32,7 @@ import { getApiRequestTimeout } from "./utils/timeout-config"
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "OpenAI"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -174,10 +176,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			const matcher = new XmlMatcher(
 				"think",
@@ -236,10 +243,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield {
 				type: "text",
@@ -281,15 +293,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
-				throw new Error(`OpenAI completion error: ${error.message}`)
+				throw new Error(`${this.providerName} completion error: ${error.message}`)
 			}
 
 			throw error
@@ -327,10 +344,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield* this.handleStreamResponse(stream)
 		} else {
@@ -352,10 +374,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield {
 				type: "text",
diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts
index 208ba563c644..580b17331194 100644
--- a/src/api/providers/openrouter.ts
+++ b/src/api/providers/openrouter.ts
@@ -25,6 +25,7 @@ import { getModelEndpoints } from "./fetchers/modelEndpointCache"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler } from "../index"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // Image generation types
 interface ImageGenerationResponse {
@@ -85,6 +86,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 	private client: OpenAI
 	protected models: ModelRecord = {}
 	protected endpoints: ModelRecord = {}
+	private readonly providerName = "OpenRouter"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -161,7 +163,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}
 
-		const stream = await this.client.chat.completions.create(completionParams)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		let lastUsage: CompletionUsage | undefined = undefined
 
@@ -259,7 +266,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}
 
-		const response = await this.client.chat.completions.create(completionParams)
+		let response
+		try {
+			response = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		if ("error" in response) {
 			const error = response.error as { message?: string; code?: number }
diff --git a/src/api/providers/requesty.ts b/src/api/providers/requesty.ts
index 0661cebe0989..16aefae52861 100644
--- a/src/api/providers/requesty.ts
+++ b/src/api/providers/requesty.ts
@@ -16,6 +16,7 @@ import { getModels } from "./fetchers/modelCache"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { toRequestyServiceUrl } from "../../shared/utils/requesty"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // Requesty usage includes an extra field for Anthropic use cases.
 // Safely cast the prompt token details section to the appropriate structure.
@@ -42,6 +43,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 	protected models: ModelRecord = {}
 	private client: OpenAI
 	private baseURL: string
+	private readonly providerName = "Requesty"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -49,9 +51,11 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 		this.options = options
 		this.baseURL = toRequestyServiceUrl(options.requestyBaseUrl)
 
+		const apiKey = this.options.requestyApiKey ?? "not-provided"
+
 		this.client = new OpenAI({
 			baseURL: this.baseURL,
-			apiKey: this.options.requestyApiKey ?? "not-provided",
+			apiKey: apiKey,
 			defaultHeaders: DEFAULT_HEADERS,
 		})
 	}
@@ -126,7 +130,12 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 			requesty: { trace_id: metadata?.taskId, extra: { mode: metadata?.mode } },
 		}
 
-		const stream = await this.client.chat.completions.create(completionParams)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		let lastUsage: any = undefined
 		for await (const chunk of stream) {
@@ -162,7 +171,12 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 			temperature: temperature,
 		}
 
-		const response: OpenAI.Chat.ChatCompletion = await this.client.chat.completions.create(completionParams)
+		let response: OpenAI.Chat.ChatCompletion
+		try {
+			response = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 		return response.choices[0]?.message.content || ""
 	}
 }
diff --git a/src/api/providers/utils/openai-error-handler.ts b/src/api/providers/utils/openai-error-handler.ts
new file mode 100644
index 000000000000..90be81f7c434
--- /dev/null
+++ b/src/api/providers/utils/openai-error-handler.ts
@@ -0,0 +1,29 @@
+/**
+ * General error handler for OpenAI client errors
+ * Transforms technical errors into user-friendly messages
+ */
+
+import i18n from "../../../i18n/setup"
+
+/**
+ * Handles OpenAI client errors and transforms them into user-friendly messages
+ * @param error - The error to handle
+ * @param providerName - The name of the provider for context in error messages
+ * @returns The original error or a transformed user-friendly error
+ */
+export function handleOpenAIError(error: unknown, providerName: string): Error {
+	if (error instanceof Error) {
+		const msg = error.message || ""
+
+		// Invalid character/ByteString conversion error in API key
+		if (msg.includes("Cannot convert argument to a ByteString")) {
+			return new Error(i18n.t("common:errors.api.invalidKeyInvalidChars"))
+		}
+
+		// For other Error instances, wrap with provider-specific prefix
+		return new Error(`${providerName} completion error: ${msg}`)
+	}
+
+	// Non-Error: wrap with provider-specific prefix
+	return new Error(`${providerName} completion error: ${String(error)}`)
+}
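
Reviewer note (not part of the patch): a quick sketch of the handler's three branches, assuming the English locale entry added further below. The `TypeError` text mirrors the ByteString error that fetch-based clients raise when an API key contains a character outside Latin-1 as the Authorization header is built; the exact index and code-point digits here are illustrative.

import { handleOpenAIError } from "./openai-error-handler"

// 1) API key with a non-Latin-1 character -> friendly, localized message.
const byteStringError = new TypeError(
	"Cannot convert argument to a ByteString because the character at index 7 has a value of 8230 which is greater than 255.",
)
console.log(handleOpenAIError(byteStringError, "OpenAI").message)
// -> "API key contains invalid characters."

// 2) Any other Error keeps its message, prefixed with the provider name.
console.log(handleOpenAIError(new Error("401 Unauthorized"), "xAI").message)
// -> "xAI completion error: 401 Unauthorized"

// 3) Non-Error values are stringified and wrapped the same way.
console.log(handleOpenAIError("socket hang up", "Ollama").message)
// -> "Ollama completion error: socket hang up"
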
diff --git a/src/api/providers/xai.ts b/src/api/providers/xai.ts
index 596c9e89b8ca..7eb6e9866dd8 100644
--- a/src/api/providers/xai.ts
+++ b/src/api/providers/xai.ts
@@ -12,19 +12,24 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 const XAI_DEFAULT_TEMPERATURE = 0
 
 export class XAIHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "xAI"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
+
+		const apiKey = this.options.xaiApiKey ?? "not-provided"
+
 		this.client = new OpenAI({
 			baseURL: "https://api.x.ai/v1",
-			apiKey: this.options.xaiApiKey ?? "not-provided",
+			apiKey: apiKey,
 			defaultHeaders: DEFAULT_HEADERS,
 		})
 	}
@@ -48,15 +53,20 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 		const { id: modelId, info: modelInfo, reasoning } = this.getModel()
 
 		// Use the OpenAI-compatible API.
-		const stream = await this.client.chat.completions.create({
-			model: modelId,
-			max_tokens: modelInfo.maxTokens,
-			temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
-			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-			stream: true,
-			stream_options: { include_usage: true },
-			...(reasoning && reasoning),
-		})
+		let stream
+		try {
+			stream = await this.client.chat.completions.create({
+				model: modelId,
+				max_tokens: modelInfo.maxTokens,
+				temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
+				messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+				stream: true,
+				stream_options: { include_usage: true },
+				...(reasoning && reasoning),
+			})
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -78,12 +88,15 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 			if (chunk.usage) {
 				// Extract detailed token information if available
 				// First check for prompt_tokens_details structure (real API response)
-				const promptDetails = "prompt_tokens_details" in chunk.usage ? chunk.usage.prompt_tokens_details : null;
-				const cachedTokens = promptDetails && "cached_tokens" in promptDetails ? promptDetails.cached_tokens : 0;
+				const promptDetails = "prompt_tokens_details" in chunk.usage ? chunk.usage.prompt_tokens_details : null
+				const cachedTokens = promptDetails && "cached_tokens" in promptDetails ? promptDetails.cached_tokens : 0
 
 				// Fall back to direct fields in usage (used in test mocks)
-				const readTokens = cachedTokens || ("cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0);
-				const writeTokens = "cache_creation_input_tokens" in chunk.usage ? (chunk.usage as any).cache_creation_input_tokens : 0;
+				const readTokens =
+					cachedTokens ||
+					("cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0)
+				const writeTokens =
+					"cache_creation_input_tokens" in chunk.usage ? (chunk.usage as any).cache_creation_input_tokens : 0
 
 				yield {
 					type: "usage",
@@ -108,11 +121,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`xAI completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 }
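
Reviewer note (not part of the patch): the locale entries below back the `i18n.t("common:errors.api.invalidKeyInvalidChars")` call in the new handler — `common:` selects each language's `common.json` namespace and the dotted path walks the nested `errors.api` object. A minimal lookup sketch under that i18next-style assumption:

// Hypothetical mirror of the namespace/key resolution for this one key.
const common = {
	errors: {
		api: { invalidKeyInvalidChars: "API key contains invalid characters." },
	},
}
const resolved = "errors.api.invalidKeyInvalidChars"
	.split(".")
	.reduce<any>((node, part) => node?.[part], common)
console.log(resolved) // -> "API key contains invalid characters."
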
diff --git a/src/i18n/locales/ca/common.json b/src/i18n/locales/ca/common.json
index 74b265f51313..583f82693a6a 100644
--- a/src/i18n/locales/ca/common.json
+++ b/src/i18n/locales/ca/common.json
@@ -107,6 +107,9 @@
 		"roo": {
 			"authenticationRequired": "El proveïdor Roo requereix autenticació al núvol. Si us plau, inicieu sessió a Roo Code Cloud."
 		},
+		"api": {
+			"invalidKeyInvalidChars": "La clau API conté caràcters no vàlids."
+		},
 		"mode_import_failed": "Ha fallat la importació del mode: {{error}}"
 	},
 	"warnings": {
diff --git a/src/i18n/locales/de/common.json b/src/i18n/locales/de/common.json
index 856e4e1dce18..2d1a2778edcf 100644
--- a/src/i18n/locales/de/common.json
+++ b/src/i18n/locales/de/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo-Anbieter erfordert Cloud-Authentifizierung. Bitte melde dich bei Roo Code Cloud an."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-Schlüssel enthält ungültige Zeichen."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/en/common.json b/src/i18n/locales/en/common.json
index e413bc0890ce..40a897ceb312 100644
--- a/src/i18n/locales/en/common.json
+++ b/src/i18n/locales/en/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo provider requires cloud authentication. Please sign in to Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API key contains invalid characters."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/es/common.json b/src/i18n/locales/es/common.json
index 7b2b9a43476c..0b7c7e12ee8f 100644
--- a/src/i18n/locales/es/common.json
+++ b/src/i18n/locales/es/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "El proveedor Roo requiere autenticación en la nube. Por favor, inicia sesión en Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La clave API contiene caracteres inválidos."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/fr/common.json b/src/i18n/locales/fr/common.json
index e9282a0b97f2..9b92cf7240c8 100644
--- a/src/i18n/locales/fr/common.json
+++ b/src/i18n/locales/fr/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Le fournisseur Roo nécessite une authentification cloud. Veuillez vous connecter à Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La clé API contient des caractères invalides."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/hi/common.json b/src/i18n/locales/hi/common.json
index 3f5ab60413e9..f9bbed0dfcab 100644
--- a/src/i18n/locales/hi/common.json
+++ b/src/i18n/locales/hi/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo प्रदाता को क्लाउड प्रमाणीकरण की आवश्यकता है। कृपया Roo Code Cloud में साइन इन करें।"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API कुंजी में अमान्य वर्ण हैं।"
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/id/common.json b/src/i18n/locales/id/common.json
index 3c4305650342..147d88c4e74e 100644
--- a/src/i18n/locales/id/common.json
+++ b/src/i18n/locales/id/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Penyedia Roo memerlukan autentikasi cloud. Silakan masuk ke Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Kunci API mengandung karakter tidak valid."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/it/common.json b/src/i18n/locales/it/common.json
index c19114baf1f5..c304896163e8 100644
--- a/src/i18n/locales/it/common.json
+++ b/src/i18n/locales/it/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Il provider Roo richiede l'autenticazione cloud. Accedi a Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "La chiave API contiene caratteri non validi."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/ja/common.json b/src/i18n/locales/ja/common.json
index d595484fa1f5..3f2863510814 100644
--- a/src/i18n/locales/ja/common.json
+++ b/src/i18n/locales/ja/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Rooプロバイダーはクラウド認証が必要です。Roo Code Cloudにサインインしてください。"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "APIキーに無効な文字が含まれています。"
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/ko/common.json b/src/i18n/locales/ko/common.json
index 3209952c6d8e..d1f2ef9c44ad 100644
--- a/src/i18n/locales/ko/common.json
+++ b/src/i18n/locales/ko/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo 제공업체는 클라우드 인증이 필요합니다. Roo Code Cloud에 로그인하세요."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API 키에 유효하지 않은 문자가 포함되어 있습니다."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/nl/common.json b/src/i18n/locales/nl/common.json
index c0c6ba35e9fa..a2b29d8df03c 100644
--- a/src/i18n/locales/nl/common.json
+++ b/src/i18n/locales/nl/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo provider vereist cloud authenticatie. Log in bij Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-sleutel bevat ongeldige karakters."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/pl/common.json b/src/i18n/locales/pl/common.json
index 475ba069eefe..45e0651fac6e 100644
--- a/src/i18n/locales/pl/common.json
+++ b/src/i18n/locales/pl/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Dostawca Roo wymaga uwierzytelnienia w chmurze. Zaloguj się do Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Klucz API zawiera nieprawidłowe znaki."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/pt-BR/common.json b/src/i18n/locales/pt-BR/common.json
index 55a41fcf1b78..001457707e02 100644
--- a/src/i18n/locales/pt-BR/common.json
+++ b/src/i18n/locales/pt-BR/common.json
@@ -107,6 +107,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "O provedor Roo requer autenticação na nuvem. Faça login no Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "A chave API contém caracteres inválidos."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/ru/common.json b/src/i18n/locales/ru/common.json
index 505998daa22c..3500a9a5add6 100644
--- a/src/i18n/locales/ru/common.json
+++ b/src/i18n/locales/ru/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Провайдер Roo требует облачной аутентификации. Войдите в Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API-ключ содержит недопустимые символы."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/tr/common.json b/src/i18n/locales/tr/common.json
index 9b8af8d94cbe..4089cff21765 100644
--- a/src/i18n/locales/tr/common.json
+++ b/src/i18n/locales/tr/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo sağlayıcısı bulut kimlik doğrulaması gerektirir. Lütfen Roo Code Cloud'a giriş yapın."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API anahtarı geçersiz karakterler içeriyor."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/vi/common.json b/src/i18n/locales/vi/common.json
index 4877f297adc7..ecf686f520e0 100644
--- a/src/i18n/locales/vi/common.json
+++ b/src/i18n/locales/vi/common.json
@@ -103,6 +103,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Nhà cung cấp Roo yêu cầu xác thực đám mây. Vui lòng đăng nhập vào Roo Code Cloud."
+		},
+		"api": {
+			"invalidKeyInvalidChars": "Khóa API chứa ký tự không hợp lệ."
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/zh-CN/common.json b/src/i18n/locales/zh-CN/common.json
index 5bac0d2847ea..9f4d24f6ebfb 100644
--- a/src/i18n/locales/zh-CN/common.json
+++ b/src/i18n/locales/zh-CN/common.json
@@ -108,6 +108,9 @@
 		},
 		"roo": {
 			"authenticationRequired": "Roo 提供商需要云认证。请登录 Roo Code Cloud。"
+		},
+		"api": {
+			"invalidKeyInvalidChars": "API 密钥包含无效字符。"
 		}
 	},
 	"warnings": {
diff --git a/src/i18n/locales/zh-TW/common.json b/src/i18n/locales/zh-TW/common.json
index 0f82f48d1372..d40b3e094f57 100644
--- a/src/i18n/locales/zh-TW/common.json
+++ b/src/i18n/locales/zh-TW/common.json
@@ -103,6 +103,9 @@
 		"roo": {
 			"authenticationRequired": "Roo 提供者需要雲端認證。請登入 Roo Code Cloud。"
 		},
+		"api": {
+			"invalidKeyInvalidChars": "API 金鑰包含無效字元。"
+		},
 		"mode_import_failed": "匯入模式失敗:{{error}}"
 	},
 	"warnings": {