diff --git a/.cursor/rules/llm.mdc b/.cursor/rules/llm.mdc index e96571bc40..c0350176fd 100644 --- a/.cursor/rules/llm.mdc +++ b/.cursor/rules/llm.mdc @@ -28,39 +28,20 @@ import { z } from "zod"; import { createScopedLogger } from "@/utils/logger"; import { chatCompletionObject } from "@/utils/llms"; import type { UserEmailWithAI } from "@/utils/llms/types"; -// Import other necessary types and utilities - -// 1. Create a scoped logger -const logger = createScopedLogger("feature-name"); - -// 2. Define output schema with zod -export const schema = z.object({ - // Define expected response fields - field1: z.string(), - field2: z.number(), - nested: z.object({ - subfield: z.string(), - }), - array_field: z.array(z.string()), -}); - -// 3. Create main function with typed options + export async function featureFunction(options: { inputData: InputType; user: UserEmailWithAI; }) { const { inputData, user } = options; - // 4. Add early validation/returns if (!inputData || [other validation conditions]) { logger.warn("Invalid input for feature function"); return null; } - // 5. Define system prompt const system = `[Detailed system prompt that defines the LLM's role and task]`; - // 6. Construct user prompt const prompt = `[User prompt with context and specific instructions] @@ -69,24 +50,23 @@ export async function featureFunction(options: { ${user.about ? `${user.about}` : ""}`; - // 7. Log inputs - logger.trace("Input", { system, prompt }); - - // 8. Call LLM with proper configuration const result = await chatCompletionObject({ userAi: user, system, prompt, - schema, + schema: z.object({ + field1: z.string(), + field2: z.number(), + nested: z.object({ + subfield: z.string(), + }), + array_field: z.array(z.string()), + }), userEmail: user.email, usageLabel: "Feature Name", }); - // 9. Log outputs - logger.trace("Output", { result }); - - // 10. 
Return validated result - return result.object; +return result.object; } ``` diff --git a/apps/web/app/(app)/[emailAccountId]/assistant/RulesPrompt.tsx b/apps/web/app/(app)/[emailAccountId]/assistant/RulesPrompt.tsx index 905007c9df..d9ccd58e76 100644 --- a/apps/web/app/(app)/[emailAccountId]/assistant/RulesPrompt.tsx +++ b/apps/web/app/(app)/[emailAccountId]/assistant/RulesPrompt.tsx @@ -309,7 +309,7 @@ function RulesPromptForm({ if (result?.data?.rulesPrompt) { editorRef.current?.appendText( - result?.data?.rulesPrompt, + `\n${result?.data?.rulesPrompt || ""}`, ); } else { toast.error("Error generating prompt"); diff --git a/apps/web/env.ts b/apps/web/env.ts index 61acd67271..87ee39747c 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -42,6 +42,11 @@ export const env = createEnv({ CHAT_LLM_MODEL: z.string().optional(), CHAT_OPENROUTER_PROVIDERS: z.string().optional(), // Comma-separated list of OpenRouter providers for chat (e.g., "Google Vertex,Anthropic") + OPENROUTER_BACKUP_MODEL: z + .string() + .optional() + .default("google/gemini-2.5-flash"), + OPENAI_API_KEY: z.string().optional(), ANTHROPIC_API_KEY: z.string().optional(), BEDROCK_ACCESS_KEY: z.string().optional(), @@ -160,9 +165,6 @@ export const env = createEnv({ NEXT_PUBLIC_BEDROCK_SONNET_MODEL: z .string() .default("us.anthropic.claude-3-7-sonnet-20250219-v1:0"), - NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL: z - .string() - .default("us.anthropic.claude-3-5-sonnet-20241022-v2:0"), NEXT_PUBLIC_OLLAMA_MODEL: z.string().optional(), NEXT_PUBLIC_APP_HOME_PATH: z.string().default("/setup"), NEXT_PUBLIC_DUB_REFER_DOMAIN: z.string().optional(), @@ -216,8 +218,6 @@ export const env = createEnv({ NEXT_PUBLIC_AXIOM_TOKEN: process.env.NEXT_PUBLIC_AXIOM_TOKEN, NEXT_PUBLIC_BEDROCK_SONNET_MODEL: process.env.NEXT_PUBLIC_BEDROCK_SONNET_MODEL, - NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL: - process.env.NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL, NEXT_PUBLIC_OLLAMA_MODEL: process.env.NEXT_PUBLIC_OLLAMA_MODEL, 
NEXT_PUBLIC_APP_HOME_PATH: process.env.NEXT_PUBLIC_APP_HOME_PATH, NEXT_PUBLIC_DUB_REFER_DOMAIN: process.env.NEXT_PUBLIC_DUB_REFER_DOMAIN, diff --git a/apps/web/utils/actions/ai-rule.ts b/apps/web/utils/actions/ai-rule.ts index ba7c74bc60..68197da4ab 100644 --- a/apps/web/utils/actions/ai-rule.ts +++ b/apps/web/utils/actions/ai-rule.ts @@ -552,7 +552,7 @@ export const generateRulesPromptAction = actionClient const result = await aiGenerateRulesPrompt({ emailAccount, lastSentEmails, - snippets: snippetsResult.map((snippet) => snippet.text), + snippets: snippetsResult.snippets.map((snippet) => snippet.text), userLabels: labelsWithCounts.map((label) => label.label), }); diff --git a/apps/web/utils/ai/assistant/process-user-request.ts b/apps/web/utils/ai/assistant/process-user-request.ts index 9d0c7e3d18..da5383e7c9 100644 --- a/apps/web/utils/ai/assistant/process-user-request.ts +++ b/apps/web/utils/ai/assistant/process-user-request.ts @@ -1,7 +1,7 @@ -import { tool } from "ai"; +import { stepCountIs, tool } from "ai"; import { z } from "zod"; import { after } from "next/server"; -import { chatCompletionTools } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import { createScopedLogger } from "@/utils/logger"; import { type Category, @@ -35,6 +35,7 @@ import { import { env } from "@/env"; import { posthogCaptureEvent } from "@/utils/posthog"; import { getUserCategoriesForNames } from "@/utils/category.server"; +import { getModel } from "@/utils/llms/model"; const logger = createScopedLogger("ai-fix-rules"); @@ -198,10 +199,18 @@ ${senderCategory || "No category"} } } - const result = await chatCompletionTools({ - userAi: emailAccount.user, - modelType: "chat", + const modelOptions = getModel(emailAccount.user, "chat"); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Process user request", + modelOptions, + }); + + const result = await generateText({ + ...modelOptions, messages: allMessages, + stopWhen: 
stepCountIs(5), tools: { update_conditional_operator: tool({ description: "Update the conditional operator of a rule", @@ -622,9 +631,6 @@ ${senderCategory || "No category"} // no execute function - invoking it will terminate the agent }), }, - maxSteps: 5, - label: "Fix Rule", - userEmail: emailAccount.email, }); const toolCalls = result.steps.flatMap((step) => step.toolCalls); diff --git a/apps/web/utils/ai/categorize-sender/ai-categorize-senders.ts b/apps/web/utils/ai/categorize-sender/ai-categorize-senders.ts index dcc995284f..cad15c296c 100644 --- a/apps/web/utils/ai/categorize-sender/ai-categorize-senders.ts +++ b/apps/web/utils/ai/categorize-sender/ai-categorize-senders.ts @@ -3,13 +3,9 @@ import { isDefined } from "@/utils/types"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { Category } from "@prisma/client"; import { formatCategoriesForPrompt } from "@/utils/ai/categorize-sender/format-categories"; -import { createScopedLogger } from "@/utils/logger"; import { extractEmailAddress } from "@/utils/email"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("ai-categorize-senders"); +import { createGenerateObject } from "@/utils/llms"; export const REQUEST_MORE_INFORMATION_CATEGORY = "RequestMoreInformation"; export const UNKNOWN_CATEGORY = "Unknown"; @@ -87,43 +83,22 @@ ${formatCategoriesForPrompt(categories)} - Accuracy is more important than completeness - Only use the categories provided above - Respond with "Unknown" if unsure +- Return your response in JSON format `; - logger.trace("Categorize senders", { system, prompt }); - - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema: categorizeSendersSchema, - // userEmail: emailAccount.email, - // usageLabel: "Categorize senders bulk", - // }); + const modelOptions = getModel(emailAccount.user, 
"chat"); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Categorize senders bulk", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: categorizeSendersSchema, - providerOptions, - }); - - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Categorize senders bulk", - }); - } - - logger.trace("Categorize senders response", { - senders: aiResponse.object.senders, }); const matchedSenders = matchSendersWithFullEmail( @@ -143,8 +118,6 @@ ${formatCategoriesForPrompt(categories)} return r; }); - logger.trace("Categorize senders results", { results }); - return results; } diff --git a/apps/web/utils/ai/categorize-sender/ai-categorize-single-sender.ts b/apps/web/utils/ai/categorize-sender/ai-categorize-single-sender.ts index 33fe120793..e5bac0f9e8 100644 --- a/apps/web/utils/ai/categorize-sender/ai-categorize-single-sender.ts +++ b/apps/web/utils/ai/categorize-sender/ai-categorize-single-sender.ts @@ -1,21 +1,9 @@ -import { generateObject } from "ai"; import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { Category } from "@prisma/client"; import { formatCategoriesForPrompt } from "@/utils/ai/categorize-sender/format-categories"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("aiCategorizeSender"); - -const categorizeSenderSchema = z.object({ - rationale: z.string().describe("Keep it short. 
1-2 sentences max."), - category: z.string(), - // possibleCategories: z - // .array(z.string()) - // .describe("Possible categories when the main category is unknown"), -}); +import { createGenerateObject } from "@/utils/llms"; export async function aiCategorizeSender({ emailAccount, @@ -55,45 +43,29 @@ ${formatCategoriesForPrompt(categories)} 3. If the category is clear, assign it. 4. If you're not certain, respond with "Unknown". 5. If multiple categories are possible, respond with "Unknown". +6. Return your response in JSON format. `; - logger.trace("aiCategorizeSender", { system, prompt }); - - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema: categorizeSenderSchema, - // userEmail: emailAccount.email, - // usageLabel: "Categorize sender", - // }); + const modelOptions = getModel(emailAccount.user); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Categorize sender", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, - schema: categorizeSenderSchema, - providerOptions, + schema: z.object({ + rationale: z.string().describe("Keep it short. 
1-2 sentences max."), + category: z.string(), + }), }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Categorize sender", - }); - } - if (!categories.find((c) => c.name === aiResponse.object.category)) return null; - logger.trace("aiCategorizeSender result", { aiResponse: aiResponse.object }); - return aiResponse.object; } diff --git a/apps/web/utils/ai/choose-rule/ai-choose-args.ts b/apps/web/utils/ai/choose-rule/ai-choose-args.ts index 7eddfcdec5..fca0dce20f 100644 --- a/apps/web/utils/ai/choose-rule/ai-choose-args.ts +++ b/apps/web/utils/ai/choose-rule/ai-choose-args.ts @@ -1,12 +1,12 @@ import { z } from "zod"; import { InvalidArgumentError } from "ai"; -import { chatCompletionObject, withRetry } from "@/utils/llms"; +import { createGenerateText, withRetry } from "@/utils/llms"; import { stringifyEmail } from "@/utils/stringify-email"; import { createScopedLogger } from "@/utils/logger"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { EmailForLLM, RuleWithActions } from "@/utils/types"; import type { ActionType } from "@prisma/client"; -import type { ModelType } from "@/utils/llms/model"; +import { getModel, type ModelType } from "@/utils/llms/model"; /** * AI Argument Generator for Email Actions @@ -71,25 +71,35 @@ export async function aiGenerateArgs({ const prompt = getPrompt({ email, selectedRule }); logger.info("Calling chat completion tools", loggerOptions); - logger.trace("System and prompt", { system, prompt }); // logger.trace("Parameters:", zodToJsonSchema(parameters)); + const modelOptions = getModel(emailAccount.user, modelType); + + const generateText = createGenerateText({ + label: "Args for rule", + userEmail: emailAccount.email, + modelOptions, + }); + const aiResponse = await withRetry( () => - chatCompletionObject({ - userAi: emailAccount.user, - modelType, - prompt, + generateText({ + ...modelOptions, system, - schemaName: 
"Apply rule", - schemaDescription: "Apply the rule with the given arguments.", - schema: z.object( - Object.fromEntries( - parameters.map((p) => [`${p.type}-${p.actionId}`, p.parameters]), - ), - ), - usageLabel: "Args for rule", - userEmail: emailAccount.email, + prompt, + tools: { + apply_rule: { + description: "Apply the rule with the given arguments.", + inputSchema: z.object( + Object.fromEntries( + parameters.map((p) => [ + `${p.type}-${p.actionId}`, + p.parameters, + ]), + ), + ), + }, + }, }), { retryIf: (error: unknown) => InvalidArgumentError.isInstance(error), @@ -98,9 +108,17 @@ export async function aiGenerateArgs({ }, ); - const result = aiResponse.object; + const toolCall = aiResponse.toolCalls?.[0]; + + if (!toolCall?.input) { + logger.warn("No tool call found", { + ...loggerOptions, + aiResponse, + }); + return; + } - logger.trace("Result", { result }); + const result = toolCall.input; return result; } diff --git a/apps/web/utils/ai/choose-rule/ai-choose-rule.ts b/apps/web/utils/ai/choose-rule/ai-choose-rule.ts index 1a1bb85df3..2f2cfa7102 100644 --- a/apps/web/utils/ai/choose-rule/ai-choose-rule.ts +++ b/apps/web/utils/ai/choose-rule/ai-choose-rule.ts @@ -2,14 +2,10 @@ import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import { stringifyEmail } from "@/utils/stringify-email"; import type { EmailForLLM } from "@/utils/types"; -import { createScopedLogger } from "@/utils/logger"; import { getModel, type ModelType } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; // import { Braintrust } from "@/utils/braintrust"; -const logger = createScopedLogger("ai-choose-rule"); - // const braintrust = new Braintrust("choose-rule-2"); type GetAiResponseOptions = { @@ -79,44 +75,16 @@ Respond with a valid JSON object with the following fields: ${emailSection} `; - logger.trace("Input", { system, prompt }); - - // 
const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // modelType, - // messages: [ - // { - // role: "system", - // content: system, - // // This will cache if the user has a very long prompt. Although usually won't do anything as it's hard for this prompt to reach 1024 tokens - // // https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#cache-limitations - // // NOTE: Needs permission from AWS to use this. Otherwise gives error: "You do not have access to explicit prompt caching" - // // Currently only available to select customers: https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html - // // providerOptions: { - // // bedrock: { cachePoint: { type: "ephemeral" } }, - // // anthropic: { cacheControl: { type: "ephemeral" } }, - // // }, - // }, - // { - // role: "user", - // content: prompt, - // }, - // ], - // schema: z.object({ - // reason: z.string(), - // ruleName: z.string().nullish(), - // noMatchFound: z.boolean().nullish(), - // }), - // userEmail: emailAccount.email, - // usageLabel: "Choose rule", - // }); + const modelOptions = getModel(emailAccount.user, modelType); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Choose rule", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: z.object({ @@ -124,21 +92,8 @@ ${emailSection} ruleName: z.string().nullish(), noMatchFound: z.boolean().nullish(), }), - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Choose rule", - }); - } - - logger.trace("Response", aiResponse.object); - // braintrust.insertToDataset({ // id: email.id, // input: { diff --git a/apps/web/utils/ai/choose-rule/ai-detect-recurring-pattern.ts 
b/apps/web/utils/ai/choose-rule/ai-detect-recurring-pattern.ts index 09e0213db5..f715a8fd9e 100644 --- a/apps/web/utils/ai/choose-rule/ai-detect-recurring-pattern.ts +++ b/apps/web/utils/ai/choose-rule/ai-detect-recurring-pattern.ts @@ -2,12 +2,11 @@ import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { EmailForLLM } from "@/utils/types"; import { stringifyEmail } from "@/utils/stringify-email"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; +import { createScopedLogger } from "@/utils/logger"; -const logger = createScopedLogger("detect-recurring-pattern"); +const logger = createScopedLogger("ai-detect-recurring-pattern"); // const braintrust = new Braintrust("recurring-pattern-detection"); @@ -97,42 +96,22 @@ ${stringifyEmail(email, 500)} .join("\n")} `; - logger.trace("Input", { system, prompt }); - try { - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema, - // userEmail: emailAccount.email, - // usageLabel: "Detect recurring pattern", - // }); + const modelOptions = getModel(emailAccount.user, "chat"); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Detect recurring pattern", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema, - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Detect recurring pattern", - }); - } - - logger.trace("Response", aiResponse.object); - // braintrust.insertToDataset({ // id: emails[0].id, // input: { diff --git 
a/apps/web/utils/ai/clean/ai-clean-select-labels.ts b/apps/web/utils/ai/clean/ai-clean-select-labels.ts index fc9a63e165..be8739cc11 100644 --- a/apps/web/utils/ai/clean/ai-clean-select-labels.ts +++ b/apps/web/utils/ai/clean/ai-clean-select-labels.ts @@ -1,11 +1,7 @@ import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("ai/clean/select-labels"); +import { createGenerateObject } from "@/utils/llms"; const schema = z.object({ labels: z.array(z.string()).optional() }); @@ -25,46 +21,26 @@ Guidelines: - Do not create labels that weren't mentioned - If no labels are specified, return an empty array -Return the labels as an array of strings.`; +Return the labels as an array of strings in JSON format.`; const prompt = ` ${instructions} `.trim(); - logger.trace("Input", { system, prompt }); - - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema, - // userEmail: emailAccount.email, - // usageLabel: "Clean - Select Labels", - // }); + const modelOptions = getModel(emailAccount.user); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Clean - Select Labels", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema, - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Clean - Select Labels", - }); - } - - logger.trace("Result", { response: aiResponse.object }); - return aiResponse.object.labels; } diff --git a/apps/web/utils/ai/clean/ai-clean.ts 
b/apps/web/utils/ai/clean/ai-clean.ts index dc0ee6acda..a9117095d0 100644 --- a/apps/web/utils/ai/clean/ai-clean.ts +++ b/apps/web/utils/ai/clean/ai-clean.ts @@ -1,17 +1,13 @@ import { z } from "zod"; -import { generateObject } from "ai"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import type { EmailForLLM } from "@/utils/types"; import { stringifyEmailSimple } from "@/utils/stringify-email"; import { formatDateForLLM, formatRelativeTimeForLLM } from "@/utils/date"; import { preprocessBooleanLike } from "@/utils/zod"; import { getModel } from "@/utils/llms/model"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; // import { Braintrust } from "@/utils/braintrust"; -const logger = createScopedLogger("ai/clean"); - // TODO: allow specific labels // Pass in prompt labels const schema = z.object({ @@ -24,7 +20,7 @@ const schema = z.object({ export async function aiClean({ emailAccount, - messageId, + messageId: _messageId, messages, instructions, skips, @@ -62,7 +58,9 @@ ${ ? `Do not archive emails that are actual financial records: receipts, payment confirmations, or invoices. However, do archive payment-related communications like overdue payment notifications, payment reminders, or subscription renewal notices.` : "" -}`.trim(); +} + +Return your response in JSON format.`.trim(); const message = `${stringifyEmailSimple(lastMessage)} ${ @@ -92,41 +90,21 @@ The current date is ${currentDate}. // ${user.about ? 
`${user.about}` : ""} - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user); - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema, - // userEmail: emailAccount.email, - // usageLabel: "Clean", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Clean", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema, - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Clean", - }); - } - - logger.trace("Result", { response: aiResponse.object }); - // braintrust.insertToDataset({ // id: messageId, // input: { message, currentDate }, diff --git a/apps/web/utils/ai/digest/summarize-email-for-digest.ts b/apps/web/utils/ai/digest/summarize-email-for-digest.ts index 3eba41dbae..8a4cd69e08 100644 --- a/apps/web/utils/ai/digest/summarize-email-for-digest.ts +++ b/apps/web/utils/ai/digest/summarize-email-for-digest.ts @@ -4,8 +4,7 @@ import { createScopedLogger } from "@/utils/logger"; import type { EmailForLLM } from "@/utils/types"; import { stringifyEmailSimple } from "@/utils/stringify-email"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; export const schema = z.object({ type: z.enum(["structured", "unstructured"]).describe("Type of content"), @@ -58,7 +57,7 @@ Your task is to: **Formatting rules:** - Follow the schema provided separately (do not describe or return the schema). - Do not include HTML, markdown, or explanations. -- Return only the final JSON result (or "null"). 
+- Return only the final result in JSON format (or "null"). Now, classify and summarize the following email: `; @@ -70,44 +69,24 @@ ${stringifyEmailSimple(userMessageForPrompt)} Use this category as context to help interpret the email: ${ruleName}.`; - logger.trace("Input", { system, prompt }); - logger.info("Summarizing email for digest"); try { - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema, - // userEmail: emailAccount.email, - // usageLabel: "Summarize email", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const modelOptions = getModel(emailAccount.user); + + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Summarize email", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema, - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Summarize email", - }); - } - - logger.trace("Result", { response: aiResponse.object }); - // Temporary logging to check the summarization output if (aiResponse.object.type === "unstructured") { logger.info("Summarized email as summary", { diff --git a/apps/web/utils/ai/example-matches/find-example-matches.ts b/apps/web/utils/ai/example-matches/find-example-matches.ts index da62569365..0acfe03ba0 100644 --- a/apps/web/utils/ai/example-matches/find-example-matches.ts +++ b/apps/web/utils/ai/example-matches/find-example-matches.ts @@ -1,9 +1,10 @@ -import { tool } from "ai"; +import { stepCountIs, tool } from "ai"; import { z } from "zod"; import type { gmail_v1 } from "@googleapis/gmail"; -import { chatCompletionTools } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import { queryBatchMessages } from 
"@/utils/gmail/message"; +import { getModel } from "@/utils/llms/model"; const FIND_EXAMPLE_MATCHES = "findExampleMatches"; @@ -91,11 +92,19 @@ Remember, precision is crucial - only include matches you are absolutely sure ab }, }); - const aiResponse = await chatCompletionTools({ - userAi: emailAccount.user, + const modelOptions = getModel(emailAccount.user, "chat"); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Find example matches", + modelOptions, + }); + + const aiResponse = await generateText({ + ...modelOptions, system, prompt, - maxSteps: 10, + stopWhen: stepCountIs(10), tools: { listEmails: listEmailsTool(gmail), [FIND_EXAMPLE_MATCHES]: tool({ @@ -103,8 +112,6 @@ Remember, precision is crucial - only include matches you are absolutely sure ab inputSchema: findExampleMatchesSchema, }), }, - userEmail: emailAccount.email, - label: "Find example matches", }); const findExampleMatchesToolCalls = aiResponse.toolCalls.filter( diff --git a/apps/web/utils/ai/group/create-group.ts b/apps/web/utils/ai/group/create-group.ts index 3b21401b05..ae41809244 100644 --- a/apps/web/utils/ai/group/create-group.ts +++ b/apps/web/utils/ai/group/create-group.ts @@ -1,13 +1,12 @@ -import { tool } from "ai"; +import { stepCountIs, tool } from "ai"; import { z } from "zod"; import type { gmail_v1 } from "@googleapis/gmail"; -import { chatCompletionTools } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import type { Group } from "@prisma/client"; import { queryBatchMessages } from "@/utils/gmail/message"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import { createScopedLogger } from "@/utils/logger"; - -// no longer in use. delete? 
+import { getModel } from "@/utils/llms/model"; const logger = createScopedLogger("aiCreateGroup"); @@ -59,11 +58,6 @@ export async function aiGenerateGroupItems( gmail: gmail_v1.Gmail, group: Pick, ): Promise> { - logger.info("aiGenerateGroupItems", { - name: group.name, - prompt: group.prompt, - }); - const system = `You are an AI assistant specializing in email management and organization. Your task is to create highly specific email groups based on user prompts and their actual email history. @@ -87,13 +81,19 @@ Key guidelines: 8. It's better to suggest fewer, more reliable criteria than to risk overgeneralization. 9. If the user explicitly excludes certain types of emails, ensure your suggestions do not include them.`; - logger.trace("aiGenerateGroupItems", { system, prompt }); + const modelOptions = getModel(emailAccount.user); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Create group", + modelOptions, + }); - const aiResponse = await chatCompletionTools({ - userAi: emailAccount.user, + const aiResponse = await generateText({ + ...modelOptions, system, prompt, - maxSteps: 10, + stopWhen: stepCountIs(10), tools: { listEmails: listEmailsTool(gmail), [GENERATE_GROUP_ITEMS]: tool({ @@ -101,16 +101,12 @@ Key guidelines: inputSchema: generateGroupItemsSchema, }), }, - userEmail: emailAccount.email, - label: "Create group", }); const generateGroupItemsToolCalls = aiResponse.toolCalls.filter( ({ toolName }) => toolName === GENERATE_GROUP_ITEMS, ); - logger.trace("aiGenerateGroupItems result", { generateGroupItemsToolCalls }); - const combinedArgs = generateGroupItemsToolCalls.reduce< z.infer >( @@ -133,11 +129,6 @@ async function verifyGroupItems( group: Pick, initialItems: z.infer, ): Promise> { - logger.info("verifyGroupItems", { - name: group.name, - prompt: group.prompt, - }); - const system = `You are an AI assistant specializing in email management and organization. 
Your task is to identify and remove any incorrect or overly broad criteria from the generated email group. One word subjects are almost always too broad and should be removed.`; @@ -160,11 +151,19 @@ Guidelines: 5. If all items are correct and specific, you can return empty arrays for removedSenders and removedSubjects. 6. When using listEmails, make separate calls for each sender and subject. Do not combine them in a single query.`; - const aiResponse = await chatCompletionTools({ - userAi: emailAccount.user, + const modelOptions = getModel(emailAccount.user); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Verify group criteria", + modelOptions, + }); + + const aiResponse = await generateText({ + ...modelOptions, system, prompt, - maxSteps: 10, + stopWhen: stepCountIs(10), tools: { listEmails: listEmailsTool(gmail), [VERIFY_GROUP_ITEMS]: tool({ @@ -172,8 +171,6 @@ Guidelines: inputSchema: verifyGroupItemsSchema, }), }, - userEmail: emailAccount.email, - label: "Verify group criteria", }); const verifyGroupItemsToolCalls = aiResponse.toolCalls.filter( diff --git a/apps/web/utils/ai/knowledge/extract-from-email-history.ts b/apps/web/utils/ai/knowledge/extract-from-email-history.ts index 952f02ad8f..28fcfa0abf 100644 --- a/apps/web/utils/ai/knowledge/extract-from-email-history.ts +++ b/apps/web/utils/ai/knowledge/extract-from-email-history.ts @@ -6,12 +6,11 @@ import { stringifyEmail } from "@/utils/stringify-email"; import { getTodayForLLM } from "@/utils/llms/helpers"; import { preprocessBooleanLike } from "@/utils/zod"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; const logger = createScopedLogger("EmailHistoryExtractor"); -const SYSTEM_PROMPT = `You are an email history analysis agent. 
Your task is to analyze the provided historical email threads and extract relevant information that would be helpful for drafting a response to the current email thread. +const system = `You are an email history analysis agent. Your task is to analyze the provided historical email threads and extract relevant information that would be helpful for drafting a response to the current email thread. Your task: 1. Analyze the historical email threads to understand relevant past context and interactions @@ -23,7 +22,9 @@ Provide a concise summary (max 500 characters) that captures the most important - Key unresolved points or questions from past exchanges - Any commitments or promises made in previous conversations - Important dates or deadlines established in past emails -- Notable preferences or patterns in communication`; +- Notable preferences or patterns in communication + +Return your response in JSON format.`; const getUserPrompt = ({ currentThreadMessages, @@ -61,7 +62,7 @@ ${getTodayForLLM()} Analyze the historical email threads and extract any relevant information that would be helpful for drafting a response to the current email thread. 
Provide a concise summary of the key historical context.`; }; -const extractionSchema = z.object({ +const schema = z.object({ hasHistoricalContext: z .preprocess(preprocessBooleanLike, z.boolean()) .describe("Whether there is any relevant historical context found."), @@ -89,50 +90,27 @@ export async function aiExtractFromEmailHistory({ if (historicalMessages.length === 0) return null; - const system = SYSTEM_PROMPT; const prompt = getUserPrompt({ currentThreadMessages, historicalMessages, emailAccount, }); - logger.trace("Input", { system, prompt }); - - // const result = await chatCompletionObject({ - // system, - // prompt, - // schema: extractionSchema, - // usageLabel: "Email history extraction", - // userAi: emailAccount.user, - // userEmail: emailAccount.email, - // modelType: "economy", - // }); + const modelOptions = getModel(emailAccount.user, "economy"); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - "economy", - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Email history extraction", + modelOptions, + }); const result = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, - schema: extractionSchema, - providerOptions, + schema, }); - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Email history extraction", - }); - } - - logger.trace("Output", result.object); - return result.object.summary; } catch (error) { logger.error("Failed to extract information from email history", { error }); diff --git a/apps/web/utils/ai/knowledge/extract.ts b/apps/web/utils/ai/knowledge/extract.ts index 7919ecd2e0..d4dc669cde 100644 --- a/apps/web/utils/ai/knowledge/extract.ts +++ b/apps/web/utils/ai/knowledge/extract.ts @@ -3,12 +3,11 @@ import { createScopedLogger } from "@/utils/logger"; import type { Knowledge } from "@prisma/client"; import type { EmailAccountWithAI } from 
"@/utils/llms/types"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; const logger = createScopedLogger("ai/knowledge/extract"); -const SYSTEM_PROMPT = `You are a knowledge extraction agent. Your task is to analyze the provided knowledge base entries and extract the most relevant information for drafting an email response, based ONLY on the provided knowledge base entries. +const system = `You are a knowledge extraction agent. Your task is to analyze the provided knowledge base entries and extract the most relevant information for drafting an email response, based ONLY on the provided knowledge base entries. Given: 1. A set of knowledge base entries (each with a title and content) @@ -95,46 +94,23 @@ export async function aiExtractRelevantKnowledge({ try { if (!knowledgeBase.length) return null; - const system = SYSTEM_PROMPT; const prompt = getUserPrompt({ knowledgeBase, emailContent, emailAccount }); - logger.trace("Input", { system, prompt: prompt.slice(0, 500) }); + const modelOptions = getModel(emailAccount.user, "economy"); - // const result = await chatCompletionObject({ - // system, - // prompt, - // schema: extractionSchema, - // usageLabel: "Knowledge extraction", - // userAi: emailAccount.user, - // userEmail: emailAccount.email, - // modelType: "economy", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - "economy", - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Knowledge extraction", + modelOptions, + }); const result = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: extractionSchema, - providerOptions, }); - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Knowledge extraction", - }); - } - - 
logger.trace("Output", result.object); - return result.object; } catch (error) { logger.error("Failed to extract knowledge", { error }); diff --git a/apps/web/utils/ai/knowledge/writing-style.ts b/apps/web/utils/ai/knowledge/writing-style.ts index aa11e1987d..57c07f4ff0 100644 --- a/apps/web/utils/ai/knowledge/writing-style.ts +++ b/apps/web/utils/ai/knowledge/writing-style.ts @@ -5,8 +5,7 @@ import type { EmailForLLM } from "@/utils/types"; import { truncate } from "@/utils/string"; import { removeExcessiveWhitespace } from "@/utils/string"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; const logger = createScopedLogger("writing-style-analyzer"); @@ -78,40 +77,20 @@ ${ : "" }`; - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user); - // const result = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema, - // userEmail: emailAccount.email, - // usageLabel: "Writing Style Analysis", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Writing Style Analysis", + modelOptions, + }); const result = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema, - providerOptions, }); - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Writing Style Analysis", - }); - } - - logger.trace("Output", { result }); - return result.object; } diff --git a/apps/web/utils/ai/reply/check-if-needs-reply.ts b/apps/web/utils/ai/reply/check-if-needs-reply.ts index 58dcb024ea..1357ed99b0 100644 --- a/apps/web/utils/ai/reply/check-if-needs-reply.ts +++ b/apps/web/utils/ai/reply/check-if-needs-reply.ts @@ -1,26 +1,13 @@ import { z 
} from "zod"; -import { chatCompletionObject } from "@/utils/llms"; +import { createGenerateObject } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import type { EmailForLLM } from "@/utils/types"; import { stringifyEmailFromBody, stringifyEmailSimple, } from "@/utils/stringify-email"; import { preprocessBooleanLike } from "@/utils/zod"; - -const logger = createScopedLogger("check-if-needs-reply"); - -const schema = z.object({ - rationale: z - .string() - .describe("Brief one-line explanation for the decision."), - needsReply: z.preprocess( - preprocessBooleanLike, - z.boolean().describe("Whether a reply is needed."), - ), -}); -export type AICheckResult = z.infer; +import { getModel } from "@/utils/llms/model"; export async function aiCheckIfNeedsReply({ emailAccount, @@ -30,7 +17,7 @@ export async function aiCheckIfNeedsReply({ emailAccount: EmailAccountWithAI; messageToSend: EmailForLLM; threadContextMessages: EmailForLLM[]; -}): Promise { +}) { // If messageToSend somehow is null/undefined, default to no reply needed. if (!messageToSend) return { needsReply: false, rationale: "No message provided" }; @@ -60,21 +47,33 @@ ${threadContextMessages : "" } -Decide if the message we are sending needs a reply. +Decide if the message we are sending needs a reply. Respond with a JSON object with the following fields: +- rationale: Brief one-line explanation for the decision. +- needsReply: Whether a reply is needed. 
`.trim(); - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user); - const aiResponse = await chatCompletionObject({ - userAi: emailAccount.user, - system, - prompt, - schema, + const generateObject = createGenerateObject({ userEmail: emailAccount.email, - usageLabel: "Check if needs reply", + label: "Check if needs reply", + modelOptions, }); - logger.trace("Result", { response: aiResponse.object }); + const aiResponse = await generateObject({ + ...modelOptions, + system, + prompt, + schema: z.object({ + rationale: z + .string() + .describe("Brief one-line explanation for the decision."), + needsReply: z.preprocess( + preprocessBooleanLike, + z.boolean().describe("Whether a reply is needed."), + ), + }), + }); - return aiResponse.object as AICheckResult; + return aiResponse.object; } diff --git a/apps/web/utils/ai/reply/draft-with-knowledge.ts b/apps/web/utils/ai/reply/draft-with-knowledge.ts index 2c9755200b..82ea167528 100644 --- a/apps/web/utils/ai/reply/draft-with-knowledge.ts +++ b/apps/web/utils/ai/reply/draft-with-knowledge.ts @@ -1,13 +1,11 @@ import { z } from "zod"; import { createScopedLogger } from "@/utils/logger"; -import { chatCompletionObject } from "@/utils/llms"; +import { createGenerateObject } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { EmailForLLM } from "@/utils/types"; import { stringifyEmail } from "@/utils/stringify-email"; import { getTodayForLLM } from "@/utils/llms/helpers"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; const logger = createScopedLogger("DraftWithKnowledge"); @@ -25,6 +23,8 @@ Don't reply with a Subject. Only reply with the body of the email. IMPORTANT: Use placeholders sparingly! Only use them where you have limited information. Never use placeholders for the user's name. You do not need to sign off with the user's name. Do not add a signature. 
Do not invent information. For example, DO NOT offer to meet someone at a specific time as you don't know what time the user is available. + +Return your response in JSON format. `; const getUserPrompt = ({ @@ -131,41 +131,21 @@ export async function aiDraftWithKnowledge({ writingStyle, }); - logger.trace("Input", { system, prompt }); - - // const result = await chatCompletionObject({ - // system, - // prompt, - // schema: draftSchema, - // usageLabel: "Email draft with knowledge", - // userAi: emailAccount.user, - // userEmail: emailAccount.email, - // }); + const modelOptions = getModel(emailAccount.user); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Email draft with knowledge", + modelOptions, + }); const result = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: draftSchema, - providerOptions, }); - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Email draft with knowledge", - }); - } - - logger.trace("Output", result.object); - return result.object.reply; } catch (error) { logger.error("Failed to draft email with knowledge", { error }); diff --git a/apps/web/utils/ai/reply/generate-nudge.ts b/apps/web/utils/ai/reply/generate-nudge.ts index e9bf751ff9..4e335490b9 100644 --- a/apps/web/utils/ai/reply/generate-nudge.ts +++ b/apps/web/utils/ai/reply/generate-nudge.ts @@ -1,11 +1,9 @@ -import { chatCompletion } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import { stringifyEmail } from "@/utils/stringify-email"; -import { createScopedLogger } from "@/utils/logger"; import type { EmailForLLM } from "@/utils/types"; import { getTodayForLLM } from "@/utils/llms/helpers"; - -const logger = createScopedLogger("generate-nudge"); +import 
{ getModel } from "@/utils/llms/model"; export async function aiGenerateNudge({ messages, @@ -37,18 +35,19 @@ Write a brief follow-up email to politely nudge for a response. ${getTodayForLLM()} IMPORTANT: The person you're writing an email for is: ${messages.at(-1)?.from}.`; - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user, "chat"); - const response = await chatCompletion({ - userAi: emailAccount.user, - system, - prompt, + const generateText = createGenerateText({ + label: "Reply", userEmail: emailAccount.email, - modelType: "chat", - usageLabel: "Reply", + modelOptions, }); - logger.trace("Output", { response: response.text }); + const response = await generateText({ + ...modelOptions, + system, + prompt, + }); return response.text; } diff --git a/apps/web/utils/ai/rule/create-rule.ts b/apps/web/utils/ai/rule/create-rule.ts index a352a1cf9c..ba01db3455 100644 --- a/apps/web/utils/ai/rule/create-rule.ts +++ b/apps/web/utils/ai/rule/create-rule.ts @@ -3,12 +3,8 @@ import { type CreateOrUpdateRuleSchemaWithCategories, createRuleSchema, } from "@/utils/ai/rule/create-rule-schema"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("ai-create-rule"); +import { createGenerateText } from "@/utils/llms"; export async function aiCreateRule( instructions: string, @@ -18,42 +14,26 @@ export async function aiCreateRule( "You are an AI assistant that helps people manage their emails."; const prompt = `Generate a rule for these instructions:\n${instructions}`; - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // schemaName: "Generate rule", - // schemaDescription: "Generate a rule to handle the email", - // schema: createRuleSchema, - // userEmail: emailAccount.email, - // usageLabel: "Categorize 
rule", - // }); + const modelOptions = getModel(emailAccount.user); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Categorize rule", + modelOptions, + }); - const aiResponse = await generateObject({ - model: llmModel, + const aiResponse = await generateText({ + ...modelOptions, system, prompt, - schema: createRuleSchema, - providerOptions, + tools: { + generate_rule: { + description: "Generate a rule to handle the email", + inputSchema: createRuleSchema, + }, + }, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Categorize rule", - }); - } - - const result = aiResponse.object; - - logger.trace("Result", { result }); - - return result as CreateOrUpdateRuleSchemaWithCategories; + return aiResponse.toolCalls?.[0] + ?.input as CreateOrUpdateRuleSchemaWithCategories; } diff --git a/apps/web/utils/ai/rule/diff-rules.ts b/apps/web/utils/ai/rule/diff-rules.ts index 5771117d93..bd99e1b40d 100644 --- a/apps/web/utils/ai/rule/diff-rules.ts +++ b/apps/web/utils/ai/rule/diff-rules.ts @@ -1,12 +1,21 @@ -import { z } from "zod"; +import z from "zod"; import { createPatch } from "diff"; -import { generateObject } from "ai"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("ai-diff-rules"); +import { createGenerateText } from "@/utils/llms"; + +const inputSchema = z.object({ + addedRules: z.array(z.string()).describe("The added rules"), + editedRules: z + .array( + z.object({ + oldRule: z.string().describe("The old rule"), + newRule: z.string().describe("The new rule"), + }), + ) + .describe("The edited rules"), + removedRules: z.array(z.string()).describe("The removed
rules"), +}); export async function aiDiffRules({ emailAccount, @@ -46,78 +55,29 @@ IMPORTANT: Do not include a rule in more than one category. If a rule is edited, If a rule is edited, it is an edit and not a removal! Be extra careful to not make this mistake. `; - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // schemaName: "Diff rules", - // schemaDescription: - // "Analyze two prompt files and their diff to return the differences", - // schema: z.object({ - // addedRules: z.array(z.string()).describe("The added rules"), - // editedRules: z - // .array( - // z.object({ - // oldRule: z.string().describe("The old rule"), - // newRule: z.string().describe("The new rule"), - // }), - // ) - // .describe("The edited rules"), - // removedRules: z.array(z.string()).describe("The removed rules"), - // }), - // output: "array", - // userEmail: emailAccount.email, - // usageLabel: "Diff rules", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - "chat", - ); - - try { - const result = await generateObject({ - model: llmModel, - system, - prompt, - providerOptions, - schemaName: "Diff rules", - schemaDescription: - "Analyze two prompt files and their diff to return the differences. 
Return the result as JSON.", - schema: z.object({ - addedRules: z.array(z.string()).describe("The added rules"), - editedRules: z - .array( - z.object({ - oldRule: z.string().describe("The old rule"), - newRule: z.string().describe("The new rule"), - }), - ) - .describe("The edited rules"), - removedRules: z.array(z.string()).describe("The removed rules"), - }), - }); - - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Diff rules", - }); - } - - const parsedRules = result.object; - - logger.trace("Result", { parsedRules }); - - return parsedRules; - } catch (error) { - logger.error("Error diffing rules", { - errorMessage: error instanceof Error ? error.message : "Unknown error", - error, - }); - throw error; - } + const modelOptions = getModel(emailAccount.user, "chat"); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Diff rules", + modelOptions, + }); + + const result = await generateText({ + ...modelOptions, system, prompt, + tools: { + diff_rules: { + description: + "Analyze two prompt files and their diff to return the differences", + inputSchema, + }, + }, + }); + + const parsedRules = result.toolCalls?.[0]?.input as z.infer< + typeof inputSchema + >; + return parsedRules; } diff --git a/apps/web/utils/ai/rule/find-existing-rules.ts b/apps/web/utils/ai/rule/find-existing-rules.ts index 6c5aeb1be0..96cea7d377 100644 --- a/apps/web/utils/ai/rule/find-existing-rules.ts +++ b/apps/web/utils/ai/rule/find-existing-rules.ts @@ -1,19 +1,22 @@ import { z } from "zod"; -import { generateText, tool } from "ai"; +import { tool } from "ai"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import type { Action, Rule } from "@prisma/client"; import { getModel } from "@/utils/llms/model"; -import { saveAiUsage } from "@/utils/usage"; -import { isDefined } from "@/utils/types"; - -const schema = z - .object({ - ruleId:
z.string().describe("The id of the existing rule"), - promptNumber: z - .number() - .describe("The index of the prompt that matches the rule"), - }) - .describe("The existing rules that match the prompt rules"); +import { createGenerateText } from "@/utils/llms"; + +const schema = z.object({ + existingRules: z + .array( + z.object({ + ruleId: z.string().describe("The id of the existing rule"), + promptNumber: z + .number() + .describe("The index of the prompt that matches the rule"), + }), + ) + .describe("The existing rules that match the prompt rules"), +}); const findExistingRules = tool({ description: "Find the existing rules that match the prompt rules.", @@ -48,78 +51,47 @@ ${JSON.stringify(databaseRules, null, 2)} Please return the existing rules that match the prompt rules.`; - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // output: "array", - // schemaName: "Find existing rules", - // schemaDescription: "Find the existing rules that match the prompt rules", - // schema: z - // .object({ - // ruleId: z.string().describe("The id of the existing rule"), - // promptNumber: z - // .number() - // .describe("The index of the prompt that matches the rule"), - // }) - // .describe("The existing rules that match the prompt rules"), - // userEmail: emailAccount.email, - // usageLabel: "Find existing rules", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - "chat", - ); + const modelOptions = getModel(emailAccount.user, "chat"); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Find existing rules", + modelOptions, + }); const result = await generateText({ - model: llmModel, + ...modelOptions, system, prompt, - providerOptions, tools: { findExistingRules, }, }); - if (result.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: result.usage, - provider, - model, - label: "Find existing rules", - }); 
- } - - const existingRules = result.toolCalls - .map((toolCall) => { - if (toolCall.toolName !== "findExistingRules") return; - - const rule = toolCall.input as z.infer; - - const promptRule = rule.promptNumber - ? promptRules[rule.promptNumber - 1] - : null; - - const toRemove = promptRule - ? promptRulesToRemove.includes(promptRule) - : null; - - const toEdit = promptRule - ? promptRulesToEdit.find((r) => r.oldRule === promptRule) - : null; - - return { - rule: databaseRules.find((dbRule) => dbRule.id === rule.ruleId), - promptNumber: rule.promptNumber, - promptRule, - toRemove: !!toRemove, - toEdit: !!toEdit, - updatedPromptRule: toEdit?.newRule, - }; - }) - .filter(isDefined); + const parsedRules = result.toolCalls?.[0]?.input as z.infer; + + const existingRules = parsedRules.existingRules.map((rule) => { + const promptRule = rule.promptNumber + ? promptRules[rule.promptNumber - 1] + : null; + + const toRemove = promptRule + ? promptRulesToRemove.includes(promptRule) + : null; + + const toEdit = promptRule + ? 
promptRulesToEdit.find((r) => r.oldRule === promptRule) + : null; + + return { + rule: databaseRules.find((dbRule) => dbRule.id === rule.ruleId), + promptNumber: rule.promptNumber, + promptRule, + toRemove: !!toRemove, + toEdit: !!toEdit, + updatedPromptRule: toEdit?.newRule, + }; + }); return { editedRules: existingRules.filter((rule) => rule.toEdit), diff --git a/apps/web/utils/ai/rule/generate-prompt-on-delete-rule.ts b/apps/web/utils/ai/rule/generate-prompt-on-delete-rule.ts index 187cc4321b..b2c32d12c9 100644 --- a/apps/web/utils/ai/rule/generate-prompt-on-delete-rule.ts +++ b/apps/web/utils/ai/rule/generate-prompt-on-delete-rule.ts @@ -1,13 +1,9 @@ import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import type { RuleWithRelations } from "./create-prompt-from-rule"; import { createPromptFromRule } from "./create-prompt-from-rule"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("generate-prompt-on-delete-rule"); +import { createGenerateObject } from "@/utils/llms"; const parameters = z.object({ updatedPrompt: z @@ -30,7 +26,7 @@ export async function generatePromptOnDeleteRule({ if (!deletedRulePrompt) return ""; const system = - "You are an AI assistant that helps maintain email management rule prompts. Your task is to update an existing prompt file by removing a specific rule while maintaining the exact format and style."; + "You are an AI assistant that helps maintain email management rule prompts. Your task is to update an existing prompt file by removing a specific rule while maintaining the exact format and style. Return the result in JSON format."; const prompt = `Here is the current prompt file that defines email management rules: @@ -52,42 +48,22 @@ ${deletedRulePrompt} 6. 
If you cannot find the rule in the current prompt, return the current prompt as is `; - logger.trace("Input", { system, prompt }); - - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // schema: parameters, - // userEmail: emailAccount.email, - // usageLabel: "Update prompt on delete rule", - // }); + const modelOptions = getModel(emailAccount.user, "chat"); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Update prompt on delete rule", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: parameters, - providerOptions, }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Update prompt on delete rule", - }); - } - const parsedResponse = aiResponse.object; - logger.trace("Output", { updatedPrompt: parsedResponse.updatedPrompt }); - return parsedResponse.updatedPrompt; } diff --git a/apps/web/utils/ai/rule/generate-prompt-on-update-rule.ts b/apps/web/utils/ai/rule/generate-prompt-on-update-rule.ts index 129024875f..a2153ebe89 100644 --- a/apps/web/utils/ai/rule/generate-prompt-on-update-rule.ts +++ b/apps/web/utils/ai/rule/generate-prompt-on-update-rule.ts @@ -1,19 +1,9 @@ import { z } from "zod"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import type { RuleWithRelations } from "./create-prompt-from-rule"; import { createPromptFromRule } from "./create-prompt-from-rule"; import { getModel } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("generate-prompt-on-update-rule"); - -const parameters = z.object({ - updatedPrompt: z - .string() - 
.describe("The updated prompt file with the rule updated"), -}); +import { createGenerateObject } from "@/utils/llms"; export async function generatePromptOnUpdateRule({ emailAccount, @@ -38,7 +28,7 @@ Your task is to update an existing prompt file by modifying a specific rule whil Requirements: 1. Maintain the exact same format and style as the original 2. Keep all other rules intact -3. Return only the updated prompt file +3. Return only the updated prompt file in JSON format 4. Ensure the output is properly formatted with consistent spacing and line breaks 5. If you cannot find a similar rule in the current prompt, append the new rule at the end.`; @@ -57,42 +47,24 @@ ${currentRulePrompt} ${updatedRulePrompt} `; - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user, "chat"); - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // schema: parameters, - // userEmail: emailAccount.email, - // usageLabel: "Update prompt on update rule", - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Update prompt on update rule", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, - schema: parameters, - providerOptions, + schema: z.object({ + updatedPrompt: z + .string() + .describe("The updated prompt file with the rule updated"), + }), }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "Update prompt on update rule", - }); - } - - const parsedResponse = aiResponse.object; - - logger.trace("Output", { updatedPrompt: parsedResponse.updatedPrompt }); - - return parsedResponse.updatedPrompt; + return aiResponse.object.updatedPrompt; } diff --git 
a/apps/web/utils/ai/rule/generate-rules-prompt.ts b/apps/web/utils/ai/rule/generate-rules-prompt.ts index 25bad8f137..35a2fde3e6 100644 --- a/apps/web/utils/ai/rule/generate-rules-prompt.ts +++ b/apps/web/utils/ai/rule/generate-rules-prompt.ts @@ -1,9 +1,7 @@ import { z } from "zod"; -import { chatCompletionObject } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; - -const logger = createScopedLogger("ai-generate-rules-prompt"); +import { getModel } from "@/utils/llms/model"; const parameters = z.object({ rules: z @@ -37,11 +35,9 @@ export async function aiGenerateRulesPrompt({ lastSentEmails: string[]; userLabels: string[]; snippets: string[]; -}): Promise { +}): Promise { const labelsList = userLabels - ? userLabels - .map((label) => ``) - .join("\n") + ? userLabels.map((label) => ``).join("\n") : "No labels found"; const hasSnippets = snippets.length > 0; @@ -104,47 +100,54 @@ ${ IMPORTANT: Our system can only perform email management actions (labeling, archiving, forwarding, drafting responses). We cannot add events to calendars or create todo list items. Do not suggest rules that include these actions. -Your response should only include the list of general rules. Aim for 3-10 broadly applicable rules that would be useful for this user's email management.`; - - logger.trace("generate-rules-prompt", { system, prompt }); - - const aiResponse = hasSnippets - ? 
await chatCompletionObject({ - userAi: emailAccount.user, - prompt, - system, - schemaName: "Generate rules", - schemaDescription: "Generate a list of email management rules", - schema: parametersSnippets, - userEmail: emailAccount.email, - usageLabel: "Generate rules prompt", - }) - : await chatCompletionObject({ - userAi: emailAccount.user, - prompt, - system, - schemaName: "Generate rules", - schemaDescription: "Generate a list of email management rules", - schema: parameters, - userEmail: emailAccount.email, - usageLabel: "Generate rules prompt", - }); - - const args = aiResponse?.object; - - logger.trace("Result", { args }); - - return parseRulesResponse(args, hasSnippets); +Your response should only include the list of general rules. Aim for 3-10 broadly applicable rules that would be useful for this user's email management. + +IMPORTANT: Do not create overly specific rules that only occur on a one off basis.`; + + const modelOptions = getModel(emailAccount.user, "chat"); + + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Generate rules prompt", + modelOptions, + }); + + const aiResponse = await generateText({ + ...modelOptions, + system, + prompt, + tools: { + generate_rules: { + description: "Generate a list of email management rules", + inputSchema: hasSnippets ? parametersSnippets : parameters, + }, + }, + }); + + const result = aiResponse?.toolCalls?.[0]?.input; + + if (!result) return; + + return parseRulesResponse(result, hasSnippets); } -function parseRulesResponse(args: unknown, hasSnippets: boolean): string[] { +function parseRulesResponse(result: unknown, hasSnippets: boolean): string[] { if (hasSnippets) { - const parsedRules = args as z.infer; - return parsedRules.rules.map(({ rule, snippet }) => - snippet ? 
`${rule}\n---\n${snippet}\n---\n` : rule, - ); + const parsedRules = result as z.infer; + return parsedRules.rules.map(({ rule, snippet }) => { + const formattedRule = `* ${rule}\n`; + if (snippet) return `${formattedRule}${formatSnippet(snippet)}\n`; + return formattedRule; + }); } - const parsedRules = args as z.infer; + const parsedRules = result as z.infer; return parsedRules.rules; } + +function formatSnippet(snippet: string) { + return snippet + .split("\n") + .map((line) => `> ${line}`) + .join("\n"); +} diff --git a/apps/web/utils/ai/rule/prompt-to-rules.ts b/apps/web/utils/ai/rule/prompt-to-rules.ts index 2086bcafba..85acb8f9dc 100644 --- a/apps/web/utils/ai/rule/prompt-to-rules.ts +++ b/apps/web/utils/ai/rule/prompt-to-rules.ts @@ -1,6 +1,6 @@ import { z } from "zod"; import { tool } from "ai"; -import { chatCompletionTools } from "@/utils/llms"; +import { createGenerateText } from "@/utils/llms"; import type { EmailAccountWithAI } from "@/utils/llms/types"; import { type CreateOrUpdateRuleSchemaWithCategories, @@ -9,6 +9,7 @@ import { } from "@/utils/ai/rule/create-rule-schema"; import { createScopedLogger } from "@/utils/logger"; import { convertMentionsToLabels } from "@/utils/mention"; +import { getModel } from "@/utils/llms/model"; const logger = createScopedLogger("ai-prompt-to-rules"); @@ -56,10 +57,16 @@ export async function aiPromptToRules({ ${cleanedPromptFile} `; - logger.trace("Input", { system, prompt }); + const modelOptions = getModel(emailAccount.user, "chat"); - const aiResponse = await chatCompletionTools({ - userAi: emailAccount.user, + const generateText = createGenerateText({ + userEmail: emailAccount.email, + label: "Prompt to rules", + modelOptions, + }); + + const aiResponse = await generateText({ + ...modelOptions, prompt, system, tools: { @@ -68,8 +75,6 @@ ${cleanedPromptFile} inputSchema: getSchema(), }), }, - userEmail: emailAccount.email, - label: "Prompt to rules", }); const toolCall = aiResponse.toolCalls.find( @@ -83,8 
+88,6 @@ ${cleanedPromptFile} const rules = (toolCall.input as any)?.rules; - logger.trace("Output", { rules }); - return rules as CreateOrUpdateRuleSchemaWithCategories[]; } diff --git a/apps/web/utils/ai/snippets/find-snippets.ts b/apps/web/utils/ai/snippets/find-snippets.ts index e17b77336a..422559fcac 100644 --- a/apps/web/utils/ai/snippets/find-snippets.ts +++ b/apps/web/utils/ai/snippets/find-snippets.ts @@ -1,13 +1,9 @@ import { z } from "zod"; -import { generateObject } from "ai"; import { stringifyEmail } from "@/utils/stringify-email"; import type { EmailForLLM } from "@/utils/types"; import type { EmailAccountWithAI } from "@/utils/llms/types"; -import { createScopedLogger } from "@/utils/logger"; import { getModel } from "@/utils/llms/model"; -import { saveAiUsage } from "@/utils/usage"; - -const logger = createScopedLogger("AI Find Snippets"); +import { createGenerateObject } from "@/utils/llms"; export async function aiFindSnippets({ emailAccount, @@ -53,51 +49,27 @@ ${sentEmails .map((email) => `${stringifyEmail(email, 2000)}`) .join("\n")}`; - // const aiResponse = await chatCompletionObject({ - // userAi: emailAccount.user, - // prompt, - // system, - // output: "array", - // schemaName: "Find snippets", - // schemaDescription: "Snippets", - // schema: z.object({ - // text: z.string(), - // count: z.number(), - // }), - // userEmail: emailAccount.email ?? 
"", - // usageLabel: "ai-find-snippets", - // }); + const modelOptions = getModel(emailAccount.user, "chat"); - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - "chat", - ); + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "ai-find-snippets", + modelOptions, + }); const aiResponse = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, - providerOptions, - output: "array", - schemaName: "Find snippets", - schemaDescription: "Snippets", schema: z.object({ - text: z.string(), - count: z.number(), + snippets: z.array( + z.object({ + text: z.string(), + count: z.number(), + }), + ), }), }); - if (aiResponse.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: aiResponse.usage, - provider, - model, - label: "ai-find-snippets", - }); - } - - logger.trace("Result", { result: aiResponse.object }); - return aiResponse.object; } diff --git a/apps/web/utils/cold-email/is-cold-email.ts b/apps/web/utils/cold-email/is-cold-email.ts index 4c601e92c8..aeb14b2032 100644 --- a/apps/web/utils/cold-email/is-cold-email.ts +++ b/apps/web/utils/cold-email/is-cold-email.ts @@ -13,8 +13,7 @@ import { createScopedLogger } from "@/utils/logger"; import type { EmailForLLM } from "@/utils/types"; import type { EmailProvider } from "@/utils/email/provider"; import { getModel, type ModelType } from "@/utils/llms/model"; -import { generateObject } from "ai"; -import { saveAiUsage } from "@/utils/usage"; +import { createGenerateObject } from "@/utils/llms"; const logger = createScopedLogger("ai-cold-email"); @@ -203,49 +202,24 @@ Determine if the email is a cold email or not.`; ${stringifyEmail(email, 500)} `; - logger.trace("AI is cold email prompt", { system, prompt }); - - // const response = await chatCompletionObject({ - // userAi: emailAccount.user, - // system, - // prompt, - // schema: z.object({ - // coldEmail: z.boolean(), - // reason: z.string(), - // }), - // 
userEmail: emailAccount.email, - // usageLabel: "Cold email check", - // modelType, - // }); - - const { provider, model, llmModel, providerOptions } = getModel( - emailAccount.user, - modelType, - ); + const modelOptions = getModel(emailAccount.user, modelType); + + const generateObject = createGenerateObject({ + userEmail: emailAccount.email, + label: "Cold email check", + modelOptions, + }); const response = await generateObject({ - model: llmModel, + ...modelOptions, system, prompt, schema: z.object({ coldEmail: z.boolean(), reason: z.string(), }), - providerOptions, }); - if (response.usage) { - await saveAiUsage({ - email: emailAccount.email, - usage: response.usage, - provider, - model, - label: "Cold email check", - }); - } - - logger.trace("AI is cold email response", { response: response.object }); - return response.object; } diff --git a/apps/web/utils/llms/config.ts b/apps/web/utils/llms/config.ts index 6692af38b0..0fa9e7004d 100644 --- a/apps/web/utils/llms/config.ts +++ b/apps/web/utils/llms/config.ts @@ -19,8 +19,6 @@ export const Model = { GPT_4O_MINI: "gpt-4o-mini", CLAUDE_3_7_SONNET_BEDROCK: env.NEXT_PUBLIC_BEDROCK_SONNET_MODEL, CLAUDE_4_SONNET_BEDROCK: "us.anthropic.claude-sonnet-4-20250514-v1:0", - // BEDROCK_ANTHROPIC_BACKUP_MODEL: - // env.NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL, CLAUDE_3_7_SONNET_ANTHROPIC: "claude-3-7-sonnet-20250219", CLAUDE_3_5_SONNET_OPENROUTER: "anthropic/claude-3.5-sonnet", CLAUDE_3_7_SONNET_OPENROUTER: "anthropic/claude-3.7-sonnet", diff --git a/apps/web/utils/llms/index.ts b/apps/web/utils/llms/index.ts index 9e317edf1a..7d24bb04a5 100644 --- a/apps/web/utils/llms/index.ts +++ b/apps/web/utils/llms/index.ts @@ -1,4 +1,3 @@ -import type { z } from "zod"; import { APICallError, type ModelMessage, @@ -12,11 +11,9 @@ import { stepCountIs, type StreamTextOnFinishCallback, type StreamTextOnStepFinishCallback, - type ToolSet, } from "ai"; -import { env } from "@/env"; +import type { LanguageModelV2 } from 
"@ai-sdk/provider"; import { saveAiUsage } from "@/utils/usage"; -import { Provider } from "@/utils/llms/config"; import type { UserAIFields } from "@/utils/llms/types"; import { addUserErrorMessage, ErrorType } from "@/utils/error-messages"; import { @@ -41,127 +38,129 @@ const commonOptions: { providerOptions?: Record>; } = { experimental_telemetry: { isEnabled: true } }; -export async function chatCompletion({ - userAi, - modelType = "default", - prompt, - system, +export function createGenerateText({ userEmail, - usageLabel, + label, + modelOptions, }: { - userAi: UserAIFields; - modelType?: ModelType; - prompt: string; - system?: string; userEmail: string; - usageLabel: string; -}) { - try { - const { provider, model, llmModel, providerOptions } = getModel( - userAi, - modelType, - ); - - const result = await generateText({ - model: llmModel, - prompt, - system, - providerOptions, - ...commonOptions, - }); - - if (result.usage) { - await saveAiUsage({ - email: userEmail, - usage: result.usage, - provider, - model, - label: usageLabel, + label: string; + modelOptions: ReturnType; +}): typeof generateText { + return async (...args) => { + const [options, ...restArgs] = args; + + const generate = async (model: LanguageModelV2) => { + logger.trace("Generating text", { + label, + system: options.system?.slice(0, 200), + prompt: options.prompt?.slice(0, 200), }); - } - return result; - } catch (error) { - await handleError(error, userEmail); - throw error; - } -} + const result = await generateText( + { + ...options, + ...commonOptions, + model, + }, + ...restArgs, + ); -type ChatCompletionObjectArgs = { - userAi: UserAIFields; - modelType?: ModelType; - schema: z.Schema; - schemaName?: string; - schemaDescription?: string; - output?: "object" | "array" | "enum" | "no-schema"; - userEmail: string; - usageLabel: string; -} & ( - | { - system?: string; - prompt: string; - messages?: never; - } - | { - system?: never; - prompt?: never; - messages: ModelMessage[]; - } 
-); + if (result.usage) { + await saveAiUsage({ + email: userEmail, + usage: result.usage, + provider: modelOptions.provider, + model: modelOptions.modelName, + label, + }); + } + + if (args[0].tools) { + const toolCallInput = result.toolCalls?.[0]?.input; + logger.trace("Result", { + label, + result: toolCallInput, + }); + } + + return result; + }; -export async function chatCompletionObject( - options: ChatCompletionObjectArgs, -) { - return withBackupModel(chatCompletionObjectInternal, options); + try { + return await generate(modelOptions.model); + } catch (error) { + if ( + modelOptions.backupModel && + (isServiceUnavailableError(error) || isAWSThrottlingError(error)) + ) { + logger.warn("Using backup model", { + error, + model: modelOptions.backupModel, + }); + + try { + return await generate(modelOptions.backupModel); + } catch (error) { + await handleError(error, userEmail); + throw error; + } + } + + await handleError(error, userEmail); + throw error; + } + }; } -async function chatCompletionObjectInternal({ - userAi, - modelType, - system, - prompt, - messages, - schema, - schemaName, - schemaDescription, - output = "object", +export function createGenerateObject({ userEmail, - usageLabel, -}: ChatCompletionObjectArgs) { - try { - const { provider, model, llmModel, providerOptions } = getModel( - userAi, - modelType, - ); - - const result = await generateObject({ - model: llmModel, - system, - prompt, - messages, - schema, - schemaName, - schemaDescription, - output, - providerOptions, - ...commonOptions, - }); - - if (result.usage) { - await saveAiUsage({ - email: userEmail, - usage: result.usage, - provider, - model, - label: usageLabel, + label, + modelOptions, +}: { + userEmail: string; + label: string; + modelOptions: ReturnType; +}): typeof generateObject { + return async (...args) => { + try { + const [options, ...restArgs] = args; + + logger.trace("Generating object", { + label, + system: options.system?.slice(0, 200), + prompt: 
options.prompt?.slice(0, 200), }); - } - return result; - } catch (error) { - await handleError(error, userEmail); - throw error; - } + const result = await generateObject( + { + ...options, + ...commonOptions, + }, + ...restArgs, + ); + + if (result.usage) { + await saveAiUsage({ + email: userEmail, + usage: result.usage, + provider: modelOptions.provider, + model: modelOptions.modelName, + label, + }); + } + + logger.trace("Generated object", { + label, + result: result.object, + }); + + return result; + } catch (error) { + await handleError(error, userEmail); + throw error; + } + }; } export async function chatCompletionStream({ @@ -189,13 +188,13 @@ export async function chatCompletionStream({ onFinish?: StreamTextOnFinishCallback>; onStepFinish?: StreamTextOnStepFinishCallback>; }) { - const { provider, model, llmModel, providerOptions } = getModel( + const { provider, model, modelName, providerOptions } = getModel( userAi, modelType, ); const result = streamText({ - model: llmModel, + model, system, prompt, messages, @@ -209,7 +208,7 @@ export async function chatCompletionStream({ const usagePromise = saveAiUsage({ email: userEmail, provider, - model, + model: modelName, usage: result.usage, label, }); @@ -253,132 +252,52 @@ export async function chatCompletionStream({ return result; } -type ChatCompletionToolsArgs = { - userAi: UserAIFields; - modelType?: ModelType; - tools?: TOOLS; - maxSteps?: number; - label: string; - userEmail: string; -} & ( - | { - system?: string; - prompt: string; - messages?: never; +async function handleError(error: unknown, userEmail: string) { + logger.error("Error in LLM call", { error, userEmail }); + + if (APICallError.isInstance(error)) { + if (isIncorrectOpenAIAPIKeyError(error)) { + return await addUserErrorMessage( + userEmail, + ErrorType.INCORRECT_OPENAI_API_KEY, + error.message, + ); } - | { - system?: never; - prompt?: never; - messages: ModelMessage[]; + + if (isInvalidOpenAIModelError(error)) { + return await 
addUserErrorMessage( + userEmail, + ErrorType.INVALID_OPENAI_MODEL, + error.message, + ); } -); -export async function chatCompletionTools( - options: ChatCompletionToolsArgs, -) { - return withBackupModel(chatCompletionToolsInternal, options); -} + if (isOpenAIAPIKeyDeactivatedError(error)) { + return await addUserErrorMessage( + userEmail, + ErrorType.OPENAI_API_KEY_DEACTIVATED, + error.message, + ); + } -async function chatCompletionToolsInternal({ - userAi, - modelType, - system, - prompt, - messages, - tools, - maxSteps, - label, - userEmail, -}: ChatCompletionToolsArgs) { - try { - const { provider, model, llmModel, providerOptions } = getModel( - userAi, - modelType, - ); - - const result = await generateText({ - model: llmModel, - tools, - toolChoice: "required", - system, - prompt, - messages, - stopWhen: maxSteps ? stepCountIs(maxSteps) : undefined, - providerOptions, - ...commonOptions, - }); - - if (result.usage) { - await saveAiUsage({ - email: userEmail, - usage: result.usage, - provider, - model, - label, - }); + if (RetryError.isInstance(error) && isOpenAIRetryError(error)) { + return await addUserErrorMessage( + userEmail, + ErrorType.OPENAI_RETRY_ERROR, + error.message, + ); } - return result; - } catch (error) { - await handleError(error, userEmail); - throw error; + if (isAnthropicInsufficientBalanceError(error)) { + return await addUserErrorMessage( + userEmail, + ErrorType.ANTHROPIC_INSUFFICIENT_BALANCE, + error.message, + ); + } } } -// not in use atm -// async function _streamCompletionTools({ -// userAi, -// modelType, -// prompt, -// system, -// tools, -// maxSteps, -// userEmail, -// label, -// onFinish, -// }: { -// userAi: UserAIFields; -// modelType?: ModelType; -// prompt: string; -// system?: string; -// tools: Record; -// maxSteps?: number; -// userEmail: string; -// label: string; -// onFinish?: (text: string) => Promise; -// }) { -// const { provider, model, llmModel, providerOptions } = getModel( -// userAi, -// modelType, -// ); 
- -// const result = streamText({ -// model: llmModel, -// tools, -// toolChoice: "required", -// prompt, -// system, -// stopWhen: maxSteps ? stepCountIs(maxSteps) : undefined, -// providerOptions, -// ...commonOptions, -// onFinish: async ({ usage, text }) => { -// const usagePromise = saveAiUsage({ -// email: userEmail, -// provider, -// model, -// usage, -// label, -// }); - -// const finishPromise = onFinish?.(text); - -// await Promise.all([usagePromise, finishPromise]); -// }, -// }); - -// return result; -// } - // NOTE: Think we can just switch this out for p-retry that we already use in the project export async function withRetry( fn: () => Promise, @@ -420,72 +339,3 @@ export async function withRetry( throw lastError; } - -// Helps when service is unavailable / throttled / rate limited -async function withBackupModel( - fn: (args: Args) => Promise, - args: Args, -): Promise { - try { - return await fn(args); - } catch (error) { - if ( - env.USE_BACKUP_MODEL && - (isServiceUnavailableError(error) || isAWSThrottlingError(error)) - ) { - return await fn({ - ...args, - userAi: { - aiProvider: Provider.ANTHROPIC, - aiModel: env.NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL, - aiApiKey: args.userAi.aiApiKey, - }, - }); - } - throw error; - } -} - -async function handleError(error: unknown, userEmail: string) { - if (APICallError.isInstance(error)) { - if (isIncorrectOpenAIAPIKeyError(error)) { - return await addUserErrorMessage( - userEmail, - ErrorType.INCORRECT_OPENAI_API_KEY, - error.message, - ); - } - - if (isInvalidOpenAIModelError(error)) { - return await addUserErrorMessage( - userEmail, - ErrorType.INVALID_OPENAI_MODEL, - error.message, - ); - } - - if (isOpenAIAPIKeyDeactivatedError(error)) { - return await addUserErrorMessage( - userEmail, - ErrorType.OPENAI_API_KEY_DEACTIVATED, - error.message, - ); - } - - if (RetryError.isInstance(error) && isOpenAIRetryError(error)) { - return await addUserErrorMessage( - userEmail, - ErrorType.OPENAI_RETRY_ERROR, 
- error.message, - ); - } - - if (isAnthropicInsufficientBalanceError(error)) { - return await addUserErrorMessage( - userEmail, - ErrorType.ANTHROPIC_INSUFFICIENT_BALANCE, - error.message, - ); - } - } -} diff --git a/apps/web/utils/llms/model.test.ts b/apps/web/utils/llms/model.test.ts index 0a3487945e..2d10771519 100644 --- a/apps/web/utils/llms/model.test.ts +++ b/apps/web/utils/llms/model.test.ts @@ -85,7 +85,7 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.OPEN_AI); - expect(result.model).toBe("gpt-4o"); + expect(result.modelName).toBe("gpt-4o"); }); it("should use user's provider and model when API key is provided", () => { @@ -97,7 +97,7 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.GOOGLE); - expect(result.model).toBe(Model.GEMINI_1_5_PRO); + expect(result.modelName).toBe(Model.GEMINI_1_5_PRO); }); it("should use user's API key with default provider when only API key is provided", () => { @@ -109,7 +109,7 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.OPEN_AI); - expect(result.model).toBe("gpt-4o"); + expect(result.modelName).toBe("gpt-4o"); }); it("should configure Google model correctly", () => { @@ -121,8 +121,8 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.GOOGLE); - expect(result.model).toBe(Model.GEMINI_1_5_PRO); - expect(result.llmModel).toBeDefined(); + expect(result.modelName).toBe(Model.GEMINI_1_5_PRO); + expect(result.model).toBeDefined(); }); it("should configure Groq model correctly", () => { @@ -134,8 +134,8 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.GROQ); - expect(result.model).toBe(Model.GROQ_LLAMA_3_3_70B); - expect(result.llmModel).toBeDefined(); + expect(result.modelName).toBe(Model.GROQ_LLAMA_3_3_70B); + expect(result.model).toBeDefined(); }); it("should configure 
OpenRouter model correctly", () => { @@ -147,8 +147,8 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.OPENROUTER); - expect(result.model).toBe(Model.GROQ_LLAMA_3_3_70B); - expect(result.llmModel).toBeDefined(); + expect(result.modelName).toBe(Model.GROQ_LLAMA_3_3_70B); + expect(result.model).toBeDefined(); }); // it("should configure Ollama model correctly", () => { @@ -160,8 +160,8 @@ describe("Models", () => { // const result = getModel(userAi); // expect(result.provider).toBe(Provider.OLLAMA); - // expect(result.model).toBe("llama3"); - // expect(result.llmModel).toBeDefined(); + // expect(result.modelName).toBe("llama3"); + // expect(result.model).toBeDefined(); // }); it("should configure Anthropic model correctly without Bedrock credentials", () => { @@ -176,8 +176,8 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.ANTHROPIC); - expect(result.model).toBe(Model.CLAUDE_3_7_SONNET_ANTHROPIC); - expect(result.llmModel).toBeDefined(); + expect(result.modelName).toBe(Model.CLAUDE_3_7_SONNET_ANTHROPIC); + expect(result.model).toBeDefined(); }); it("should configure Anthropic model with Bedrock when Bedrock credentials exist", () => { @@ -192,8 +192,8 @@ describe("Models", () => { const result = getModel(userAi); expect(result.provider).toBe(Provider.ANTHROPIC); - expect(result.model).toBe(Model.CLAUDE_3_7_SONNET_BEDROCK); - expect(result.llmModel).toBeDefined(); + expect(result.modelName).toBe(Model.CLAUDE_3_7_SONNET_BEDROCK); + expect(result.model).toBeDefined(); }); it("should throw error for unsupported provider", () => { @@ -219,7 +219,7 @@ describe("Models", () => { // const result = getModel(userAi, "chat"); // expect(result.provider).toBe(Provider.OPENROUTER); - // expect(result.model).toBe("moonshotai/kimi-k2"); + // expect(result.modelName).toBe("moonshotai/kimi-k2"); // }); // it("should use OpenRouter with provider options for chat", () => { @@ -236,7 
+236,7 @@ describe("Models", () => { // const result = getModel(userAi, "chat"); // expect(result.provider).toBe(Provider.OPENROUTER); - // expect(result.model).toBe("moonshotai/kimi-k2"); + // expect(result.modelName).toBe("moonshotai/kimi-k2"); // expect(result.providerOptions?.openrouter?.provider?.order).toEqual([ // "Google Vertex", // "Anthropic", @@ -257,7 +257,7 @@ describe("Models", () => { const result = getModel(userAi, "economy"); expect(result.provider).toBe(Provider.OPENROUTER); - expect(result.model).toBe("google/gemini-2.5-flash-preview-05-20"); + expect(result.modelName).toBe("google/gemini-2.5-flash-preview-05-20"); }); it("should use OpenRouter with provider options for economy", () => { @@ -275,7 +275,7 @@ describe("Models", () => { const result = getModel(userAi, "economy"); expect(result.provider).toBe(Provider.OPENROUTER); - expect(result.model).toBe("google/gemini-2.5-flash-preview-05-20"); + expect(result.modelName).toBe("google/gemini-2.5-flash-preview-05-20"); expect(result.providerOptions?.openrouter?.provider?.order).toEqual([ "Google Vertex", "Anthropic", @@ -291,7 +291,7 @@ describe("Models", () => { const result = getModel(userAi, "default"); expect(result.provider).toBe(Provider.OPEN_AI); - expect(result.model).toBe("gpt-4o"); + expect(result.modelName).toBe("gpt-4o"); }); it("should use OpenRouter with provider options for default model", () => { @@ -308,7 +308,7 @@ describe("Models", () => { const result = getModel(userAi, "default"); expect(result.provider).toBe(Provider.OPENROUTER); - expect(result.model).toBe("anthropic/claude-3.5-sonnet"); + expect(result.modelName).toBe("anthropic/claude-3.5-sonnet"); expect(result.providerOptions?.openrouter?.provider?.order).toEqual([ "Google Vertex", "Anthropic", diff --git a/apps/web/utils/llms/model.ts b/apps/web/utils/llms/model.ts index f247a3eb11..a5c8c69e2e 100644 --- a/apps/web/utils/llms/model.ts +++ b/apps/web/utils/llms/model.ts @@ -15,16 +15,24 @@ const logger = 
createScopedLogger("llms/model"); export type ModelType = "default" | "economy" | "chat"; +type SelectModel = { + provider: string; + modelName: string; + model: LanguageModelV2; + providerOptions?: Record; + backupModel: LanguageModelV2 | null; +}; + export function getModel( userAi: UserAIFields, modelType: ModelType = "default", -) { +): SelectModel { const data = selectModelByType(userAi, modelType); logger.info("Using model", { modelType, provider: data.provider, - model: data.model, + model: data.modelName, providerOptions: data.providerOptions, }); @@ -35,9 +43,8 @@ function selectModelByType(userAi: UserAIFields, modelType: ModelType) { switch (modelType) { case "economy": return selectEconomyModel(userAi); - // disabled for now - // case "chat": - // return selectChatModel(userAi); + case "chat": + return selectChatModel(userAi); default: return selectDefaultModel(userAi); } @@ -54,43 +61,41 @@ function selectModel( aiApiKey: string | null; }, providerOptions?: Record, -): { - provider: string; - model: string; - llmModel: LanguageModelV2; - providerOptions?: Record; -} { +): SelectModel { switch (aiProvider) { case Provider.OPEN_AI: { - const model = aiModel || Model.GPT_4O; + const modelName = aiModel || Model.GPT_4O; return { provider: Provider.OPEN_AI, - model, - llmModel: createOpenAI({ apiKey: aiApiKey || env.OPENAI_API_KEY })( - model, + modelName, + model: createOpenAI({ apiKey: aiApiKey || env.OPENAI_API_KEY })( + modelName, ), + backupModel: getBackupModel(aiApiKey), }; } case Provider.GOOGLE: { const mod = aiModel || Model.GEMINI_2_0_FLASH; return { provider: Provider.GOOGLE, - model: mod, - llmModel: createGoogleGenerativeAI({ + modelName: mod, + model: createGoogleGenerativeAI({ apiKey: aiApiKey || env.GOOGLE_API_KEY, })(mod), + backupModel: getBackupModel(aiApiKey), }; } case Provider.GROQ: { - const model = aiModel || Model.GROQ_LLAMA_3_3_70B; + const modelName = aiModel || Model.GROQ_LLAMA_3_3_70B; return { provider: Provider.GROQ, - model, 
- llmModel: createGroq({ apiKey: aiApiKey || env.GROQ_API_KEY })(model), + modelName, + model: createGroq({ apiKey: aiApiKey || env.GROQ_API_KEY })(modelName), + backupModel: getBackupModel(aiApiKey), }; } case Provider.OPENROUTER: { - const model = aiModel || Model.CLAUDE_4_SONNET_OPENROUTER; + const modelName = aiModel || Model.CLAUDE_4_SONNET_OPENROUTER; const openrouter = createOpenRouter({ apiKey: aiApiKey || env.OPENROUTER_API_KEY, headers: { @@ -98,37 +103,38 @@ function selectModel( "X-Title": "Inbox Zero", }, }); - const chatModel = openrouter.chat(model); + const chatModel = openrouter.chat(modelName); return { provider: Provider.OPENROUTER, - model, - llmModel: chatModel, + modelName, + model: chatModel, providerOptions, + backupModel: getBackupModel(aiApiKey), }; } case Provider.OLLAMA: { throw new Error( "Ollama is not supported. Revert to version v1.7.28 or older to use it.", ); - // const model = aiModel || env.NEXT_PUBLIC_OLLAMA_MODEL; - // if (!model) throw new Error("Ollama model is not set"); + // const modelName = aiModel || env.NEXT_PUBLIC_OLLAMA_MODEL; + // if (!modelName) throw new Error("Ollama model is not set"); // return { // provider: Provider.OLLAMA!, - // model, - // llmModel: createOllama({ baseURL: env.OLLAMA_BASE_URL })(model), + // modelName, + // model: createOllama({ baseURL: env.OLLAMA_BASE_URL })(model), // }; } // this is messy. better to have two providers. 
one for bedrock and one for anthropic case Provider.ANTHROPIC: { if (env.BEDROCK_ACCESS_KEY && env.BEDROCK_SECRET_KEY && !aiApiKey) { - const model = aiModel || Model.CLAUDE_3_7_SONNET_BEDROCK; + const modelName = aiModel || Model.CLAUDE_3_7_SONNET_BEDROCK; return { provider: Provider.ANTHROPIC, - model, + modelName, // Based on: https://github.com/vercel/ai/issues/4996#issuecomment-2751630936 - llmModel: createAmazonBedrock({ + model: createAmazonBedrock({ // accessKeyId: env.BEDROCK_ACCESS_KEY, // secretAccessKey: env.BEDROCK_SECRET_KEY, // sessionToken: undefined, @@ -138,16 +144,18 @@ function selectModel( secretAccessKey: env.BEDROCK_SECRET_KEY!, sessionToken: undefined, }), - })(model), + })(modelName), + backupModel: getBackupModel(aiApiKey), }; } else { - const model = aiModel || Model.CLAUDE_3_7_SONNET_ANTHROPIC; + const modelName = aiModel || Model.CLAUDE_3_7_SONNET_ANTHROPIC; return { provider: Provider.ANTHROPIC, - model, - llmModel: createAnthropic({ + modelName, + model: createAnthropic({ apiKey: aiApiKey || env.ANTHROPIC_API_KEY, - })(model), + })(modelName), + backupModel: getBackupModel(aiApiKey), }; } } @@ -183,7 +191,7 @@ function createOpenRouterProviderOptions( * - Bulk processing emails * - Any task with large context windows where cost efficiency matters */ -function selectEconomyModel(userAi: UserAIFields) { +function selectEconomyModel(userAi: UserAIFields): SelectModel { if (env.ECONOMY_LLM_PROVIDER && env.ECONOMY_LLM_MODEL) { const apiKey = getProviderApiKey(env.ECONOMY_LLM_PROVIDER); if (!apiKey) { @@ -220,7 +228,7 @@ function selectEconomyModel(userAi: UserAIFields) { /** * Selects the appropriate chat model for fast conversational tasks */ -function selectChatModel(userAi: UserAIFields) { +function selectChatModel(userAi: UserAIFields): SelectModel { if (env.CHAT_LLM_PROVIDER && env.CHAT_LLM_MODEL) { const apiKey = getProviderApiKey(env.CHAT_LLM_PROVIDER); if (!apiKey) { @@ -254,7 +262,7 @@ function selectChatModel(userAi: 
UserAIFields) { return selectDefaultModel(userAi); } -function selectDefaultModel(userAi: UserAIFields) { +function selectDefaultModel(userAi: UserAIFields): SelectModel { let aiProvider: string; let aiModel: string | null = null; const aiApiKey = userAi.aiApiKey; @@ -276,15 +284,15 @@ function selectDefaultModel(userAi: UserAIFields) { const models = [ // { // provider: Provider.ANTHROPIC, - // model: Model.CLAUDE_3_7_SONNET_BEDROCK, + // modelName: Model.CLAUDE_3_7_SONNET_BEDROCK, // }, // { // provider: Provider.ANTHROPIC, - // model: Model.CLAUDE_4_SONNET_BEDROCK, + // modelName: Model.CLAUDE_4_SONNET_BEDROCK, // }, { provider: Provider.OPENROUTER, - model: null, + modelName: null, }, ]; @@ -292,7 +300,7 @@ function selectDefaultModel(userAi: UserAIFields) { models[Math.floor(Math.random() * models.length)]; aiProvider = selectedProviderAndModel.provider; - aiModel = selectedProviderAndModel.model; + aiModel = selectedProviderAndModel.modelName; if (aiProvider === Provider.OPENROUTER) { function selectRandomModel() { @@ -365,3 +373,13 @@ function getProviderApiKey(provider: string) { return providerApiKeys[provider]; } + +function getBackupModel(userApiKey: string | null): LanguageModelV2 | null { + // disable backup model if user is using their own api key + if (userApiKey) return null; + if (!env.OPENROUTER_BACKUP_MODEL) return null; + + return createOpenRouter({ + apiKey: env.OPENROUTER_API_KEY, + }).chat(env.OPENROUTER_BACKUP_MODEL); +} diff --git a/turbo.json b/turbo.json index 8d776983fa..fa56ac4c37 100644 --- a/turbo.json +++ b/turbo.json @@ -28,6 +28,8 @@ "CHAT_LLM_MODEL", "CHAT_OPENROUTER_PROVIDERS", + "OPENROUTER_BACKUP_MODEL", + "OPENAI_API_KEY", "ANTHROPIC_API_KEY", "BEDROCK_ACCESS_KEY", @@ -120,7 +122,6 @@ "NEXT_PUBLIC_AXIOM_DATASET", "NEXT_PUBLIC_AXIOM_TOKEN", "NEXT_PUBLIC_BEDROCK_SONNET_MODEL", - "NEXT_PUBLIC_BEDROCK_ANTHROPIC_BACKUP_MODEL", "NEXT_PUBLIC_OLLAMA_MODEL", "NEXT_PUBLIC_APP_HOME_PATH", "NEXT_PUBLIC_DUB_REFER_DOMAIN" diff --git 
a/version.txt b/version.txt index 557fefcbf0..cf2dc0bc44 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v2.1.3 \ No newline at end of file +v2.2.0 \ No newline at end of file