diff --git a/frontend/src/components/LLMSelection/N1nOptions/index.jsx b/frontend/src/components/LLMSelection/N1nOptions/index.jsx
new file mode 100644
index 00000000000..4ac01259d5f
--- /dev/null
+++ b/frontend/src/components/LLMSelection/N1nOptions/index.jsx
@@ -0,0 +1,99 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function N1nOptions({ settings }) {
+ const [inputValue, setInputValue] = useState(settings?.N1nApiKey);
+ const [n1nApiKey, setN1nApiKey] = useState(settings?.N1nApiKey);
+
+  return (
+    <div className="w-full flex items-center gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          n1n API Key
+        </label>
+        <input
+          type="password"
+          name="N1nApiKey"
+          className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+          placeholder="n1n API Key"
+          defaultValue={settings?.N1nApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setN1nApiKey(inputValue)}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <N1nModelSelection settings={settings} apiKey={n1nApiKey} />
+      )}
+    </div>
+  );
+}
+
+function N1nModelSelection({ apiKey, settings }) {
+ const [models, setModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+
+ useEffect(() => {
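+    // Re-fetch the available models whenever the entered API key changes.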
+ async function findCustomModels() {
+ if (!apiKey) {
+ setModels([]);
+ setLoading(true);
+ return;
+ }
+
+ setLoading(true);
+ const { models } = await System.customModels(
+ "n1n",
+ typeof apiKey === "boolean" ? null : apiKey
+ );
+ setModels(models || []);
+ setLoading(false);
+ }
+ findCustomModels();
+ }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select name="N1nModelPref" disabled={true} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+          <option>-- loading available models --</option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select name="N1nModelPref" required={true} defaultValue={settings?.N1nModelPref} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+        {models.map((model) => (
+          <option key={model.id} value={model.id}>
+            {model.name || model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
+
diff --git a/frontend/src/media/llmprovider/n1n.png b/frontend/src/media/llmprovider/n1n.png
new file mode 100644
index 00000000000..c9611219917
Binary files /dev/null and b/frontend/src/media/llmprovider/n1n.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 671f7e867da..88b55d951d0 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -35,6 +35,7 @@ import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
+import N1nLogo from "@/media/llmprovider/n1n.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -67,6 +68,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import FoundryOptions from "@/components/LLMSelection/FoundryOptions";
+import N1nOptions from "@/components/LLMSelection/N1nOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -349,6 +351,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
"GenericOpenAiKey",
],
},
+ {
+ name: "n1n",
+ value: "n1n",
+ logo: N1nLogo,
+    options: (settings) => <N1nOptions settings={settings} />,
+    description: "Access 400+ LLMs and multimodal models through the n1n API.",
+ requiredConfig: ["N1nApiKey"],
+ },
];
export default function GeneralLLMPreference() {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 7a16985fe11..9df78583f48 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -29,6 +29,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
+import N1nLogo from "@/media/llmprovider/n1n.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -59,6 +60,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
+import N1nOptions from "@/components/LLMSelection/N1nOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -281,6 +283,13 @@ const LLMS = [
    options: (settings) => <CometApiLLMOptions settings={settings} />,
description: "500+ AI Models all in one API.",
},
+ {
+ name: "n1n",
+ value: "n1n",
+ logo: N1nLogo,
+    options: (settings) => <N1nOptions settings={settings} />,
+    description: "Access 400+ LLMs and multimodal models through the n1n API.",
+ },
];
export default function LLMPreference({
diff --git a/server/utils/AiProviders/n1n/index.js b/server/utils/AiProviders/n1n/index.js
new file mode 100644
index 00000000000..d4efbc408ef
--- /dev/null
+++ b/server/utils/AiProviders/n1n/index.js
@@ -0,0 +1,244 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const { v4: uuidv4 } = require("uuid");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+class N1nLLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.N1N_API_KEY)
+ throw new Error("No n1n API key was set.");
+
+ this.className = "N1nLLM";
+ const { OpenAI: OpenAIApi } = require("openai");
+ this.basePath = "https://api.n1n.ai/v1";
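+    // n1n exposes an OpenAI-compatible API, so the official openai client is pointed at its base URL.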
+ this.openai = new OpenAIApi({
+ baseURL: this.basePath,
+ apiKey: process.env.N1N_API_KEY ?? null,
+ defaultHeaders: {
+ "HTTP-Referer": "https://anythingllm.com",
+ "X-Title": "AnythingLLM",
+ },
+ });
+ this.model = modelPreference || process.env.N1N_MODEL_PREF || "gpt-4o";
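+    // Rough token budget: ~15% of the context window for history, ~15% for the system prompt, ~70% for the user prompt.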
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+ this.log(`Initialized with model: ${this.model}`);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
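+  // A fixed 128k-token context window is assumed for every n1n model.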
+ static promptWindowLimit(_modelName) {
+ return 128000;
+ }
+
+ promptWindowLimit() {
+ return 128000;
+ }
+
+ async isValidChatCompletionModel(modelName = "") {
+ const models = await this.openai.models
+ .list()
+ .catch(() => ({ data: [] }));
+ return models.data.some((model) => model.id === modelName);
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `n1n chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ })
+ );
+
+ if (
+ !result?.output?.hasOwnProperty("choices") ||
+ result?.output?.choices?.length === 0
+ )
+ throw new Error(
+ `Invalid response from n1n: ${result.output?.error?.message || "Unknown error"}`
+ );
+
+ return {
+ textResponse: result.output.choices[0].message.content,
+ metrics: {
+ prompt_tokens: result.output.usage?.prompt_tokens || 0,
+ completion_tokens: result.output.usage?.completion_tokens || 0,
+ total_tokens: result.output.usage?.total_tokens || 0,
+ outputTps:
+ (result.output.usage?.completion_tokens || 0) / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `n1n chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages,
+ false
+ );
+
+ return measuredStreamRequest;
+ }
+
+ handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
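+    // Accumulate token usage from stream chunks (when the API reports it) so the measurement closes with real numbers.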
+ let usage = {
+ completion_tokens: 0,
+ };
+
+ return new Promise(async (resolve) => {
+ let fullText = "";
+
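+      // If the client disconnects mid-stream, stop measuring and resolve with whatever text has arrived so far.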
+ const handleAbort = () => {
+ stream?.endMeasurement(usage);
+ clientAbortedHandler(resolve, fullText);
+ };
+ response.on("close", handleAbort);
+
+ try {
+ for await (const chunk of stream) {
+ const message = chunk?.choices?.[0];
+ const token = message?.delta?.content;
+
+ if (
+ chunk.hasOwnProperty("usage") &&
+ !!chunk.usage &&
+ Object.values(chunk.usage).length > 0
+ ) {
+ if (chunk.usage.hasOwnProperty("prompt_tokens")) {
+ usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
+ }
+ if (chunk.usage.hasOwnProperty("completion_tokens")) {
+ usage.completion_tokens = Number(chunk.usage.completion_tokens);
+ }
+ }
+
+ if (token) {
+ fullText += token;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: token,
+ close: false,
+ error: false,
+ });
+ }
+
+ if (
+ message?.hasOwnProperty("finish_reason") &&
+ message.finish_reason !== "" &&
+ message.finish_reason !== null
+ ) {
+ writeResponseChunk(response, {
+ uuid,
+ sources,
+ type: "textResponseChunk",
+ textResponse: "",
+ close: true,
+ error: false,
+ });
+ response.removeListener("close", handleAbort);
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ break;
+ }
+ }
+ } catch (e) {
+ console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
+ writeResponseChunk(response, {
+ uuid,
+ type: "abort",
+ textResponse: null,
+ sources: [],
+ close: true,
+ error: e.message,
+ });
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ }
+ });
+ }
+
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+module.exports = {
+ N1nLLM,
+};
+
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 65b5a146dda..6005ad7cfe5 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -974,6 +974,8 @@ ${this.getHistory({ to: route.to })
return new Providers.CometApiProvider({ model: config.model });
case "foundry":
return new Providers.FoundryProvider({ model: config.model });
+ case "n1n":
+ return new Providers.N1nProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 8cf2e7422b3..0bb46dd9053 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -26,6 +26,7 @@ const DellProAiStudioProvider = require("./dellProAiStudio.js");
const MoonshotAiProvider = require("./moonshotAi.js");
const CometApiProvider = require("./cometapi.js");
const FoundryProvider = require("./foundry.js");
+const N1nProvider = require("./n1n.js");
module.exports = {
OpenAIProvider,
@@ -56,4 +57,5 @@ module.exports = {
DellProAiStudioProvider,
MoonshotAiProvider,
FoundryProvider,
+ N1nProvider,
};
diff --git a/server/utils/agents/aibitat/providers/n1n.js b/server/utils/agents/aibitat/providers/n1n.js
new file mode 100644
index 00000000000..56f2652dd88
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/n1n.js
@@ -0,0 +1,87 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+const { toValidNumber } = require("../../../http/index.js");
+
+class N1nProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ constructor(config = {}) {
+ super();
+ const { model = "gpt-4o" } = config;
+ const client = new OpenAI({
+ baseURL: "https://api.n1n.ai/v1",
+ apiKey: process.env.N1N_API_KEY ?? null,
+ maxRetries: 3,
+ });
+
+ this._client = client;
+ this.model = model;
+ this.verbose = true;
+ this.maxTokens = process.env.N1N_MAX_TOKENS
+ ? toValidNumber(process.env.N1N_MAX_TOKENS, 1024)
+ : 1024;
+ }
+
+ get client() {
+ return this._client;
+ }
+
+ get supportsAgentStreaming() {
+ return true;
+ }
+
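+  // Tool/function calling is emulated through the UnTooled helper rather than relying on native tool support.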
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ max_tokens: this.maxTokens,
+ })
+ .then((result) => {
+ if (!result.hasOwnProperty("choices"))
+ throw new Error("n1n chat: No results!");
+ if (result.choices.length === 0)
+ throw new Error("n1n chat: No results length!");
+ return result.choices[0].message.content;
+ })
+ .catch((_) => {
+ return null;
+ });
+ }
+
+ async #handleFunctionCallStream({ messages = [] }) {
+ return await this.client.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ });
+ }
+
+ async stream(messages, functions = [], eventHandler = null) {
+ return await UnTooled.prototype.stream.call(
+ this,
+ messages,
+ functions,
+ this.#handleFunctionCallStream.bind(this),
+ eventHandler
+ );
+ }
+
+ async complete(messages, functions = []) {
+ return await UnTooled.prototype.complete.call(
+ this,
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+ }
+
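+  // n1n usage cost is not tracked; always report zero.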
+ getCost(_usage) {
+ return 0;
+ }
+}
+
+module.exports = N1nProvider;
+
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 819a464c6d0..0a8c3c2b532 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -218,6 +218,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "foundry":
const { FoundryLLM } = require("../AiProviders/foundry");
return new FoundryLLM(embedder, model);
+ case "n1n":
+ const { N1nLLM } = require("../AiProviders/n1n");
+ return new N1nLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -374,6 +377,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "foundry":
const { FoundryLLM } = require("../AiProviders/foundry");
return FoundryLLM;
+ case "n1n":
+ const { N1nLLM } = require("../AiProviders/n1n");
+ return N1nLLM;
default:
return null;
}