diff --git a/codegen/codegen.tsx b/codegen/codegen.tsx
index 3744667a..4374abef 100644
--- a/codegen/codegen.tsx
+++ b/codegen/codegen.tsx
@@ -188,6 +188,10 @@ import { generateManagerClasses } from './generate-manager-classes';
       {
         name: 'textGen',
         operationId: 'post_ai_text_gen',
+      },
+      {
+        name: 'getAiAgentDefaultConfig',
+        operationId: 'get_ai_agent_default',
       }
     ]
   }
diff --git a/docs/ai.md b/docs/ai.md
index 9b827eb4..18866b08 100644
--- a/docs/ai.md
+++ b/docs/ai.md
@@ -6,8 +6,10 @@ AI allows to send an intelligence request to supported large language models and
-- [Send AI request](#send-ai-request)
-- [Send AI text generation request](#send-ai-text-generation-request)
+- [AI](#ai)
+  - [Send AI request](#send-ai-request)
+  - [Send AI text generation request](#send-ai-text-generation-request)
+  - [Get AI agent default configuration](#get-ai-agent-default-configuration)
@@ -15,7 +17,7 @@
 Send AI request
 ------------------------
 
 To send an AI request to the supported large language models, call the
-[`ai.ask(body, options?, callback?)`](http://opensource.box.com/box-node-sdk/jsdoc/AI.html#ask) method with the prompt and items. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `mode` specifies if this request is for a single or multiple items. If you select `single_item_qa` the items array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
+[`ai.ask(body, options?, callback?)`](http://opensource.box.com/box-node-sdk/jsdoc/AIManager.html#ask) method with the prompt and items. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` is the question provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `mode` specifies whether this request is for a single item or multiple items. If you select `single_item_qa`, the `items` array may contain only one element. Selecting `multiple_item_qa` allows you to provide up to 25 items.
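+
+You can also set the `include_citations` flag on the request body; when it is `true`, the response includes a `citations` array (see the `AiResponseFull` schema) listing the content the answer referenced. A minimal sketch, with a hypothetical file ID:
+
+```js
+client.ai.ask({
+  prompt: 'What is the capital of France?',
+  items: [{ type: 'file', id: '12345' }],
+  mode: 'single_item_qa',
+  include_citations: true,
+}).then(response => {
+  // response.citations -> [{ type: 'file', id: '12345', name: '...', content: '...' }]
+});
+```
+
+For example: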
 
 ```js
@@ -47,7 +49,7 @@
 Send AI text generation request
 ------------------------
 
 To send an AI text generation request to the supported large language models, call the
-[`ai.textGen(body, options?, callback?)`](http://opensource.box.com/box-node-sdk/jsdoc/AI.html#textGen) method with the prompt, items and dialogue history. The `dialogue_history` parameter is history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
+[`ai.textGen(body, options?, callback?)`](http://opensource.box.com/box-node-sdk/jsdoc/AIManager.html#textGen) method with the prompt, items, and dialogue history. The `dialogue_history` parameter is the history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` is the question provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
 
 ```js
@@ -81,3 +83,37 @@ client.ai.textGen(
   } */
 });
 ```
+
+
+Get AI agent default configuration
+------------------------
+
+To get an AI agent default configuration, call the [`ai.getAiAgentDefaultConfig(options?, callback?)`](http://opensource.box.com/box-node-sdk/jsdoc/AIManager.html#getAiAgentDefaultConfig) method. The `mode` parameter filters the agent configuration to be returned; it can be either `ask` or `text_gen`. The `language` parameter specifies the ISO language code to return the agent config for. If the language is not supported, the default agent configuration is returned. The `model` parameter specifies the model for which the default agent configuration should be returned.
+
+
+```js
+client.ai.getAiAgentDefaultConfig({
+  mode: 'ask',
+  language: 'en',
+  model: 'openai__gpt_3_5_turbo'
+}).then(response => {
+  /* response -> {
+    "type": "ai_agent_ask",
+    "basic_text": {
+      "llm_endpoint_params": {
+        "type": "openai_params",
+        "frequency_penalty": 1.5,
+        "presence_penalty": 1.5,
+        "stop": "<|im_end|>",
+        "temperature": 0,
+        "top_p": 1
+      },
+      "model": "openai__gpt_3_5_turbo",
+      "num_tokens_for_completion": 8400,
+      "prompt_template": "It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?",
+      "system_message": "You are a helpful travel assistant specialized in budget travel"
+    },
+    ...
+  } */
+});
+```
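+
+An agent configuration shaped like this response can also be passed back as the `ai_agent` field of an ask or text generation request body to customize the agent that handles the request. A minimal sketch, with a hypothetical file ID:
+
+```js
+client.ai.ask({
+  prompt: 'What is this document about?',
+  items: [{ type: 'file', id: '12345' }],
+  mode: 'single_item_qa',
+  ai_agent: {
+    type: 'ai_agent_ask',
+    basic_text: {
+      model: 'openai__gpt_3_5_turbo'
+    }
+  }
+});
+```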
diff --git a/src/managers/ai.generated.ts b/src/managers/ai.generated.ts
index b84f5489..0fd19d33 100644
--- a/src/managers/ai.generated.ts
+++ b/src/managers/ai.generated.ts
@@ -18,13 +18,13 @@
    * @param {schemas.AiAsk} body
    * @param {object} [options] Options for the request
    * @param {Function} [callback] Passed the result if successful, error otherwise
-   * @returns {Promise<schemas.AiResponse>} A promise resolving to the result or rejecting with an error
+   * @returns {Promise<schemas.AiResponseFull>} A promise resolving to the result or rejecting with an error
    */
   ask(
     body: schemas.AiAsk,
     options?: {},
     callback?: Function
-  ): Promise<schemas.AiResponse> {
+  ): Promise<schemas.AiResponseFull> {
     const { ...queryParams } = options,
       apiPath = urlPath('ai', 'ask'),
       params = {
@@ -63,5 +63,45 @@ class AIManager {
       callback
     );
   }
+  /**
+   * Get AI agent default configuration
+   *
+   * Get the AI agent default config
+   * @param {object} options Options for the request
+   * @param {"ask" | "text_gen"} options.mode The mode to filter the agent config to return.
+   * @param {string} [options.language] The ISO language code to return the agent config for. If the language is not supported the default agent config is returned.
+   * @param {string} [options.model] The model to return the default agent config for.
+   * @param {Function} [callback] Passed the result if successful, error otherwise
+   * @returns {Promise<schemas.AiAgentAsk | schemas.AiAgentTextGen>} A promise resolving to the result or rejecting with an error
+   */
+  getAiAgentDefaultConfig(
+    options: {
+      /**
+       * The mode to filter the agent config to return.
+       */
+      readonly mode: 'ask' | 'text_gen';
+      /**
+       * The ISO language code to return the agent config for.
+       * If the language is not supported the default agent config is returned.
+       */
+      readonly language?: string;
+      /**
+       * The model to return the default agent config for.
+       */
+      readonly model?: string;
+    },
+    callback?: Function
+  ): Promise<schemas.AiAgentAsk | schemas.AiAgentTextGen> {
+    const { ...queryParams } = options,
+      apiPath = urlPath('ai_agent_default'),
+      params = {
+        qs: queryParams,
+      };
+    return this.client.wrapWithDefaultHandler(this.client.get)(
+      apiPath,
+      params,
+      callback
+    );
+  }
 }
 export = AIManager;
diff --git a/src/schemas/ai-agent-ask.generated.ts b/src/schemas/ai-agent-ask.generated.ts
new file mode 100644
index 00000000..35bbfa25
--- /dev/null
+++ b/src/schemas/ai-agent-ask.generated.ts
@@ -0,0 +1,17 @@
+import * as schemas from '.';
+/**
+ * AI agent for question requests
+ *
+ * The AI agent used to handle queries.
+ */
+export interface AiAgentAsk {
+  /**
+   * The type of AI agent used to handle queries.
+   * Example: ai_agent_ask
+   */
+  type: 'ai_agent_ask';
+  long_text?: schemas.AiAgentLongTextTool;
+  basic_text?: schemas.AiAgentBasicTextToolAsk;
+  long_text_multi?: schemas.AiAgentLongTextTool;
+  basic_text_multi?: schemas.AiAgentBasicTextToolAsk;
+}
diff --git a/src/schemas/ai-agent-basic-gen-tool.generated.ts b/src/schemas/ai-agent-basic-gen-tool.generated.ts
new file mode 100644
index 00000000..1f1e9ded
--- /dev/null
+++ b/src/schemas/ai-agent-basic-gen-tool.generated.ts
@@ -0,0 +1,14 @@
+import * as schemas from '.';
+/**
+ * AI agent basic text generation tool
+ *
+ * AI agent basic tool used to generate text.
+ */
+export interface AiAgentBasicGenTool extends schemas.AiAgentLongTextTool {
+  /**
+   * How the content should be included in a request to the LLM.
+   * When passing this parameter, you must include `{content}`.
+   * Example: ---{content}---
+   */
+  content_template?: string;
+}
diff --git a/src/schemas/ai-agent-basic-text-tool-ask.generated.ts b/src/schemas/ai-agent-basic-text-tool-ask.generated.ts
new file mode 100644
index 00000000..c96d2972
--- /dev/null
+++ b/src/schemas/ai-agent-basic-text-tool-ask.generated.ts
@@ -0,0 +1,34 @@
+import * as schemas from '.';
+/**
+ * AI agent basic text tool
+ *
+ * AI agent tool used to handle basic text.
+ */
+export interface AiAgentBasicTextToolAsk {
+  /**
+   * The model used for the AI Agent for basic text.
+   * Example: openai__gpt_3_5_turbo
+   */
+  model?: string;
+  /**
+   * System messages try to help the LLM "understand" its role and what it is supposed to do.
+   * Example: You are a helpful travel assistant specialized in budget travel
+   */
+  system_message?: string;
+  /**
+   * The prompt template contains contextual information of the request and the user prompt.
+   *
+   * When passing `prompt_template` parameters, you **must include** inputs for `{current_date}`, `{user_question}`, and `{content}`.
+   * Example: It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?
+   */
+  prompt_template?: string;
+  /**
+   * The number of tokens for completion.
+   * Example: 8400
+   */
+  num_tokens_for_completion?: number;
+  /**
+   * The parameters for the LLM endpoint specific to OpenAI / Google models.
+   */
+  llm_endpoint_params?: schemas.AiLlmEndpointParamsOpenAi | schemas.AiLlmEndpointParamsGoogle;
+}
diff --git a/src/schemas/ai-agent-basic-text-tool-text-gen.generated.ts b/src/schemas/ai-agent-basic-text-tool-text-gen.generated.ts
new file mode 100644
index 00000000..34eeaea6
--- /dev/null
+++ b/src/schemas/ai-agent-basic-text-tool-text-gen.generated.ts
@@ -0,0 +1,36 @@
+import * as schemas from '.';
+/**
+ * AI agent basic text tool
+ *
+ * AI agent tool used to handle basic text.
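+ * Unlike `AiAgentBasicTextToolAsk`, its `prompt_template` requires only `{user_question}`; `{current_date}` and `{content}` are optional.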
+ */
+export interface AiAgentBasicTextToolTextGen {
+  /**
+   * The model to be used for the AI Agent for basic text.
+   * Example: openai__gpt_3_5_turbo
+   */
+  model?: string;
+  /**
+   * System messages try to help the LLM "understand" its role and what it is supposed to do.
+   * This parameter requires using `{current_date}`.
+   * Example: You are a helpful travel assistant specialized in budget travel
+   */
+  system_message?: string;
+  /**
+   * The prompt template contains contextual information of the request and the user prompt.
+   *
+   * When using the `prompt_template` parameter, you **must include** input for `{user_question}`.
+   * Inputs for `{current_date}` and `{content}` are optional, depending on the use.
+   * Example: It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?
+   */
+  prompt_template?: string;
+  /**
+   * The number of tokens for completion.
+   * Example: 8400
+   */
+  num_tokens_for_completion?: number;
+  /**
+   * The parameters for the LLM endpoint specific to OpenAI / Google models.
+   */
+  llm_endpoint_params?: schemas.AiLlmEndpointParamsOpenAi | schemas.AiLlmEndpointParamsGoogle;
+}
diff --git a/src/schemas/ai-agent-long-text-tool.generated.ts b/src/schemas/ai-agent-long-text-tool.generated.ts
new file mode 100644
index 00000000..22ff0cf0
--- /dev/null
+++ b/src/schemas/ai-agent-long-text-tool.generated.ts
@@ -0,0 +1,10 @@
+import * as schemas from '.';
+/**
+ * AI agent long text tool
+ *
+ * AI agent tool used to handle longer text.
+ */
+export interface AiAgentLongTextTool
+  extends schemas.AiAgentBasicTextToolTextGen {
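+  /**
+   * The embeddings configuration used by the AI agent. In the default configuration this carries,
+   * for example, `{ model: 'openai__text_embedding_ada_002', strategy: { id: 'basic', num_tokens_per_chunk: 64 } }`.
+   */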
+  embeddings?: object;
+}
diff --git a/src/schemas/ai-agent-text-gen.generated.ts b/src/schemas/ai-agent-text-gen.generated.ts
new file mode 100644
index 00000000..3292d901
--- /dev/null
+++ b/src/schemas/ai-agent-text-gen.generated.ts
@@ -0,0 +1,14 @@
+import * as schemas from '.';
+/**
+ * AI agent for text generation requests
+ *
+ * The AI agent used for generating text.
+ */
+export interface AiAgentTextGen {
+  /**
+   * The type of AI agent used for generating text.
+   * Example: ai_agent_text_gen
+   */
+  type: 'ai_agent_text_gen';
+  basic_gen?: schemas.AiAgentBasicGenTool;
+}
diff --git a/src/schemas/ai-ask.generated.ts b/src/schemas/ai-ask.generated.ts
index e0a1e828..46124e4c 100644
--- a/src/schemas/ai-ask.generated.ts
+++ b/src/schemas/ai-ask.generated.ts
@@ -1,8 +1,8 @@
 import * as schemas from '.';
 /**
- * AI Ask Request
+ * AI ask request
  *
- * AI Ask request object
+ * AI ask request object
  */
 export interface AiAsk {
   /**
@@ -27,4 +27,14 @@ export interface AiAsk {
     type?: string;
     content?: string;
   }[];
+  /**
+   * The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response.
+   */
+  dialogue_history?: schemas.AiDialogueHistory[];
+  /**
+   * A flag to indicate whether citations should be returned.
+   * Example: true
+   */
+  include_citations?: boolean;
+  ai_agent?: schemas.AiAgentAsk;
 }
diff --git a/src/schemas/ai-citation.generated.ts b/src/schemas/ai-citation.generated.ts
new file mode 100644
index 00000000..c1ba2450
--- /dev/null
+++ b/src/schemas/ai-citation.generated.ts
@@ -0,0 +1,28 @@
+import * as schemas from '.';
+/**
+ * The citation of the LLM's answer reference
+ *
+ * The citation of the LLM's answer reference.
+ */
+export interface AiCitation {
+  /**
+   * The specific content from where the answer was referenced.
+   * Example: Public APIs are key drivers of innovation and growth.
+   */
+  content?: string;
+  /**
+   * The id of the item.
+   * Example: 123
+   */
+  id?: string;
+  /**
+   * The type of the item.
+   * Example: file
+   */
+  type?: 'file';
+  /**
+   * The name of the item.
+   * Example: The importance of public APIs.pdf
+   */
+  name?: string;
+}
diff --git a/src/schemas/ai-dialogue-history.generated.ts b/src/schemas/ai-dialogue-history.generated.ts
new file mode 100644
index 00000000..893d22e3
--- /dev/null
+++ b/src/schemas/ai-dialogue-history.generated.ts
@@ -0,0 +1,23 @@
+import * as schemas from '.';
+/**
+ * Dialogue history
+ *
+ * A context object that can hold prior prompts and answers.
+ */
+export interface AiDialogueHistory {
+  /**
+   * The prompt previously provided by the client and answered by the LLM.
+   * Example: Make my email about public APIs sound more professional.
+   */
+  prompt?: string;
+  /**
+   * The answer previously provided by the LLM.
+   * Example: Here is the first draft of your professional email about public APIs.
+   */
+  answer?: string;
+  /**
+   * The ISO date formatted timestamp of when the previous answer to the prompt was created.
+   * Example: 2012-12-12T10:53:43-08:00
+   */
+  created_at?: string;
+}
diff --git a/src/schemas/ai-llm-endpoint-params-google.generated.ts b/src/schemas/ai-llm-endpoint-params-google.generated.ts
new file mode 100644
index 00000000..c0ee95aa
--- /dev/null
+++ b/src/schemas/ai-llm-endpoint-params-google.generated.ts
@@ -0,0 +1,32 @@
+import * as schemas from '.';
+/**
+ * AI LLM endpoint params Google
+ *
+ * AI LLM endpoint params Google object
+ */
+export interface AiLlmEndpointParamsGoogle {
+  /**
+   * The type of the AI LLM endpoint params object for Google.
+   * This parameter is **required**.
+   * Example: google_params
+   */
+  type: 'google_params';
+  /**
+   * The temperature is used for sampling during response generation, which occurs when `top-P` and `top-K` are applied.
+   * Temperature controls the degree of randomness in token selection.
+   */
+  temperature?: number;
+  /**
+   * `Top-P` changes how the model selects tokens for output. Tokens are selected from the most (see `top-K`) to least probable
+   * until the sum of their probabilities equals the `top-P` value.
+   * Example: 1
+   */
+  top_p?: number;
+  /**
+   * `Top-K` changes how the model selects tokens for output. A `top-K` of 1 means the next selected token is
+   * the most probable among all tokens in the model's vocabulary (also called greedy decoding),
+   * while a `top-K` of 3 means that the next token is selected from among the three most probable tokens by using temperature.
+   * Example: 1
+   */
+  top_k?: number;
+}
diff --git a/src/schemas/ai-llm-endpoint-params-open-ai.generated.ts b/src/schemas/ai-llm-endpoint-params-open-ai.generated.ts
new file mode 100644
index 00000000..010c2989
--- /dev/null
+++ b/src/schemas/ai-llm-endpoint-params-open-ai.generated.ts
@@ -0,0 +1,44 @@
+import * as schemas from '.';
+/**
+ * AI LLM endpoint params OpenAI
+ *
+ * AI LLM endpoint params OpenAI object.
+ */
+export interface AiLlmEndpointParamsOpenAi {
+  /**
+   * The type of the AI LLM endpoint params object for OpenAI.
+   * This parameter is **required**.
+   * Example: openai_params
+   */
+  type: 'openai_params';
+  /**
+   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
+   * while lower values like 0.2 will make it more focused and deterministic.
+   * We generally recommend altering this or `top_p` but not both.
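+   * Example: 0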
+   */
+  temperature?: number;
+  /**
+   * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results
+   * of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability
+   * mass are considered. We generally recommend altering this or temperature but not both.
+   * Example: 1
+   */
+  top_p?: number;
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the
+   * text so far, decreasing the model's likelihood to repeat the same line verbatim.
+   * Example: 1.5
+   */
+  frequency_penalty?: number;
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
+   * increasing the model's likelihood to talk about new topics.
+   * Example: 1.5
+   */
+  presence_penalty?: number;
+  /**
+   * Up to 4 sequences where the API will stop generating further tokens.
+   * Example: <|im_end|>
+   */
+  stop?: string;
+}
diff --git a/src/schemas/ai-response-full.generated.ts b/src/schemas/ai-response-full.generated.ts
new file mode 100644
index 00000000..8a223518
--- /dev/null
+++ b/src/schemas/ai-response-full.generated.ts
@@ -0,0 +1,12 @@
+import * as schemas from '.';
+/**
+ * AI response (Full)
+ *
+ * AI ask response
+ */
+export interface AiResponseFull extends schemas.AiResponse {
+  /**
+   * The citations of the LLM's answer reference.
+   */
+  citations?: schemas.AiCitation[];
+}
diff --git a/src/schemas/ai-response.generated.ts b/src/schemas/ai-response.generated.ts
index 7b0b9e0e..56b8c41d 100644
--- a/src/schemas/ai-response.generated.ts
+++ b/src/schemas/ai-response.generated.ts
@@ -1,6 +1,6 @@
 import * as schemas from '.';
 /**
- * AI Response
+ * AI response
  *
  * AI response
  */
diff --git a/src/schemas/ai-text-gen.generated.ts b/src/schemas/ai-text-gen.generated.ts
index 829083ef..51f1adcf 100644
--- a/src/schemas/ai-text-gen.generated.ts
+++ b/src/schemas/ai-text-gen.generated.ts
@@ -1,8 +1,8 @@
 import * as schemas from '.';
 /**
- * AI Text Gen Request
+ * AI text gen request
  *
- * AI Text Gen Request object
+ * AI text gen request object
  */
 export interface AiTextGen {
   /**
@@ -25,9 +25,6 @@ export interface AiTextGen {
   /**
    * The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response.
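+   * Each entry follows the `AiDialogueHistory` schema (`prompt`, `answer`, `created_at`).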
    */
-  dialogue_history?: {
-    answer?: string;
-    created_at?: string;
-    prompt?: string;
-  }[];
+  dialogue_history?: schemas.AiDialogueHistory[];
+  ai_agent?: schemas.AiAgentTextGen;
 }
diff --git a/src/schemas/index.generated.ts b/src/schemas/index.generated.ts
index 5f309d7c..0f82d9dc 100644
--- a/src/schemas/index.generated.ts
+++ b/src/schemas/index.generated.ts
@@ -1,4 +1,15 @@
+export * from './ai-agent-ask.generated';
+export * from './ai-agent-basic-gen-tool.generated';
+export * from './ai-agent-basic-text-tool-ask.generated';
+export * from './ai-agent-basic-text-tool-text-gen.generated';
+export * from './ai-agent-long-text-tool.generated';
+export * from './ai-agent-text-gen.generated';
 export * from './ai-ask.generated';
+export * from './ai-citation.generated';
+export * from './ai-dialogue-history.generated';
+export * from './ai-llm-endpoint-params-open-ai.generated';
+export * from './ai-llm-endpoint-params-google.generated';
+export * from './ai-response-full.generated';
 export * from './ai-response.generated';
 export * from './ai-text-gen.generated';
 export * from './enterprise-base.generated';
diff --git a/tests/integration_test/__tests__/ai.test.js b/tests/integration_test/__tests__/ai.test.js
index 56e32841..9c2f9dd1 100644
--- a/tests/integration_test/__tests__/ai.test.js
+++ b/tests/integration_test/__tests__/ai.test.js
@@ -47,6 +47,12 @@ test('test AI send ask', async() => {
       content: 'The sun rises in the east',
     },
   ],
+  ai_agent: {
+    type: 'ai_agent_ask',
+    basic_text_multi: {
+      model: 'openai__gpt_3_5_turbo'
+    }
+  }
 });
 
 expect(response).toBeDefined();
@@ -83,8 +89,25 @@ test('test AI text gen', async() => {
     },
   ],
   dialogue_history: dialogueHistory,
+  ai_agent: {
+    type: 'ai_agent_text_gen',
+    basic_gen: {
+      model: 'openai__gpt_3_5_turbo_16k'
+    }
+  }
 });
 
 expect(response).toBeDefined();
 expect(response.answer.toLowerCase().indexOf('api')).toBeGreaterThan(-1);
});
+
+
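+// Assumes the default text_gen agent echoes the requested model back in `basic_gen.model`.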
+test('test AI get default agent', async() => {
+  const agent = await context.client.ai.getAiAgentDefaultConfig({
+    mode: 'text_gen',
+    language: 'en',
+    model: 'openai__gpt_3_5_turbo'
+  });
+  expect(agent.type).toBe('ai_agent_text_gen');
+  expect(agent.basic_gen.model).toBe('openai__gpt_3_5_turbo');
+});
diff --git a/tests/lib/managers/ai-test.js b/tests/lib/managers/ai-test.js
index 43fc127e..8583b041 100644
--- a/tests/lib/managers/ai-test.js
+++ b/tests/lib/managers/ai-test.js
@@ -65,7 +65,7 @@ describe('AI', function() {
         mode,
         prompt,
       },
-      qs: { }
+      qs: {},
     };
     sandbox.stub(boxClientFake, 'wrapWithDefaultHandler').returnsArg(0);
     sandbox
@@ -73,13 +73,15 @@ describe('AI', function() {
       .expects('post')
      .withArgs('/ai/ask', expectedParams)
       .returns(Promise.resolve(answer));
-    return aimodule.ask({
-      items: [{ type: itemType, id: itemId }],
-      mode,
-      prompt,
-    }).then(data => {
-      assert.equal(data, answer);
-    });
+    return aimodule
+      .ask({
+        items: [{ type: itemType, id: itemId }],
+        mode,
+        prompt,
+      })
+      .then(data => {
+        assert.equal(data, answer);
+      });
  });
 });
@@ -118,7 +120,7 @@ describe('AI', function() {
        dialogue_history: dialogueHistory,
        prompt,
      },
-      qs: { }
+      qs: {},
    };
    sandbox.stub(boxClientFake, 'wrapWithDefaultHandler').returnsArg(0);
    sandbox
@@ -126,12 +128,120 @@ describe('AI', function() {
      .expects('post')
      .withArgs('/ai/text_gen', expectedParams)
      .returns(Promise.resolve(answer));
-    return aimodule.textGen({
-      items: [{ type: itemType, id: itemId }],
-      dialogue_history: dialogueHistory,
-      prompt,
+    return aimodule
+      .textGen({
+        items: [{ type: itemType, id: itemId }],
+        dialogue_history: dialogueHistory,
+        prompt,
+      })
+      .then(data => {
+        assert.equal(data, answer);
+      });
+  });
+});
+
+describe('getAiAgentDefaultConfig()', function() {
+  const agent = {
+    type: 'ai_agent_ask',
+    basic_text: {
+      llm_endpoint_params: {
+        type: 'openai_params',
+        frequency_penalty: 1.5,
+        presence_penalty: 1.5,
+        stop: '<|im_end|>',
+        temperature: 0,
+        top_p: 1,
+      },
+      model: 'openai__gpt_3_5_turbo',
+      num_tokens_for_completion: 8400,
+      prompt_template:
+        'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?',
+      system_message:
+        'You are a helpful travel assistant specialized in budget travel',
+    },
+    basic_text_multi: {
+      llm_endpoint_params: {
+        type: 'openai_params',
+        frequency_penalty: 1.5,
+        presence_penalty: 1.5,
+        stop: '<|im_end|>',
+        temperature: 0,
+        top_p: 1,
+      },
+      model: 'openai__gpt_3_5_turbo',
+      num_tokens_for_completion: 8400,
+      prompt_template:
+        'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?',
+      system_message:
+        'You are a helpful travel assistant specialized in budget travel',
+    },
+    long_text: {
+      embeddings: {
+        model: 'openai__text_embedding_ada_002',
+        strategy: {
+          id: 'basic',
+          num_tokens_per_chunk: 64,
+        },
+      },
+      llm_endpoint_params: {
+        type: 'openai_params',
+        frequency_penalty: 1.5,
+        presence_penalty: 1.5,
+        stop: '<|im_end|>',
+        temperature: 0,
+        top_p: 1,
+      },
+      model: 'openai__gpt_3_5_turbo',
+      num_tokens_for_completion: 8400,
+      prompt_template:
+        'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?',
+      system_message:
+        'You are a helpful travel assistant specialized in budget travel',
+    },
+    long_text_multi: {
+      embeddings: {
+        model: 'openai__text_embedding_ada_002',
+        strategy: {
+          id: 'basic',
+          num_tokens_per_chunk: 64,
+        },
+      },
+      llm_endpoint_params: {
+        type: 'openai_params',
+        frequency_penalty: 1.5,
+        presence_penalty: 1.5,
+        stop: '<|im_end|>',
+        temperature: 0,
+        top_p: 1,
+      },
+      model: 'openai__gpt_3_5_turbo',
+      num_tokens_for_completion: 8400,
+      prompt_template:
+        'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What should I see?',
+      system_message:
+        'You are a helpful travel assistant specialized in budget travel',
+    },
+  };
+  it('should make GET request to get default AI agent', function() {
+    const expected_params = {
+      qs: {
+        mode: 'ask',
+        language: 'en',
+        model: 'openai__gpt_3_5_turbo',
+      },
+    };
+    sandbox.stub(boxClientFake, 'wrapWithDefaultHandler').returnsArg(0);
+    sandbox
+      .mock(boxClientFake)
+      .expects('get')
+      .withArgs('/ai_agent_default', expected_params)
+      .returns(Promise.resolve(agent));
+    return aimodule.getAiAgentDefaultConfig({
+      mode: 'ask',
+      language: 'en',
+      model: 'openai__gpt_3_5_turbo',
     }).then(data => {
-      assert.equal(data, answer);
+      assert.equal(data, agent);
     });
   });
 });