From ebfa2e9ab8ecbe4bc9adaddd3e4a60e3ba84d0d9 Mon Sep 17 00:00:00 2001 From: Amy Chisholm Date: Wed, 4 Sep 2024 13:44:03 -0700 Subject: [PATCH] test: Converted `llm-events` tests to use `node:test` (#2535) --- .../aws-bedrock/bedrock-command.test.js | 318 ++++++++-------- .../aws-bedrock/bedrock-response.test.js | 247 ++++++------- .../chat-completion-message.test.js | 133 ++++--- .../chat-completion-summary.test.js | 126 +++---- .../llm-events/aws-bedrock/embedding.test.js | 49 ++- .../unit/llm-events/aws-bedrock/error.test.js | 59 ++- .../unit/llm-events/aws-bedrock/event.test.js | 57 +-- .../aws-bedrock/stream-handler.test.js | 182 ++++----- test/unit/llm-events/error.test.js | 16 +- test/unit/llm-events/feedback-message.test.js | 8 +- .../langchain/chat-completion-message.test.js | 68 ++-- .../langchain/chat-completion-summary.test.js | 50 +-- test/unit/llm-events/langchain/event.test.js | 100 ++--- test/unit/llm-events/langchain/tool.test.js | 68 ++-- .../langchain/vector-search-result.test.js | 60 +-- .../langchain/vector-search.test.js | 56 +-- .../openai/chat-completion-message.test.js | 346 +++++++++--------- .../openai/chat-completion-summary.test.js | 112 +++--- test/unit/llm-events/openai/embedding.test.js | 259 ++++++------- 19 files changed, 1168 insertions(+), 1146 deletions(-) diff --git a/test/unit/llm-events/aws-bedrock/bedrock-command.test.js b/test/unit/llm-events/aws-bedrock/bedrock-command.test.js index e19da844aa..639d11ef54 100644 --- a/test/unit/llm-events/aws-bedrock/bedrock-command.test.js +++ b/test/unit/llm-events/aws-bedrock/bedrock-command.test.js @@ -5,7 +5,8 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const structuredClone = require('./clone') const BedrockCommand = require('../../../../lib/llm-events/aws-bedrock/bedrock-command') @@ -73,19 +74,20 @@ const titanEmbed = { } } -tap.beforeEach((t) => { - t.context.input = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.input = { body: JSON.stringify('{"foo":"foo"}') } - t.context.updatePayload = (payload) => { - t.context.input.modelId = payload.modelId - t.context.input.body = JSON.stringify(payload.body) + ctx.nr.updatePayload = (payload) => { + ctx.nr.input.modelId = payload.modelId + ctx.nr.input.body = JSON.stringify(payload.body) } }) -tap.test('non-conforming command is handled gracefully', async (t) => { - const cmd = new BedrockCommand(t.context.input) +test('non-conforming command is handled gracefully', async (t) => { + const cmd = new BedrockCommand(t.nr.input) for (const model of [ 'Ai21', 'Claude', @@ -96,210 +98,210 @@ tap.test('non-conforming command is handled gracefully', async (t) => { 'Titan', 'TitanEmbed' ]) { - t.equal(cmd[`is${model}`](), false) + assert.equal(cmd[`is${model}`](), false) } - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, '') - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, undefined) - t.equal(cmd.temperature, undefined) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, '') + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, undefined) + assert.equal(cmd.temperature, undefined) }) -tap.test('ai21 minimal command works', async (t) => { - t.context.updatePayload(structuredClone(ai21)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isAi21(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, ai21.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, ai21.body.prompt) - 
t.equal(cmd.temperature, undefined) +test('ai21 minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(ai21)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isAi21(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, ai21.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, ai21.body.prompt) + assert.equal(cmd.temperature, undefined) }) -tap.test('ai21 complete command works', async (t) => { +test('ai21 complete command works', async (t) => { const payload = structuredClone(ai21) payload.body.maxTokens = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isAi21(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.prompt) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isAi21(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.prompt) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('claude minimal command works', async (t) => { - t.context.updatePayload(structuredClone(claude)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isClaude(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, claude.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, claude.body.prompt) - t.equal(cmd.temperature, undefined) +test('claude minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(claude)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isClaude(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, claude.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, claude.body.prompt) + assert.equal(cmd.temperature, undefined) }) -tap.test('claude complete command works', async (t) => { +test('claude complete command works', async (t) => { const payload = structuredClone(claude) payload.body.max_tokens_to_sample = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isClaude(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.prompt) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isClaude(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.prompt) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('claude3 minimal command works', async (t) => { - t.context.updatePayload(structuredClone(claude3)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isClaude3(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, claude3.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, claude3.body.messages[0].content) - t.equal(cmd.temperature, undefined) +test('claude3 minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(claude3)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isClaude3(), true) 
+ assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, claude3.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, claude3.body.messages[0].content) + assert.equal(cmd.temperature, undefined) }) -tap.test('claude3 complete command works', async (t) => { +test('claude3 complete command works', async (t) => { const payload = structuredClone(claude3) payload.body.max_tokens = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isClaude3(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.messages[0].content) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isClaude3(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.messages[0].content) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('cohere minimal command works', async (t) => { - t.context.updatePayload(structuredClone(cohere)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isCohere(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, cohere.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, cohere.body.prompt) - t.equal(cmd.temperature, undefined) +test('cohere minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(cohere)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isCohere(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, cohere.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, cohere.body.prompt) + assert.equal(cmd.temperature, undefined) }) -tap.test('cohere complete command works', async (t) => { +test('cohere complete command works', async (t) => { const payload = structuredClone(cohere) payload.body.max_tokens = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isCohere(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.prompt) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isCohere(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.prompt) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('cohere embed minimal command works', async (t) => { - t.context.updatePayload(structuredClone(cohereEmbed)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isCohereEmbed(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, cohereEmbed.modelId) - t.equal(cmd.modelType, 'embedding') - t.same(cmd.prompt, cohereEmbed.body.texts.join(' ')) - t.equal(cmd.temperature, undefined) +test('cohere embed minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(cohereEmbed)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isCohereEmbed(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, cohereEmbed.modelId) + assert.equal(cmd.modelType, 
'embedding') + assert.deepStrictEqual(cmd.prompt, cohereEmbed.body.texts.join(' ')) + assert.equal(cmd.temperature, undefined) }) -tap.test('llama2 minimal command works', async (t) => { - t.context.updatePayload(structuredClone(llama2)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isLlama(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, llama2.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, llama2.body.prompt) - t.equal(cmd.temperature, undefined) +test('llama2 minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(llama2)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isLlama(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, llama2.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, llama2.body.prompt) + assert.equal(cmd.temperature, undefined) }) -tap.test('llama2 complete command works', async (t) => { +test('llama2 complete command works', async (t) => { const payload = structuredClone(llama2) payload.body.max_gen_length = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isLlama(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.prompt) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isLlama(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.prompt) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('llama3 minimal command works', async (t) => { - t.context.updatePayload(structuredClone(llama3)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isLlama(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, llama3.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, llama3.body.prompt) - t.equal(cmd.temperature, undefined) +test('llama3 minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(llama3)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isLlama(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, llama3.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, llama3.body.prompt) + assert.equal(cmd.temperature, undefined) }) -tap.test('llama3 complete command works', async (t) => { +test('llama3 complete command works', async (t) => { const payload = structuredClone(llama3) payload.body.max_gen_length = 25 payload.body.temperature = 0.5 - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isLlama(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.prompt) - t.equal(cmd.temperature, payload.body.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isLlama(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.prompt) + assert.equal(cmd.temperature, payload.body.temperature) }) -tap.test('titan minimal command works', async (t) => { - 
t.context.updatePayload(structuredClone(titan)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isTitan(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, titan.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, titan.body.inputText) - t.equal(cmd.temperature, undefined) +test('titan minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(titan)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isTitan(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, titan.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, titan.body.inputText) + assert.equal(cmd.temperature, undefined) }) -tap.test('titan complete command works', async (t) => { +test('titan complete command works', async (t) => { const payload = structuredClone(titan) payload.body.textGenerationConfig = { maxTokenCount: 25, temperature: 0.5 } - t.context.updatePayload(payload) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isTitan(), true) - t.equal(cmd.maxTokens, 25) - t.equal(cmd.modelId, payload.modelId) - t.equal(cmd.modelType, 'completion') - t.equal(cmd.prompt, payload.body.inputText) - t.equal(cmd.temperature, payload.body.textGenerationConfig.temperature) + t.nr.updatePayload(payload) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isTitan(), true) + assert.equal(cmd.maxTokens, 25) + assert.equal(cmd.modelId, payload.modelId) + assert.equal(cmd.modelType, 'completion') + assert.equal(cmd.prompt, payload.body.inputText) + assert.equal(cmd.temperature, payload.body.textGenerationConfig.temperature) }) -tap.test('titan embed minimal command works', async (t) => { - t.context.updatePayload(structuredClone(titanEmbed)) - const cmd = new BedrockCommand(t.context.input) - t.equal(cmd.isTitanEmbed(), true) - t.equal(cmd.maxTokens, undefined) - t.equal(cmd.modelId, titanEmbed.modelId) - t.equal(cmd.modelType, 'embedding') - t.equal(cmd.prompt, titanEmbed.body.inputText) - t.equal(cmd.temperature, undefined) +test('titan embed minimal command works', async (t) => { + t.nr.updatePayload(structuredClone(titanEmbed)) + const cmd = new BedrockCommand(t.nr.input) + assert.equal(cmd.isTitanEmbed(), true) + assert.equal(cmd.maxTokens, undefined) + assert.equal(cmd.modelId, titanEmbed.modelId) + assert.equal(cmd.modelType, 'embedding') + assert.equal(cmd.prompt, titanEmbed.body.inputText) + assert.equal(cmd.temperature, undefined) }) diff --git a/test/unit/llm-events/aws-bedrock/bedrock-response.test.js b/test/unit/llm-events/aws-bedrock/bedrock-response.test.js index e2a6cdb976..b4047c7324 100644 --- a/test/unit/llm-events/aws-bedrock/bedrock-response.test.js +++ b/test/unit/llm-events/aws-bedrock/bedrock-response.test.js @@ -5,7 +5,8 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const structuredClone = require('./clone') const BedrockResponse = require('../../../../lib/llm-events/aws-bedrock/bedrock-response') @@ -52,8 +53,9 @@ const titan = { ] } -tap.beforeEach((t) => { - t.context.response = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.response = { response: { statusCode: 200, headers: { @@ -66,7 +68,7 @@ tap.beforeEach((t) => { } } - t.context.bedrockCommand = { + ctx.nr.bedrockCommand = { isAi21() { return false }, @@ -87,148 +89,147 @@ tap.beforeEach((t) => { } } - t.context.updatePayload = (payload) => { - t.context.response.output.body = new 
TextEncoder().encode(JSON.stringify(payload)) + ctx.nr.updatePayload = (payload) => { + ctx.nr.response.output.body = new TextEncoder().encode(JSON.stringify(payload)) } }) -tap.test('non-conforming response is handled gracefully', async (t) => { - delete t.context.response.response.headers - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, undefined) - t.equal(res.id, undefined) - t.equal(res.requestId, undefined) - t.equal(res.statusCode, 200) +test('non-conforming response is handled gracefully', async (t) => { + delete t.nr.response.response.headers + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, undefined) + assert.equal(res.id, undefined) + assert.equal(res.requestId, undefined) + assert.equal(res.statusCode, 200) }) -tap.test('ai21 malformed responses work', async (t) => { - t.context.bedrockCommand.isAi21 = () => true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('ai21 malformed responses work', async (t) => { + t.nr.bedrockCommand.isAi21 = () => true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('ai21 complete responses work', async (t) => { - t.context.bedrockCommand.isAi21 = () => true - t.context.updatePayload(structuredClone(ai21)) - const res = new BedrockResponse(t.context) - t.same(res.completions, ['ai21-response']) - t.equal(res.finishReason, 'done') - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, 'ai21-response-1') - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('ai21 complete responses work', async (t) => { + t.nr.bedrockCommand.isAi21 = () => true + t.nr.updatePayload(structuredClone(ai21)) + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, ['ai21-response']) + assert.equal(res.finishReason, 'done') + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, 'ai21-response-1') + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('claude malformed responses work', async (t) => { - t.context.bedrockCommand.isClaude = () => true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('claude malformed responses work', async (t) => { + t.nr.bedrockCommand.isClaude = () => true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('claude complete responses work', async (t) => { - 
t.context.bedrockCommand.isClaude = () => true - t.context.updatePayload(structuredClone(claude)) - const res = new BedrockResponse(t.context) - t.same(res.completions, ['claude-response']) - t.equal(res.finishReason, 'done') - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('claude complete responses work', async (t) => { + t.nr.bedrockCommand.isClaude = () => true + t.nr.updatePayload(structuredClone(claude)) + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, ['claude-response']) + assert.equal(res.finishReason, 'done') + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('cohere malformed responses work', async (t) => { - t.context.bedrockCommand.isCohere = () => true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('cohere malformed responses work', async (t) => { + t.nr.bedrockCommand.isCohere = () => true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('cohere complete responses work', async (t) => { - t.context.bedrockCommand.isCohere = () => true - t.context.updatePayload(structuredClone(cohere)) - const res = new BedrockResponse(t.context) - t.same(res.completions, ['cohere-response']) - t.equal(res.finishReason, 'done') - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, 'cohere-response-1') - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('cohere complete responses work', async (t) => { + t.nr.bedrockCommand.isCohere = () => true + t.nr.updatePayload(structuredClone(cohere)) + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, ['cohere-response']) + assert.equal(res.finishReason, 'done') + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, 'cohere-response-1') + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('llama malformed responses work', async (t) => { - t.context.bedrockCommand.isLlama = () => true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('llama malformed responses work', async (t) => { + t.nr.bedrockCommand.isLlama = () => true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('llama complete responses work', async (t) => { - t.context.bedrockCommand.isLlama = () => true - 
t.context.updatePayload(structuredClone(llama)) - const res = new BedrockResponse(t.context) - t.same(res.completions, ['llama-response']) - t.equal(res.finishReason, 'done') - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('llama complete responses work', async (t) => { + t.nr.bedrockCommand.isLlama = () => true + t.nr.updatePayload(structuredClone(llama)) + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, ['llama-response']) + assert.equal(res.finishReason, 'done') + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('titan malformed responses work', async (t) => { - t.context.bedrockCommand.isTitan = () => true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('titan malformed responses work', async (t) => { + t.nr.bedrockCommand.isTitan = () => true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('titan complete responses work', async (t) => { - t.context.bedrockCommand.isTitan = () => true - t.context.updatePayload(structuredClone(titan)) - const res = new BedrockResponse(t.context) - t.same(res.completions, ['titan-response']) - t.equal(res.finishReason, 'done') - t.same(res.headers, t.context.response.response.headers) - t.equal(res.id, undefined) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) +test('titan complete responses work', async (t) => { + t.nr.bedrockCommand.isTitan = () => true + t.nr.updatePayload(structuredClone(titan)) + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, ['titan-response']) + assert.equal(res.finishReason, 'done') + assert.deepStrictEqual(res.headers, t.nr.response.response.headers) + assert.equal(res.id, undefined) + assert.equal(res.requestId, 'aws-request-1') + assert.equal(res.statusCode, 200) }) -tap.test('should only set data from raw response on error', (t) => { - t.context.response.$response = { ...t.context.response.response } - delete t.context.response.response - delete t.context.response.output - t.context.isError = true - const res = new BedrockResponse(t.context) - t.same(res.completions, []) - t.equal(res.id, undefined) - t.equal(res.finishReason, undefined) - t.same(res.headers, t.context.response.$response.headers) - t.equal(res.requestId, 'aws-request-1') - t.equal(res.statusCode, 200) - t.end() +test('should only set data from raw response on error', (t) => { + t.nr.response.$response = { ...t.nr.response.response } + delete t.nr.response.response + delete t.nr.response.output + t.nr.isError = true + const res = new BedrockResponse(t.nr) + assert.deepStrictEqual(res.completions, []) + assert.equal(res.id, undefined) + assert.equal(res.finishReason, undefined) + assert.deepStrictEqual(res.headers, t.nr.response.$response.headers) + assert.equal(res.requestId, 'aws-request-1') + 
assert.equal(res.statusCode, 200) }) diff --git a/test/unit/llm-events/aws-bedrock/chat-completion-message.test.js b/test/unit/llm-events/aws-bedrock/chat-completion-message.test.js index 218daf0244..4e484e84e6 100644 --- a/test/unit/llm-events/aws-bedrock/chat-completion-message.test.js +++ b/test/unit/llm-events/aws-bedrock/chat-completion-message.test.js @@ -5,14 +5,16 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const { DESTINATIONS: { TRANS_SCOPE } } = require('../../../../lib/config/attribute-filter') const LlmChatCompletionMessage = require('../../../../lib/llm-events/aws-bedrock/chat-completion-message') -tap.beforeEach((t) => { - t.context.agent = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = { llm: {}, config: { applications() { @@ -31,7 +33,7 @@ tap.beforeEach((t) => { trace: { custom: { get(key) { - t.equal(key, TRANS_SCOPE) + assert.equal(key, TRANS_SCOPE) return { ['llm.conversation_id']: 'conversation-1' } @@ -43,11 +45,11 @@ tap.beforeEach((t) => { } } - t.context.completionId = 'completion-1' + ctx.nr.completionId = 'completion-1' - t.context.content = 'a prompt' + ctx.nr.content = 'a prompt' - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { id: 'tx-1', @@ -55,7 +57,7 @@ tap.beforeEach((t) => { } } - t.context.bedrockResponse = { + ctx.nr.bedrockResponse = { headers: { 'x-amzn-requestid': 'request-1' }, @@ -67,7 +69,7 @@ tap.beforeEach((t) => { } } - t.context.bedrockCommand = { + ctx.nr.bedrockCommand = { id: 'cmd-1', prompt: 'who are you', isAi21() { @@ -88,69 +90,66 @@ tap.beforeEach((t) => { } }) -tap.test('create creates a non-response instance', async (t) => { - t.context.agent.llm.tokenCountCallback = () => 3 - const event = new LlmChatCompletionMessage(t.context) - t.equal(event.is_response, false) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.completion_id, 'completion-1') - t.equal(event.sequence, 0) - t.equal(event.content, 'who are you') - t.equal(event.role, 'user') - t.match(event.id, /[\w-]{36}/) - t.equal(event.token_count, 3) +test('create creates a non-response instance', async (t) => { + t.nr.agent.llm.tokenCountCallback = () => 3 + const event = new LlmChatCompletionMessage(t.nr) + assert.equal(event.is_response, false) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.completion_id, 'completion-1') + assert.equal(event.sequence, 0) + assert.equal(event.content, 'who are you') + assert.equal(event.role, 'user') + assert.match(event.id, /[\w-]{36}/) + assert.equal(event.token_count, 3) }) -tap.test('create creates a titan response instance', async (t) => { - t.context.bedrockCommand.isTitan = () => true - t.context.content = 'a response' - t.context.isResponse = true - const event = new LlmChatCompletionMessage(t.context) - t.equal(event.is_response, true) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.completion_id, 'completion-1') - t.equal(event.sequence, 0) - t.equal(event.content, 'a response') - t.equal(event.role, 'assistant') - t.match(event.id, /[\w-]{36}-0/) +test('create creates a titan response instance', async (t) => { + t.nr.bedrockCommand.isTitan = () => true + t.nr.content = 'a response' + t.nr.isResponse = true + const event = new LlmChatCompletionMessage(t.nr) + assert.equal(event.is_response, true) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.completion_id, 'completion-1') + 
assert.equal(event.sequence, 0) + assert.equal(event.content, 'a response') + assert.equal(event.role, 'assistant') + assert.match(event.id, /[\w-]{36}-0/) }) -tap.test('create creates a cohere response instance', async (t) => { - t.context.bedrockCommand.isCohere = () => true - t.context.content = 'a response' - t.context.isResponse = true - t.context.bedrockResponse.id = 42 - const event = new LlmChatCompletionMessage(t.context) - t.equal(event.is_response, true) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.completion_id, 'completion-1') - t.equal(event.sequence, 0) - t.equal(event.content, 'a response') - t.equal(event.role, 'assistant') - t.match(event.id, /42-0/) +test('create creates a cohere response instance', async (t) => { + t.nr.bedrockCommand.isCohere = () => true + t.nr.content = 'a response' + t.nr.isResponse = true + t.nr.bedrockResponse.id = 42 + const event = new LlmChatCompletionMessage(t.nr) + assert.equal(event.is_response, true) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.completion_id, 'completion-1') + assert.equal(event.sequence, 0) + assert.equal(event.content, 'a response') + assert.equal(event.role, 'assistant') + assert.match(event.id, /42-0/) }) -tap.test('create creates a ai21 response instance when response.id is undefined', async (t) => { - t.context.bedrockCommand.isAi21 = () => true - t.context.content = 'a response' - t.context.isResponse = true - delete t.context.bedrockResponse.id - const event = new LlmChatCompletionMessage(t.context) - t.equal(event.is_response, true) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.completion_id, 'completion-1') - t.equal(event.sequence, 0) - t.equal(event.content, 'a response') - t.equal(event.role, 'assistant') - t.match(event.id, /[\w-]{36}-0/) +test('create creates a ai21 response instance when response.id is undefined', async (t) => { + t.nr.bedrockCommand.isAi21 = () => true + t.nr.content = 'a response' + t.nr.isResponse = true + delete t.nr.bedrockResponse.id + const event = new LlmChatCompletionMessage(t.nr) + assert.equal(event.is_response, true) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.completion_id, 'completion-1') + assert.equal(event.sequence, 0) + assert.equal(event.content, 'a response') + assert.equal(event.role, 'assistant') + assert.match(event.id, /[\w-]{36}-0/) }) -tap.test( - 'should not capture content when `ai_monitoring.record_content.enabled` is false', - async (t) => { - const { agent } = t.context - agent.config.ai_monitoring.record_content.enabled = false - const event = new LlmChatCompletionMessage(t.context) - t.equal(event.content, undefined, 'content should be empty') - } -) +test('should not capture content when `ai_monitoring.record_content.enabled` is false', async (t) => { + const { agent } = t.nr + agent.config.ai_monitoring.record_content.enabled = false + const event = new LlmChatCompletionMessage(t.nr) + assert.equal(event.content, undefined, 'content should be empty') +}) diff --git a/test/unit/llm-events/aws-bedrock/chat-completion-summary.test.js b/test/unit/llm-events/aws-bedrock/chat-completion-summary.test.js index f1704c7ede..0bc79f281f 100644 --- a/test/unit/llm-events/aws-bedrock/chat-completion-summary.test.js +++ b/test/unit/llm-events/aws-bedrock/chat-completion-summary.test.js @@ -5,14 +5,16 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const { DESTINATIONS: 
{ TRANS_SCOPE } } = require('../../../../lib/config/attribute-filter') const LlmChatCompletionSummary = require('../../../../lib/llm-events/aws-bedrock/chat-completion-summary') -tap.beforeEach((t) => { - t.context.agent = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = { config: { applications() { return ['test-app'] @@ -24,7 +26,7 @@ tap.beforeEach((t) => { trace: { custom: { get(key) { - t.equal(key, TRANS_SCOPE) + assert.equal(key, TRANS_SCOPE) return { ['llm.conversation_id']: 'conversation-1' } @@ -36,7 +38,7 @@ tap.beforeEach((t) => { } } - t.context.segment = { + ctx.nr.segment = { transaction: { id: 'tx-1' }, @@ -45,7 +47,7 @@ tap.beforeEach((t) => { } } - t.context.bedrockCommand = { + ctx.nr.bedrockCommand = { maxTokens: 25, temperature: 0.5, isAi21() { @@ -68,7 +70,7 @@ tap.beforeEach((t) => { } } - t.context.bedrockResponse = { + ctx.nr.bedrockResponse = { headers: { 'x-amzn-request-id': 'aws-request-1' }, @@ -77,69 +79,69 @@ tap.beforeEach((t) => { } }) -tap.test('creates a basic summary', async (t) => { - t.context.bedrockResponse.inputTokenCount = 0 - t.context.bedrockResponse.outputTokenCount = 0 - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates a basic summary', async (t) => { + t.nr.bedrockResponse.inputTokenCount = 0 + t.nr.bedrockResponse.outputTokenCount = 0 + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) -tap.test('creates an ai21 summary', async (t) => { - t.context.bedrockCommand.isAi21 = () => true - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates an ai21 summary', async (t) => { + t.nr.bedrockCommand.isAi21 = () => true + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) -tap.test('creates an claude summary', async (t) => { - t.context.bedrockCommand.isClaude = () => true - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates an claude summary', async (t) => { + t.nr.bedrockCommand.isClaude = () => true + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + 
assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) -tap.test('creates a cohere summary', async (t) => { - t.context.bedrockCommand.isCohere = () => true - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates a cohere summary', async (t) => { + t.nr.bedrockCommand.isCohere = () => true + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) -tap.test('creates a llama2 summary', async (t) => { - t.context.bedrockCommand.isLlama2 = () => true - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates a llama2 summary', async (t) => { + t.nr.bedrockCommand.isLlama2 = () => true + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) -tap.test('creates a titan summary', async (t) => { - t.context.bedrockCommand.isTitan = () => true - const event = new LlmChatCompletionSummary(t.context) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.duration, 100) - t.equal(event['request.max_tokens'], 25) - t.equal(event['request.temperature'], 0.5) - t.equal(event['response.choices.finish_reason'], 'done') - t.equal(event['response.number_of_messages'], 2) +test('creates a titan summary', async (t) => { + t.nr.bedrockCommand.isTitan = () => true + const event = new LlmChatCompletionSummary(t.nr) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.duration, 100) + assert.equal(event['request.max_tokens'], 25) + assert.equal(event['request.temperature'], 0.5) + assert.equal(event['response.choices.finish_reason'], 'done') + assert.equal(event['response.number_of_messages'], 2) }) diff --git a/test/unit/llm-events/aws-bedrock/embedding.test.js b/test/unit/llm-events/aws-bedrock/embedding.test.js index b3457211e9..801e7f64a4 100644 --- a/test/unit/llm-events/aws-bedrock/embedding.test.js +++ b/test/unit/llm-events/aws-bedrock/embedding.test.js @@ -5,14 +5,16 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const { DESTINATIONS: { TRANS_SCOPE } } = require('../../../../lib/config/attribute-filter') const LlmEmbedding = require('../../../../lib/llm-events/aws-bedrock/embedding') 
-tap.beforeEach((t) => { - t.context.agent = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = { llm: {}, config: { applications() { @@ -31,7 +33,7 @@ tap.beforeEach((t) => { trace: { custom: { get(key) { - t.equal(key, TRANS_SCOPE) + assert.equal(key, TRANS_SCOPE) return { ['llm.conversation_id']: 'conversation-1' } @@ -43,16 +45,16 @@ tap.beforeEach((t) => { } } - t.context.bedrockCommand = { + ctx.nr.bedrockCommand = { prompt: 'who are you' } - t.context.bedrockResponse = { + ctx.nr.bedrockResponse = { headers: { 'x-amzn-requestid': 'request-1' } } - t.context.segment = { + ctx.nr.segment = { transaction: { traceId: 'id' }, getDurationInMillis() { return 1.008 @@ -60,25 +62,22 @@ tap.beforeEach((t) => { } }) -tap.test('creates a basic embedding', async (t) => { - const event = new LlmEmbedding(t.context) - t.equal(event.input, 'who are you') - t.equal(event.duration, 1.008) - t.equal(event.token_count, undefined) +test('creates a basic embedding', async (t) => { + const event = new LlmEmbedding(t.nr) + assert.equal(event.input, 'who are you') + assert.equal(event.duration, 1.008) + assert.equal(event.token_count, undefined) }) -tap.test( - 'should not capture input when `ai_monitoring.record_content.enabled` is false', - async (t) => { - const { agent } = t.context - agent.config.ai_monitoring.record_content.enabled = false - const event = new LlmEmbedding(t.context) - t.equal(event.input, undefined, 'input should be empty') - } -) +test('should not capture input when `ai_monitoring.record_content.enabled` is false', async (t) => { + const { agent } = t.nr + agent.config.ai_monitoring.record_content.enabled = false + const event = new LlmEmbedding(t.nr) + assert.equal(event.input, undefined, 'input should be empty') +}) -tap.test('should capture token_count when callback is defined', async (t) => { - t.context.agent.llm.tokenCountCallback = () => 3 - const event = new LlmEmbedding(t.context) - t.equal(event.token_count, 3) +test('should capture token_count when callback is defined', async (t) => { + t.nr.agent.llm.tokenCountCallback = () => 3 + const event = new LlmEmbedding(t.nr) + assert.equal(event.token_count, 3) }) diff --git a/test/unit/llm-events/aws-bedrock/error.test.js b/test/unit/llm-events/aws-bedrock/error.test.js index 36e79d6aee..e589b384ba 100644 --- a/test/unit/llm-events/aws-bedrock/error.test.js +++ b/test/unit/llm-events/aws-bedrock/error.test.js @@ -5,52 +5,51 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmError = require('../../../../lib/llm-events/aws-bedrock/error') -tap.beforeEach((t) => { - t.context.bedrockResponse = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.bedrockResponse = { statusCode: 400 } - t.context.err = { + ctx.nr.err = { message: 'No soup for you', name: 'SoupRule' } - t.context.summary = { + ctx.nr.summary = { id: 'completion-id' } }) -tap.test('create creates a new instance', (t) => { - const err = new LlmError(t.context) - t.equal(err['http.statusCode'], 400) - t.equal(err['error.message'], 'No soup for you') - t.equal(err['error.code'], 'SoupRule') - t.equal(err.completion_id, 'completion-id') - t.notOk(err.embedding_id) - t.end() +test('create creates a new instance', (t) => { + const err = new LlmError(t.nr) + assert.equal(err['http.statusCode'], 400) + assert.equal(err['error.message'], 'No soup for you') + assert.equal(err['error.code'], 'SoupRule') + assert.equal(err.completion_id, 'completion-id') + assert.ok(!err.embedding_id) }) 
-tap.test('create error with embedding_id', (t) => { - delete t.context.summary - t.context.embedding = { id: 'embedding-id' } - const err = new LlmError(t.context) - t.equal(err['http.statusCode'], 400) - t.equal(err['error.message'], 'No soup for you') - t.equal(err['error.code'], 'SoupRule') - t.equal(err.embedding_id, 'embedding-id') - t.notOk(err.completion_id) - t.end() +test('create error with embedding_id', (t) => { + delete t.nr.summary + t.nr.embedding = { id: 'embedding-id' } + const err = new LlmError(t.nr) + assert.equal(err['http.statusCode'], 400) + assert.equal(err['error.message'], 'No soup for you') + assert.equal(err['error.code'], 'SoupRule') + assert.equal(err.embedding_id, 'embedding-id') + assert.ok(!err.completion_id) }) -tap.test('empty error', (t) => { +test('empty error', () => { const err = new LlmError() - t.notOk(err['http.statusCode']) - t.notOk(err['error.message']) - t.notOk(err['error.code']) - t.notOk(err.completion_id) - t.notOk(err.embedding_id) - t.end() + assert.ok(!err['http.statusCode']) + assert.ok(!err['error.message']) + assert.ok(!err['error.code']) + assert.ok(!err.completion_id) + assert.ok(!err.embedding_id) }) diff --git a/test/unit/llm-events/aws-bedrock/event.test.js b/test/unit/llm-events/aws-bedrock/event.test.js index 5100c72e5a..1d062b0bee 100644 --- a/test/unit/llm-events/aws-bedrock/event.test.js +++ b/test/unit/llm-events/aws-bedrock/event.test.js @@ -5,14 +5,16 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const { DESTINATIONS: { TRANS_SCOPE } } = require('../../../../lib/config/attribute-filter') const LlmEvent = require('../../../../lib/llm-events/aws-bedrock/event') -tap.beforeEach((t) => { - t.context.agent = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = { config: { applications() { return ['test-app'] @@ -24,7 +26,7 @@ tap.beforeEach((t) => { trace: { custom: { get(key) { - t.equal(key, TRANS_SCOPE) + assert.equal(key, TRANS_SCOPE) return { ['llm.conversation_id']: 'conversation-1', omit: 'me' @@ -37,44 +39,43 @@ tap.beforeEach((t) => { } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' } } - t.context.bedrockResponse = { + ctx.nr.bedrockResponse = { requestId: 'request-1' } - t.context.bedrockCommand = { + ctx.nr.bedrockCommand = { modelId: 'model-1' } }) -tap.test('create creates a new instance', async (t) => { - const event = new LlmEvent(t.context) - t.ok(event) - t.match(event.id, /[a-z0-9]{7}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}/) - t.equal(event.vendor, 'bedrock') - t.equal(event.ingest_source, 'Node') - t.equal(event.appName, 'test-app') - t.equal(event.span_id, 'segment-1') - t.equal(event.trace_id, 'trace-1') - t.equal(event.request_id, 'request-1') - t.equal(event['response.model'], 'model-1') - t.equal(event['request.model'], 'model-1') - t.equal(event['request.max_tokens'], null) - t.equal(event['llm.conversation_id'], 'conversation-1') - t.equal(event.omit, undefined) +test('create creates a new instance', async (t) => { + const event = new LlmEvent(t.nr) + assert.ok(event) + assert.match(event.id, /[a-z0-9]{7}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}/) + assert.equal(event.vendor, 'bedrock') + assert.equal(event.ingest_source, 'Node') + assert.equal(event.appName, 'test-app') + assert.equal(event.span_id, 'segment-1') + assert.equal(event.trace_id, 'trace-1') + assert.equal(event.request_id, 'request-1') + assert.equal(event['response.model'], 'model-1') + 
assert.equal(event['request.model'], 'model-1') + assert.equal(event['request.max_tokens'], null) + assert.equal(event['llm.conversation_id'], 'conversation-1') + assert.equal(event.omit, undefined) }) -tap.test('serializes the event', (t) => { - const event = new LlmEvent(t.context) +test('serializes the event', (t) => { + const event = new LlmEvent(t.nr) event.serialize() - t.notOk(event.bedrockCommand) - t.notOk(event.bedrockResponse) - t.notOk(event.constructionParams) - t.end() + assert.ok(!event.bedrockCommand) + assert.ok(!event.bedrockResponse) + assert.ok(!event.constructionParams) }) diff --git a/test/unit/llm-events/aws-bedrock/stream-handler.test.js b/test/unit/llm-events/aws-bedrock/stream-handler.test.js index 2d892178ec..a9762dfafe 100644 --- a/test/unit/llm-events/aws-bedrock/stream-handler.test.js +++ b/test/unit/llm-events/aws-bedrock/stream-handler.test.js @@ -5,15 +5,17 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const { BedrockCommand, BedrockResponse, StreamHandler } = require('../../../../lib/llm-events/aws-bedrock') -tap.beforeEach((t) => { - t.context.response = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.response = { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -25,11 +27,11 @@ tap.beforeEach((t) => { } } - t.context.passThroughParams = { - response: t.context.response, + ctx.nr.passThroughParams = { + response: ctx.nr.response, segment: { touch() { - t.pass() + assert.ok(true) } }, bedrockCommand: { @@ -54,16 +56,16 @@ tap.beforeEach((t) => { } } - t.context.onComplete = (params) => { - t.same(params, t.context.passThroughParams) + ctx.nr.onComplete = (params) => { + assert.deepStrictEqual(params, ctx.nr.passThroughParams) } - t.context.chunks = [{ foo: 'foo' }] + ctx.nr.chunks = [{ foo: 'foo' }] /* eslint-disable prettier/prettier */ // It doesn't like the IIFE syntax - t.context.stream = (async function* originalStream() { + ctx.nr.stream = (async function* originalStream() { const encoder = new TextEncoder() - for (const chunk of t.context.chunks) { + for (const chunk of ctx.nr.chunks) { const json = JSON.stringify(chunk) const bytes = encoder.encode(json) yield { chunk: { bytes } } @@ -72,26 +74,26 @@ tap.beforeEach((t) => { /* eslint-enable prettier/prettier */ }) -tap.test('unrecognized or unhandled model uses original stream', async (t) => { - t.context.modelId = 'amazon.titan-embed-text-v1' - const handler = new StreamHandler(t.context) - t.equal(handler.generator.name, undefined) - t.equal(handler.generator, t.context.stream) +test('unrecognized or unhandled model uses original stream', async (t) => { + t.nr.modelId = 'amazon.titan-embed-text-v1' + const handler = new StreamHandler(t.nr) + assert.equal(handler.generator.name, undefined) + assert.equal(handler.generator, t.nr.stream) }) -tap.test('handles claude streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isClaude = () => true - t.context.chunks = [ +test('handles claude streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isClaude = () => true + t.nr.chunks = [ { completion: '1', stop_reason: null }, - { completion: '2', stop_reason: 'done', ...t.context.metrics } + { completion: '2', stop_reason: 'done', ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleClaude') + assert.equal(handler.generator.name, 'handleClaude') for await (const event of handler.generator()) { - 
t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } - t.same(handler.response, { + assert.deepStrictEqual(handler.response, { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -111,27 +113,31 @@ tap.test('handles claude streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 1) - t.equal(br.finishReason, 'done') - t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 1) + assert.equal(br.finishReason, 'done') + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) -tap.test('handles claude3streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isClaude3 = () => true - t.context.chunks = [ +test('handles claude3streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isClaude3 = () => true + t.nr.chunks = [ { type: 'content_block_delta', delta: { type: 'text_delta', text: '42' } }, { type: 'message_delta', delta: { stop_reason: 'done' } }, - { type: 'message_stop', ...t.context.metrics } + { type: 'message_stop', ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleClaude3') + assert.equal(handler.generator.name, 'handleClaude3') for await (const event of handler.generator()) { - t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } const foundBody = JSON.parse(new TextDecoder().decode(handler.response.output.body)) - t.same(foundBody, { completions: ['42'], stop_reason: 'done', type: 'message_stop' }) + assert.deepStrictEqual(foundBody, { + completions: ['42'], + stop_reason: 'done', + type: 'message_stop' + }) const bc = new BedrockCommand({ modelId: 'anthropic.claude-3-haiku-20240307-v1:0', @@ -141,25 +147,25 @@ tap.test('handles claude3streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 1) - t.equal(br.finishReason, 'done') - t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 1) + assert.equal(br.finishReason, 'done') + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) -tap.test('handles cohere streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isCohere = () => true - t.context.chunks = [ +test('handles cohere streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isCohere = () => true + t.nr.chunks = [ { generations: [{ text: '1', finish_reason: null }] }, - { generations: [{ text: '2', finish_reason: 'done' }], ...t.context.metrics } + { generations: [{ text: '2', finish_reason: 'done' }], ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleCohere') + assert.equal(handler.generator.name, 'handleCohere') for await (const event of handler.generator()) { - t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } - t.same(handler.response, { + assert.deepStrictEqual(handler.response, { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -186,30 +192,30 @@ tap.test('handles cohere streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 2) - t.equal(br.finishReason, 'done') - 
t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 2) + assert.equal(br.finishReason, 'done') + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) -tap.test('handles cohere embedding streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isCohereEmbed = () => true - t.context.chunks = [ +test('handles cohere embedding streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isCohereEmbed = () => true + t.nr.chunks = [ { embeddings: [ [1, 2], [3, 4] ], - ...t.context.metrics + ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleCohereEmbed') + assert.equal(handler.generator.name, 'handleCohereEmbed') for await (const event of handler.generator()) { - t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } - t.same(handler.response, { + assert.deepStrictEqual(handler.response, { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -236,25 +242,25 @@ tap.test('handles cohere embedding streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 0) - t.equal(br.finishReason, undefined) - t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 0) + assert.equal(br.finishReason, undefined) + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) -tap.test('handles llama streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isLlama = () => true - t.context.chunks = [ +test('handles llama streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isLlama = () => true + t.nr.chunks = [ { generation: '1', stop_reason: null }, - { generation: '2', stop_reason: 'done', ...t.context.metrics } + { generation: '2', stop_reason: 'done', ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleLlama') + assert.equal(handler.generator.name, 'handleLlama') for await (const event of handler.generator()) { - t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } - t.same(handler.response, { + assert.deepStrictEqual(handler.response, { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -274,25 +280,25 @@ tap.test('handles llama streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 1) - t.equal(br.finishReason, 'done') - t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 1) + assert.equal(br.finishReason, 'done') + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) -tap.test('handles titan streams', async (t) => { - t.context.passThroughParams.bedrockCommand.isTitan = () => true - t.context.chunks = [ +test('handles titan streams', async (t) => { + t.nr.passThroughParams.bedrockCommand.isTitan = () => true + t.nr.chunks = [ { outputText: '1', completionReason: null }, - { outputText: '2', completionReason: 'done', ...t.context.metrics } + { outputText: '2', completionReason: 'done', ...t.nr.metrics } ] - const handler = new StreamHandler(t.context) + const handler = new StreamHandler(t.nr) - t.equal(handler.generator.name, 'handleTitan') + assert.equal(handler.generator.name, 
'handleTitan') for await (const event of handler.generator()) { - t.type(event.chunk.bytes, Uint8Array) + assert.equal(event.chunk.bytes.constructor, Uint8Array) } - t.same(handler.response, { + assert.deepStrictEqual(handler.response, { response: { headers: { 'x-amzn-requestid': 'aws-req-1' @@ -322,8 +328,8 @@ tap.test('handles titan streams', async (t) => { }) }) const br = new BedrockResponse({ bedrockCommand: bc, response: handler.response }) - t.equal(br.completions.length, 2) - t.equal(br.finishReason, 'done') - t.equal(br.requestId, 'aws-req-1') - t.equal(br.statusCode, 200) + assert.equal(br.completions.length, 2) + assert.equal(br.finishReason, 'done') + assert.equal(br.requestId, 'aws-req-1') + assert.equal(br.statusCode, 200) }) diff --git a/test/unit/llm-events/error.test.js b/test/unit/llm-events/error.test.js index 6ec461e457..cd76a34d78 100644 --- a/test/unit/llm-events/error.test.js +++ b/test/unit/llm-events/error.test.js @@ -5,11 +5,12 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmErrorMessage = require('../../../lib/llm-events/error-message') const { req, chatRes } = require('./openai/common') -tap.test('LlmErrorMessage', (t) => { +test('LlmErrorMessage', async () => { const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 } const errorMsg = new LlmErrorMessage({ request: req, response: res }) const expected = { @@ -22,6 +23,13 @@ tap.test('LlmErrorMessage', (t) => { 'vector_store_id': undefined, 'tool_id': undefined } - t.same(errorMsg, expected) - t.end() + assert.ok(errorMsg.toString(), 'LlmErrorMessage') + assert.equal(errorMsg['http.statusCode'], expected['http.statusCode']) + assert.equal(errorMsg['error.message'], expected['error.message']) + assert.equal(errorMsg['error.code'], expected['error.code']) + assert.equal(errorMsg['error.param'], expected['error.param']) + assert.equal(errorMsg.completion_id, expected.completion_id) + assert.equal(errorMsg.embedding_id, expected.embedding_id) + assert.equal(errorMsg.vector_store_id, expected.vector_store_id) + assert.equal(errorMsg.tool_id, expected.tool_id) }) diff --git a/test/unit/llm-events/feedback-message.test.js b/test/unit/llm-events/feedback-message.test.js index d6cf817ba8..6702402a6e 100644 --- a/test/unit/llm-events/feedback-message.test.js +++ b/test/unit/llm-events/feedback-message.test.js @@ -5,10 +5,11 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmFeedbackMessage = require('../../../lib/llm-events/feedback-message') -tap.test('LlmFeedbackMessage', (t) => { +test('LlmFeedbackMessage', () => { const opts = { traceId: 'trace-id', category: 'informative', @@ -24,6 +25,5 @@ tap.test('LlmFeedbackMessage', (t) => { message: 'This answer was amazing', ingest_source: 'Node' } - t.same(feedbackMsg, expected) - t.end() + assert.deepEqual(feedbackMsg, expected) }) diff --git a/test/unit/llm-events/langchain/chat-completion-message.test.js b/test/unit/llm-events/langchain/chat-completion-message.test.js index b23866a060..78dfe19f9b 100644 --- a/test/unit/llm-events/langchain/chat-completion-message.test.js +++ b/test/unit/llm-events/langchain/chat-completion-message.test.js @@ -5,11 +5,13 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainCompletionMessage = require('../../../../lib/llm-events/langchain/chat-completion-message') 
-tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -21,7 +23,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { ai_monitoring: { record_content: { @@ -34,59 +36,57 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' } } - t.context.runId = 'run-1' - t.context.metadata = { foo: 'foo' } + ctx.nr.runId = 'run-1' + ctx.nr.metadata = { foo: 'foo' } }) -tap.test('creates entity', async (t) => { +test('creates entity', async (t) => { const msg = new LangChainCompletionMessage({ - ...t.context, + ...t.nr, sequence: 1, content: 'hello world' }) - t.match(msg, { - id: 'run-1-1', - appName: 'test-app', - ['llm.conversation_id']: 'test-conversation', - span_id: 'segment-1', - request_id: 'run-1', - trace_id: 'trace-1', - ['metadata.foo']: 'foo', - ingest_source: 'Node', - vendor: 'langchain', - virtual_llm: true, - sequence: 1, - content: 'hello world', - completion_id: /[a-z0-9-]{36}/ - }) + assert.equal(msg.id, 'run-1-1') + assert.equal(msg.appName, 'test-app') + assert.equal(msg['llm.conversation_id'], 'test-conversation') + assert.equal(msg.span_id, 'segment-1') + assert.equal(msg.request_id, 'run-1') + assert.equal(msg.trace_id, 'trace-1') + assert.equal(msg['metadata.foo'], 'foo') + assert.equal(msg.ingest_source, 'Node') + assert.equal(msg.vendor, 'langchain') + assert.equal(msg.virtual_llm, true) + assert.equal(msg.sequence, 1) + assert.equal(msg.content, 'hello world') + assert.match(msg.completion_id, /[a-z0-9-]{36}/) }) -tap.test('assigns id correctly', async (t) => { - let msg = new LangChainCompletionMessage({ ...t.context, runId: '', sequence: 1 }) - t.match(msg.id, /[a-z0-9-]{36}-1/) +test('assigns id correctly', async (t) => { + let msg = new LangChainCompletionMessage({ ...t.nr, runId: '', sequence: 1 }) + assert.match(msg.id, /[a-z0-9-]{36}-1/) - msg = new LangChainCompletionMessage({ ...t.context, runId: '123456', sequence: 42 }) - t.equal(msg.id, '123456-42') + msg = new LangChainCompletionMessage({ ...t.nr, runId: '123456', sequence: 42 }) + assert.equal(msg.id, '123456-42') }) -tap.test('respects record_content setting', async (t) => { - t.context.agent.config.ai_monitoring.record_content.enabled = false +test('respects record_content setting', async (t) => { + t.nr.agent.config.ai_monitoring.record_content.enabled = false const search = new LangChainCompletionMessage({ - ...t.context, + ...t.nr, sequence: 1, content: 'hello world' }) - t.equal(search.content, undefined) + assert.equal(search.content, undefined) }) diff --git a/test/unit/llm-events/langchain/chat-completion-summary.test.js b/test/unit/llm-events/langchain/chat-completion-summary.test.js index 5f8bb5d928..83d770fcad 100644 --- a/test/unit/llm-events/langchain/chat-completion-summary.test.js +++ b/test/unit/llm-events/langchain/chat-completion-summary.test.js @@ -5,11 +5,13 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainCompletionSummary = require('../../../../lib/llm-events/langchain/chat-completion-summary') -tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -21,7 +23,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { applications() { return 
['test-app'] @@ -29,12 +31,12 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' @@ -44,25 +46,23 @@ tap.beforeEach((t) => { } } - t.context.runId = 'run-1' - t.context.metadata = { foo: 'foo' } + ctx.nr.runId = 'run-1' + ctx.nr.metadata = { foo: 'foo' } }) -tap.test('creates entity', async (t) => { - const msg = new LangChainCompletionSummary(t.context) - t.match(msg, { - id: /[a-z0-9-]{36}/, - appName: 'test-app', - ['llm.conversation_id']: 'test-conversation', - span_id: 'segment-1', - request_id: 'run-1', - trace_id: 'trace-1', - ['metadata.foo']: 'foo', - ingest_source: 'Node', - vendor: 'langchain', - virtual_llm: true, - tags: '', - duration: 42, - ['response.number_of_messages']: 0 - }) +test('creates entity', async (t) => { + const msg = new LangChainCompletionSummary(t.nr) + assert.match(msg.id, /[a-z0-9-]{36}/) + assert.equal(msg.appName, 'test-app') + assert.equal(msg['llm.conversation_id'], 'test-conversation') + assert.equal(msg.span_id, 'segment-1') + assert.equal(msg.request_id, 'run-1') + assert.equal(msg.trace_id, 'trace-1') + assert.equal(msg['metadata.foo'], 'foo') + assert.equal(msg.ingest_source, 'Node') + assert.equal(msg.vendor, 'langchain') + assert.equal(msg.virtual_llm, true) + assert.equal(msg.tags, '') + assert.equal(msg.duration, 42) + assert.equal(msg['response.number_of_messages'], 0) }) diff --git a/test/unit/llm-events/langchain/event.test.js b/test/unit/llm-events/langchain/event.test.js index 7c07aab8de..2fe2d064d8 100644 --- a/test/unit/llm-events/langchain/event.test.js +++ b/test/unit/llm-events/langchain/event.test.js @@ -5,11 +5,13 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainEvent = require('../../../../lib/llm-events/langchain/event') -tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -24,7 +26,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { applications() { return ['test-app'] @@ -32,79 +34,77 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' } } - t.context.runId = 'run-1' - t.context.metadata = { foo: 'foo' } + ctx.nr.runId = 'run-1' + ctx.nr.metadata = { foo: 'foo' } }) -tap.test('constructs default instance', async (t) => { - const event = new LangChainEvent(t.context) - t.match(event, { - id: /[a-z0-9-]{36}/, - appName: 'test-app', - ['llm.conversation_id']: 'test-conversation', - span_id: 'segment-1', - request_id: 'run-1', - trace_id: 'trace-1', - ['metadata.foo']: 'foo', - ingest_source: 'Node', - vendor: 'langchain', - error: null, - virtual_llm: true - }) +test('constructs default instance', async (t) => { + const event = new LangChainEvent(t.nr) + assert.match(event.id, /[a-z0-9-]{36}/) + assert.equal(event.appName, 'test-app') + assert.equal(event['llm.conversation_id'], 'test-conversation') + assert.equal(event.span_id, 'segment-1') + assert.equal(event.request_id, 'run-1') + assert.equal(event.trace_id, 'trace-1') + assert.equal(event['metadata.foo'], 'foo') + assert.equal(event.ingest_source, 'Node') + assert.equal(event.vendor, 'langchain') + assert.equal(event.error, null) + assert.equal(event.virtual_llm, true) }) 
-tap.test('params.virtual is handled correctly', async (t) => { - const event = new LangChainEvent({ ...t.context, virtual: false }) - t.equal(event.virtual_llm, false) +test('params.virtual is handled correctly', async (t) => { + const event = new LangChainEvent({ ...t.nr, virtual: false }) + assert.equal(event.virtual_llm, false) try { - const _ = new LangChainEvent({ ...t.context, virtual: 'false' }) - t.fail(_) + const _ = new LangChainEvent({ ...t.nr, virtual: 'false' }) + assert.fail(_) } catch (error) { - t.match(error, /params\.virtual must be a primitive boolean/) + assert.equal(error.message, 'params.virtual must be a primitive boolean') } }) -tap.test('langchainMeta is parsed correctly', async (t) => { - const event = new LangChainEvent(t.context) +test('langchainMeta is parsed correctly', async (t) => { + const event = new LangChainEvent(t.nr) event.langchainMeta = 'foobar' - t.same(event['metadata.foo'], 'foo') - t.equal(Object.keys(event).filter((k) => k.startsWith('metadata.')).length, 1) + assert.deepStrictEqual(event['metadata.foo'], 'foo') + assert.equal(Object.keys(event).filter((k) => k.startsWith('metadata.')).length, 1) }) -tap.test('metadata is parsed correctly', async (t) => { - const event = new LangChainEvent(t.context) - t.equal(event['llm.foo'], 'bar') - t.equal(event['llm.bar'], 'baz') - t.notOk(event.customKey) +test('metadata is parsed correctly', async (t) => { + const event = new LangChainEvent(t.nr) + assert.equal(event['llm.foo'], 'bar') + assert.equal(event['llm.bar'], 'baz') + assert.ok(!event.customKey) }) -tap.test('sets tags from array', async (t) => { - t.context.tags = ['foo', 'bar'] - const msg = new LangChainEvent(t.context) - t.equal(msg.tags, 'foo,bar') +test('sets tags from array', async (t) => { + t.nr.tags = ['foo', 'bar'] + const msg = new LangChainEvent(t.nr) + assert.equal(msg.tags, 'foo,bar') }) -tap.test('sets tags from string', async (t) => { - t.context.tags = 'foo,bar' - const msg = new LangChainEvent(t.context) - t.equal(msg.tags, 'foo,bar') +test('sets tags from string', async (t) => { + t.nr.tags = 'foo,bar' + const msg = new LangChainEvent(t.nr) + assert.equal(msg.tags, 'foo,bar') }) -tap.test('sets error property', async (t) => { - t.context.error = true - const msg = new LangChainEvent(t.context) - t.equal(msg.error, true) +test('sets error property', async (t) => { + t.nr.error = true + const msg = new LangChainEvent(t.nr) + assert.equal(msg.error, true) }) diff --git a/test/unit/llm-events/langchain/tool.test.js b/test/unit/llm-events/langchain/tool.test.js index ca9d251e15..639a4a7b22 100644 --- a/test/unit/llm-events/langchain/tool.test.js +++ b/test/unit/llm-events/langchain/tool.test.js @@ -5,11 +5,13 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainTool = require('../../../../lib/llm-events/langchain/tool') -tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -21,7 +23,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { ai_monitoring: { record_content: { @@ -34,12 +36,12 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { getDurationInMillis() { return 1.01 }, @@ -49,36 +51,34 @@ tap.beforeEach((t) => { } } - t.context.runId = 'run-1' - t.context.metadata = { foo: 'foo' } - t.context.name = 'test-tool' - t.context.description = 
'test tool description' - t.context.input = 'input' - t.context.output = 'output' + ctx.nr.runId = 'run-1' + ctx.nr.metadata = { foo: 'foo' } + ctx.nr.name = 'test-tool' + ctx.nr.description = 'test tool description' + ctx.nr.input = 'input' + ctx.nr.output = 'output' }) -tap.test('constructs default instance', async (t) => { - const event = new LangChainTool(t.context) - t.match(event, { - input: 'input', - output: 'output', - name: 'test-tool', - description: 'test tool description', - run_id: 'run-1', - id: /[a-z0-9-]{36}/, - appName: 'test-app', - span_id: 'segment-1', - trace_id: 'trace-1', - duration: 1.01, - ['metadata.foo']: 'foo', - ingest_source: 'Node', - vendor: 'langchain' - }) +test('constructs default instance', async (t) => { + const event = new LangChainTool(t.nr) + assert.equal(event.input, 'input') + assert.equal(event.output, 'output') + assert.equal(event.name, 'test-tool') + assert.equal(event.description, 'test tool description') + assert.equal(event.run_id, 'run-1') + assert.match(event.id, /[a-z0-9-]{36}/) + assert.equal(event.appName, 'test-app') + assert.equal(event.span_id, 'segment-1') + assert.equal(event.trace_id, 'trace-1') + assert.equal(event.duration, 1.01) + assert.equal(event['metadata.foo'], 'foo') + assert.equal(event.ingest_source, 'Node') + assert.equal(event.vendor, 'langchain') }) -tap.test('respects record_content setting', async (t) => { - t.context.agent.config.ai_monitoring.record_content.enabled = false - const event = new LangChainTool(t.context) - t.equal(event.input, undefined) - t.equal(event.output, undefined) +test('respects record_content setting', async (t) => { + t.nr.agent.config.ai_monitoring.record_content.enabled = false + const event = new LangChainTool(t.nr) + assert.equal(event.input, undefined) + assert.equal(event.output, undefined) }) diff --git a/test/unit/llm-events/langchain/vector-search-result.test.js b/test/unit/llm-events/langchain/vector-search-result.test.js index 8d0729cd9a..7189d32d90 100644 --- a/test/unit/llm-events/langchain/vector-search-result.test.js +++ b/test/unit/llm-events/langchain/vector-search-result.test.js @@ -5,12 +5,14 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainVectorSearchResult = require('../../../../lib/llm-events/langchain/vector-search-result') const LangChainVectorSearch = require('../../../../lib/llm-events/langchain/vector-search') -tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -22,7 +24,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { ai_monitoring: { record_content: { @@ -35,12 +37,12 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' @@ -50,46 +52,44 @@ tap.beforeEach((t) => { } } - t.context.runId = 'run-1' - t.context.metadata = { foo: 'foo' } + ctx.nr.runId = 'run-1' + ctx.nr.metadata = { foo: 'foo' } }) -tap.test('create entity', async (t) => { +test('create entity', async (t) => { const search = new LangChainVectorSearch({ - ...t.context, + ...t.nr, query: 'hello world', k: 1 }) const searchResult = new LangChainVectorSearchResult({ - ...t.context, + ...t.nr, sequence: 1, pageContent: 'hello world', search_id: search.id }) - t.match(searchResult, { - id: /[a-z0-9-]{36}/, - appName: 'test-app', - ['llm.conversation_id']: 
'test-conversation', - request_id: 'run-1', - span_id: 'segment-1', - trace_id: 'trace-1', - ['metadata.foo']: 'foo', - ingest_source: 'Node', - vendor: 'langchain', - virtual_llm: true, - sequence: 1, - page_content: 'hello world', - search_id: search.id - }) + assert.match(searchResult.id, /[a-z0-9-]{36}/) + assert.equal(searchResult.appName, 'test-app') + assert.equal(searchResult['llm.conversation_id'], 'test-conversation') + assert.equal(searchResult.span_id, 'segment-1') + assert.equal(searchResult.request_id, 'run-1') + assert.equal(searchResult.trace_id, 'trace-1') + assert.equal(searchResult['metadata.foo'], 'foo') + assert.equal(searchResult.ingest_source, 'Node') + assert.equal(searchResult.vendor, 'langchain') + assert.equal(searchResult.virtual_llm, true) + assert.equal(searchResult.sequence, 1) + assert.equal(searchResult.page_content, 'hello world') + assert.equal(searchResult.search_id, search.id) }) -tap.test('respects record_content setting', async (t) => { - t.context.agent.config.ai_monitoring.record_content.enabled = false +test('respects record_content setting', async (t) => { + t.nr.agent.config.ai_monitoring.record_content.enabled = false const search = new LangChainVectorSearchResult({ - ...t.context, + ...t.nr, sequence: 1, pageContent: 'hello world' }) - t.equal(search.page_content, undefined) + assert.equal(search.page_content, undefined) }) diff --git a/test/unit/llm-events/langchain/vector-search.test.js b/test/unit/llm-events/langchain/vector-search.test.js index f1cf836b3f..73c04a938f 100644 --- a/test/unit/llm-events/langchain/vector-search.test.js +++ b/test/unit/llm-events/langchain/vector-search.test.js @@ -5,11 +5,13 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LangChainVectorSearch = require('../../../../lib/llm-events/langchain/vector-search') -tap.beforeEach((t) => { - t.context._tx = { +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr._tx = { trace: { custom: { get() { @@ -21,7 +23,7 @@ tap.beforeEach((t) => { } } - t.context.agent = { + ctx.nr.agent = { config: { ai_monitoring: { record_content: { @@ -34,12 +36,12 @@ tap.beforeEach((t) => { }, tracer: { getTransaction() { - return t.context._tx + return ctx.nr._tx } } } - t.context.segment = { + ctx.nr.segment = { id: 'segment-1', transaction: { traceId: 'trace-1' @@ -48,38 +50,36 @@ tap.beforeEach((t) => { return 42 } } - t.context.runId = 'run-1' + ctx.nr.runId = 'run-1' }) -tap.test('create entity', async (t) => { +test('create entity', async (t) => { const search = new LangChainVectorSearch({ - ...t.context, + ...t.nr, query: 'hello world', k: 1 }) - t.match(search, { - 'id': /[a-z0-9-]{36}/, - 'appName': 'test-app', - ['llm.conversation_id']: 'test-conversation', - 'request_id': 'run-1', - 'span_id': 'segment-1', - 'trace_id': 'trace-1', - 'ingest_source': 'Node', - 'vendor': 'langchain', - 'virtual_llm': true, - 'request.query': 'hello world', - 'request.k': 1, - 'duration': 42, - 'response.number_of_documents': 0 - }) + assert.match(search.id, /[a-z0-9-]{36}/) + assert.equal(search.appName, 'test-app') + assert.equal(search['llm.conversation_id'], 'test-conversation') + assert.equal(search.request_id, 'run-1') + assert.equal(search.span_id, 'segment-1') + assert.equal(search.trace_id, 'trace-1') + assert.equal(search.ingest_source, 'Node') + assert.equal(search.vendor, 'langchain') + assert.equal(search.virtual_llm, true) + assert.equal(search['request.query'], 'hello world') + assert.equal(search['request.k'], 
1) + assert.equal(search.duration, 42) + assert.equal(search['response.number_of_documents'], 0) }) -tap.test('respects record_content setting', async (t) => { - t.context.agent.config.ai_monitoring.record_content.enabled = false +test('respects record_content setting', async (t) => { + t.nr.agent.config.ai_monitoring.record_content.enabled = false const search = new LangChainVectorSearch({ - ...t.context, + ...t.nr, k: 1, query: 'hello world' }) - t.equal(search.page_content, undefined) + assert.equal(search.page_content, undefined) }) diff --git a/test/unit/llm-events/openai/chat-completion-message.test.js b/test/unit/llm-events/openai/chat-completion-message.test.js index 0599bb5f2f..f727cd35d7 100644 --- a/test/unit/llm-events/openai/chat-completion-message.test.js +++ b/test/unit/llm-events/openai/chat-completion-message.test.js @@ -5,211 +5,213 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmChatCompletionMessage = require('../../../../lib/llm-events/openai/chat-completion-message') const helper = require('../../../lib/agent_helper') const { req, chatRes, getExpectedResult } = require('./common') -tap.test('LlmChatCompletionMessage', (t) => { - let agent - t.beforeEach(() => { - agent = helper.loadMockedAgent() - }) +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = helper.loadMockedAgent() +}) - t.afterEach(() => { - helper.unloadAgent(agent) - }) +test.afterEach((ctx) => { + helper.unloadAgent(ctx.nr.agent) +}) - t.test('should create a LlmChatCompletionMessage event', (t) => { - const api = helper.getAgentApi() - helper.runInTransaction(agent, (tx) => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: req.messages[0], - index: 0 - }) - const expected = getExpectedResult(tx, { id: 'res-id-0' }, 'message', summaryId) - t.same(chatMessageEvent, expected) - t.end() +test('should create a LlmChatCompletionMessage event', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + helper.runInTransaction(agent, (tx) => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: req.messages[0], + index: 0 }) + const expected = getExpectedResult(tx, { id: 'res-id-0' }, 'message', summaryId) + assert.deepEqual(chatMessageEvent, expected) + end() }) }) +}) - t.test('should create a LlmChatCompletionMessage from response choices', (t) => { - const api = helper.getAgentApi() - helper.runInTransaction(agent, (tx) => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: chatRes.choices[0].message, - index: 2 - }) - const expected = getExpectedResult(tx, { id: 'res-id-2' }, 'message', summaryId) - expected.sequence = 2 - expected.content = chatRes.choices[0].message.content - expected.role = chatRes.choices[0].message.role - expected.is_response = true - t.same(chatMessageEvent, expected) - t.end() 
+test('should create a LlmChatCompletionMessage from response choices', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + helper.runInTransaction(agent, (tx) => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: chatRes.choices[0].message, + index: 2 }) + const expected = getExpectedResult(tx, { id: 'res-id-2' }, 'message', summaryId) + expected.sequence = 2 + expected.content = chatRes.choices[0].message.content + expected.role = chatRes.choices[0].message.role + expected.is_response = true + assert.deepEqual(chatMessageEvent, expected) + end() }) }) +}) - t.test('should set conversation_id from custom attributes', (t) => { - const api = helper.getAgentApi() - const conversationId = 'convo-id' - helper.runInTransaction(agent, () => { - api.addCustomAttribute('llm.conversation_id', conversationId) - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment: {}, - request: {}, - response: {} - }) - t.equal(chatMessageEvent['llm.conversation_id'], conversationId) - t.end() +test('should set conversation_id from custom attributes', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + const conversationId = 'convo-id' + helper.runInTransaction(agent, () => { + api.addCustomAttribute('llm.conversation_id', conversationId) + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment: {}, + request: {}, + response: {} }) + assert.equal(chatMessageEvent['llm.conversation_id'], conversationId) + end() }) +}) + +test('respects record_content', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + const conversationId = 'convo-id' + agent.config.ai_monitoring.record_content.enabled = false - t.test('respects record_content', (t) => { - const api = helper.getAgentApi() - const conversationId = 'convo-id' - agent.config.ai_monitoring.record_content.enabled = false + helper.runInTransaction(agent, () => { + api.addCustomAttribute('llm.conversation_id', conversationId) + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment: {}, + request: {}, + response: {} + }) + assert.equal(chatMessageEvent.content, undefined) + end() + }) +}) - helper.runInTransaction(agent, () => { - api.addCustomAttribute('llm.conversation_id', conversationId) +test('should use token_count from tokenCountCallback for prompt message', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + const expectedCount = 4 + function cb(model, content) { + assert.equal(model, 'gpt-3.5-turbo-0613') + assert.equal(content, 'What is a woodchuck?') + return expectedCount + } + api.setLlmTokenCountCallback(cb) + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + delete chatRes.usage const chatMessageEvent = new LlmChatCompletionMessage({ agent, - segment: {}, - request: {}, - response: {} + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: req.messages[0], + index: 0 }) - t.equal(chatMessageEvent.content, undefined) - t.end() + assert.equal(chatMessageEvent.token_count, expectedCount) + end() }) }) +}) - t.test('should use token_count from tokenCountCallback for prompt message', (t) => { - const api = 
helper.getAgentApi() - const expectedCount = 4 - function cb(model, content) { - t.equal(model, 'gpt-3.5-turbo-0613') - t.equal(content, 'What is a woodchuck?') - return expectedCount - } - api.setLlmTokenCountCallback(cb) - helper.runInTransaction(agent, () => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - delete chatRes.usage - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: req.messages[0], - index: 0 - }) - t.equal(chatMessageEvent.token_count, expectedCount) - t.end() +test('should use token_count from tokenCountCallback for completion messages', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + const expectedCount = 4 + function cb(model, content) { + assert.equal(model, 'gpt-3.5-turbo-0613') + assert.equal(content, 'a lot') + return expectedCount + } + api.setLlmTokenCountCallback(cb) + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + delete chatRes.usage + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: chatRes.choices[0].message, + index: 2 }) + assert.equal(chatMessageEvent.token_count, expectedCount) + end() }) }) +}) - t.test('should use token_count from tokenCountCallback for completion messages', (t) => { - const api = helper.getAgentApi() - const expectedCount = 4 - function cb(model, content) { - t.equal(model, 'gpt-3.5-turbo-0613') - t.equal(content, 'a lot') - return expectedCount - } - api.setLlmTokenCountCallback(cb) - helper.runInTransaction(agent, () => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - delete chatRes.usage - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: chatRes.choices[0].message, - index: 2 - }) - t.equal(chatMessageEvent.token_count, expectedCount) - t.end() +test('should not set token_count if not set in usage nor a callback registered', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + delete chatRes.usage + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: chatRes.choices[0].message, + index: 2 }) + assert.equal(chatMessageEvent.token_count, undefined) + end() }) }) +}) - t.test('should not set token_count if not set in usage nor a callback registered', (t) => { - const api = helper.getAgentApi() - helper.runInTransaction(agent, () => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - delete chatRes.usage - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: chatRes.choices[0].message, - index: 2 - }) - t.equal(chatMessageEvent.token_count, undefined) - t.end() +test('should not set token_count if not set in usage nor a callback registered 
returns count', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + function cb() { + // empty cb + } + api.setLlmTokenCountCallback(cb) + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + const summaryId = 'chat-summary-id' + delete chatRes.usage + const chatMessageEvent = new LlmChatCompletionMessage({ + agent, + segment, + request: req, + response: chatRes, + completionId: summaryId, + message: chatRes.choices[0].message, + index: 2 }) + assert.equal(chatMessageEvent.token_count, undefined) + end() }) }) - - t.test( - 'should not set token_count if not set in usage nor a callback registered returns count', - (t) => { - const api = helper.getAgentApi() - function cb() { - // empty cb - } - api.setLlmTokenCountCallback(cb) - helper.runInTransaction(agent, () => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const summaryId = 'chat-summary-id' - delete chatRes.usage - const chatMessageEvent = new LlmChatCompletionMessage({ - agent, - segment, - request: req, - response: chatRes, - completionId: summaryId, - message: chatRes.choices[0].message, - index: 2 - }) - t.equal(chatMessageEvent.token_count, undefined) - t.end() - }) - }) - } - ) - - t.end() }) diff --git a/test/unit/llm-events/openai/chat-completion-summary.test.js b/test/unit/llm-events/openai/chat-completion-summary.test.js index 11f3cbdb18..ca9e24823a 100644 --- a/test/unit/llm-events/openai/chat-completion-summary.test.js +++ b/test/unit/llm-events/openai/chat-completion-summary.test.js @@ -5,75 +5,75 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmChatCompletionSummary = require('../../../../lib/llm-events/openai/chat-completion-summary') const helper = require('../../../lib/agent_helper') const { req, chatRes, getExpectedResult } = require('./common') -tap.test('LlmChatCompletionSummary', (t) => { - t.autoend() - - let agent - t.beforeEach(() => { - agent = helper.loadMockedAgent() - }) +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = helper.loadMockedAgent() +}) - t.afterEach(() => { - helper.unloadAgent(agent) - }) +test.afterEach((ctx) => { + helper.unloadAgent(ctx.nr.agent) +}) - t.test('should properly create a LlmChatCompletionSummary event', (t) => { - const api = helper.getAgentApi() - helper.runInTransaction(agent, (tx) => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - segment.end() - const chatSummaryEvent = new LlmChatCompletionSummary({ - agent, - segment, - request: req, - response: chatRes - }) - const expected = getExpectedResult(tx, chatSummaryEvent, 'summary') - t.same(chatSummaryEvent, expected) - t.end() +test('should properly create a LlmChatCompletionSummary event', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + helper.runInTransaction(agent, (tx) => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + segment.end() + const chatSummaryEvent = new LlmChatCompletionSummary({ + agent, + segment, + request: req, + response: chatRes }) + const expected = getExpectedResult(tx, chatSummaryEvent, 'summary') + assert.deepEqual(chatSummaryEvent, expected) + end() }) }) +}) - t.test('should set error to true', (t) => { - helper.runInTransaction(agent, () => { - const chatSummaryEvent = new LlmChatCompletionSummary({ - agent, - segment: null, - 
request: {}, - response: {}, - withError: true - }) - t.equal(true, chatSummaryEvent.error) - t.end() +test('should set error to true', (ctx, end) => { + const { agent } = ctx.nr + helper.runInTransaction(agent, () => { + const chatSummaryEvent = new LlmChatCompletionSummary({ + agent, + segment: null, + request: {}, + response: {}, + withError: true }) + assert.equal(true, chatSummaryEvent.error) + end() }) +}) - t.test('should set `llm.` attributes from custom attributes', (t) => { - const api = helper.getAgentApi() - const conversationId = 'convo-id' - helper.runInTransaction(agent, () => { - api.addCustomAttribute('llm.conversation_id', conversationId) - api.addCustomAttribute('llm.foo', 'bar') - api.addCustomAttribute('llm.bar', 'baz') - api.addCustomAttribute('rando-key', 'rando-value') - const chatSummaryEvent = new LlmChatCompletionSummary({ - agent, - segment: null, - request: {}, - response: {} - }) - t.equal(chatSummaryEvent['llm.conversation_id'], conversationId) - t.equal(chatSummaryEvent['llm.foo'], 'bar') - t.equal(chatSummaryEvent['llm.bar'], 'baz') - t.notOk(chatSummaryEvent['rando-key']) - t.end() +test('should set `llm.` attributes from custom attributes', (t, end) => { + const { agent } = t.nr + const api = helper.getAgentApi() + const conversationId = 'convo-id' + helper.runInTransaction(agent, () => { + api.addCustomAttribute('llm.conversation_id', conversationId) + api.addCustomAttribute('llm.foo', 'bar') + api.addCustomAttribute('llm.bar', 'baz') + api.addCustomAttribute('rando-key', 'rando-value') + const chatSummaryEvent = new LlmChatCompletionSummary({ + agent, + segment: null, + request: {}, + response: {} }) + assert.equal(chatSummaryEvent['llm.conversation_id'], conversationId) + assert.equal(chatSummaryEvent['llm.foo'], 'bar') + assert.equal(chatSummaryEvent['llm.bar'], 'baz') + assert.ok(!chatSummaryEvent['rando-key']) + end() }) }) diff --git a/test/unit/llm-events/openai/embedding.test.js b/test/unit/llm-events/openai/embedding.test.js index 7175b072ff..ca8f3d75ae 100644 --- a/test/unit/llm-events/openai/embedding.test.js +++ b/test/unit/llm-events/openai/embedding.test.js @@ -5,162 +5,165 @@ 'use strict' -const tap = require('tap') +const test = require('node:test') +const assert = require('node:assert') const LlmEmbedding = require('../../../../lib/llm-events/openai/embedding') const helper = require('../../../lib/agent_helper') const { res, getExpectedResult } = require('./common') -tap.test('LlmEmbedding', (t) => { - t.autoend() - - let agent - t.beforeEach(() => { - agent = helper.loadMockedAgent() - }) +test.beforeEach((ctx) => { + ctx.nr = {} + ctx.nr.agent = helper.loadMockedAgent() +}) - t.afterEach(() => { - helper.unloadAgent(agent) - }) +test.afterEach((ctx) => { + helper.unloadAgent(ctx.nr.agent) +}) - t.test('should properly create a LlmEmbedding event', (t) => { - const req = { - input: 'This is my test input', - model: 'gpt-3.5-turbo-0613' - } +test('should properly create a LlmEmbedding event', (t, end) => { + const { agent } = t.nr + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } - const api = helper.getAgentApi() - helper.runInTransaction(agent, (tx) => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - segment.end() - const embeddingEvent = new LlmEmbedding({ agent, segment, request: req, response: res }) - const expected = getExpectedResult(tx, embeddingEvent, 'embedding') - t.same(embeddingEvent, expected) - t.end() - }) - }) - }) - ;[ - { type: 
'string', value: 'test input', expected: 'test input' }, - { - type: 'array of strings', - value: ['test input', 'test input2'], - expected: 'test input,test input2' - }, - { type: 'array of numbers', value: [1, 2, 3, 4], expected: '1,2,3,4' }, - { - type: 'array of array of numbers', - value: [ - [1, 2], - [3, 4], - [5, 6] - ], - expected: '1,2,3,4,5,6' - } - ].forEach(({ type, value, expected }) => { - t.test(`should properly serialize input when it is a ${type}`, (t) => { - const embeddingEvent = new LlmEmbedding({ - agent, - segment: null, - request: { input: value }, - response: {} - }) - t.equal(embeddingEvent.input, expected) - t.end() + const api = helper.getAgentApi() + helper.runInTransaction(agent, (tx) => { + api.startSegment('fakeSegment', false, () => { + const segment = api.shim.getActiveSegment() + segment.end() + const embeddingEvent = new LlmEmbedding({ agent, segment, request: req, response: res }) + const expected = getExpectedResult(tx, embeddingEvent, 'embedding') + assert.deepEqual(embeddingEvent, expected) + end() }) }) - - t.test('should set error to true', (t) => { - const req = { - input: 'This is my test input', - model: 'gpt-3.5-turbo-0613' - } - - const api = helper.getAgentApi() - helper.runInTransaction(agent, () => { - api.startSegment('fakeSegment', false, () => { - const segment = api.shim.getActiveSegment() - const embeddingEvent = new LlmEmbedding({ - agent, - segment, - request: req, - response: res, - withError: true - }) - t.equal(true, embeddingEvent.error) - t.end() - }) +}) +;[ + { type: 'string', value: 'test input', expected: 'test input' }, + { + type: 'array of strings', + value: ['test input', 'test input2'], + expected: 'test input,test input2' + }, + { type: 'array of numbers', value: [1, 2, 3, 4], expected: '1,2,3,4' }, + { + type: 'array of array of numbers', + value: [ + [1, 2], + [3, 4], + [5, 6] + ], + expected: '1,2,3,4,5,6' + } +].forEach(({ type, value, expected }) => { + test(`should properly serialize input when it is a ${type}`, (t, end) => { + const { agent } = t.nr + const embeddingEvent = new LlmEmbedding({ + agent, + segment: null, + request: { input: value }, + response: {} }) + assert.equal(embeddingEvent.input, expected) + end() }) +}) - t.test('respects record_content', (t) => { - const req = { - input: 'This is my test input', - model: 'gpt-3.5-turbo-0613' - } - agent.config.ai_monitoring.record_content.enabled = false +test('should set error to true', (t, end) => { + const { agent } = t.nr + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } - const api = helper.getAgentApi() - helper.runInTransaction(agent, () => { + const api = helper.getAgentApi() + helper.runInTransaction(agent, () => { + api.startSegment('fakeSegment', false, () => { const segment = api.shim.getActiveSegment() const embeddingEvent = new LlmEmbedding({ agent, segment, request: req, - response: res + response: res, + withError: true }) - t.equal(embeddingEvent.input, undefined) - t.end() + assert.equal(true, embeddingEvent.error) + end() }) }) +}) - t.test('should calculate token count from tokenCountCallback', (t) => { - const req = { - input: 'This is my test input', - model: 'gpt-3.5-turbo-0613' - } +test('respects record_content', (t, end) => { + const { agent } = t.nr + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } + agent.config.ai_monitoring.record_content.enabled = false + + const api = helper.getAgentApi() + helper.runInTransaction(agent, () => { + const segment = 
api.shim.getActiveSegment() + const embeddingEvent = new LlmEmbedding({ + agent, + segment, + request: req, + response: res + }) + assert.equal(embeddingEvent.input, undefined) + end() + }) +}) - const api = helper.getAgentApi() +test('should calculate token count from tokenCountCallback', (t, end) => { + const { agent } = t.nr + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } - function cb(model, content) { - if (model === req.model) { - return content.length - } - } + const api = helper.getAgentApi() - api.setLlmTokenCountCallback(cb) - helper.runInTransaction(agent, () => { - const segment = api.shim.getActiveSegment() - delete res.usage - const embeddingEvent = new LlmEmbedding({ - agent, - segment, - request: req, - response: res - }) - t.equal(embeddingEvent.token_count, 21) - t.end() + function cb(model, content) { + if (model === req.model) { + return content.length + } + } + + api.setLlmTokenCountCallback(cb) + helper.runInTransaction(agent, () => { + const segment = api.shim.getActiveSegment() + delete res.usage + const embeddingEvent = new LlmEmbedding({ + agent, + segment, + request: req, + response: res }) + assert.equal(embeddingEvent.token_count, 21) + end() }) +}) - t.test('should not set token count when not present in usage nor tokenCountCallback', (t) => { - const req = { - input: 'This is my test input', - model: 'gpt-3.5-turbo-0613' - } - - const api = helper.getAgentApi() - helper.runInTransaction(agent, () => { - const segment = api.shim.getActiveSegment() - delete res.usage - const embeddingEvent = new LlmEmbedding({ - agent, - segment, - request: req, - response: res - }) - t.equal(embeddingEvent.token_count, undefined) - t.end() +test('should not set token count when not present in usage nor tokenCountCallback', (t, end) => { + const { agent } = t.nr + const req = { + input: 'This is my test input', + model: 'gpt-3.5-turbo-0613' + } + + const api = helper.getAgentApi() + helper.runInTransaction(agent, () => { + const segment = api.shim.getActiveSegment() + delete res.usage + const embeddingEvent = new LlmEmbedding({ + agent, + segment, + request: req, + response: res }) + assert.equal(embeddingEvent.token_count, undefined) + end() }) })
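
Note on the conversion pattern: every hunk above applies the same mechanical translation — tap's `t.context` becomes an `ctx.nr`/`t.nr` property on the node:test context, `t.equal`/`t.same`/`t.match` become `assert.equal`/`assert.deepStrictEqual` (or `assert.deepEqual`)/`assert.match`, and `t.end()` becomes an explicit `end` callback parameter. The following is a minimal standalone sketch of that pattern for reference only; it is not part of the patch, and the `nr` property name simply mirrors the convention used in these tests.

'use strict'

const test = require('node:test')
const assert = require('node:assert')

// Shared per-test state: instead of tap's t.context, attach a property
// (here `nr`, matching the hunks above) to the node:test context.
test.beforeEach((ctx) => {
  ctx.nr = { greeting: 'hello world' }
})

test.afterEach((ctx) => {
  // Tear down anything created in beforeEach (a no-op in this sketch).
  ctx.nr = null
})

// Promise-style test: assertions come from node:assert rather than
// tap's t.equal/t.same/t.match helpers.
test('reads state from the context', async (t) => {
  assert.equal(t.nr.greeting, 'hello world')
  assert.match(t.nr.greeting, /hello/)
  assert.deepStrictEqual({ value: t.nr.greeting }, { value: 'hello world' })
})

// Callback-style test: declaring a second parameter replaces tap's
// t.end(); invoke it once the asynchronous work has finished.
test('signals completion explicitly', (t, end) => {
  setImmediate(() => {
    assert.ok(t.nr.greeting)
    end()
  })
})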