diff --git a/lib/subscribers/openai/chat.js b/lib/subscribers/openai/chat.js index da0aec0d24..662139635f 100644 --- a/lib/subscribers/openai/chat.js +++ b/lib/subscribers/openai/chat.js @@ -23,6 +23,7 @@ const MIN_STREAM_VERSION = '4.12.2' class OpenAIChatCompletions extends OpenAISubscriber { constructor({ agent, logger, channelName = 'nr_completionsCreate' }) { super({ agent, logger, channelName }) + this.events = ['asyncEnd', 'end'] } handler(data, ctx) { @@ -42,6 +43,32 @@ class OpenAIChatCompletions extends OpenAISubscriber { return newCtx } + /** + * Temporary fix as `tracePromise` wraps the promise in a native one. + * We are now wrapping `openai.chat.completions.parse` in a traceSync call + * and then wrapping the promise here so it returns the custom promise. + * OpenAI has a [custom promise](https://github.com/openai/openai-node/blob/master/src/core/api-promise.ts) that crashes applications using `openai.chat.completions.parse` + * see: https://github.com/newrelic/node-newrelic/issues/3379 + * see: https://github.com/nodejs/node/issues/59936 + * @param data + */ + end(data) { + const promise = data?.result + if (!promise?.then) { + return promise + } + + return promise.then((result) => { + data.result = result + this.channel.asyncEnd.publish(data) + return result + }).catch((err) => { + data.error = err + this.channel.asyncEnd.publish(data) + throw err + }) + } + asyncEnd(data) { const ctx = this.agent.tracer.getContext() if (!ctx?.segment || !ctx?.transaction) { diff --git a/lib/subscribers/openai/config.js b/lib/subscribers/openai/config.js index c6cfd9fd27..a5bf23383b 100644 --- a/lib/subscribers/openai/config.js +++ b/lib/subscribers/openai/config.js @@ -16,7 +16,7 @@ module.exports = { functionQuery: { className: 'Completions', methodName: 'create', - kind: 'Async' + kind: 'Sync' } }, { @@ -25,7 +25,7 @@ module.exports = { functionQuery: { className: 'Completions', methodName: 'create', - kind: 'Async' + kind: 'Sync' } } ] @@ -39,7 +39,7 @@ 
module.exports = { functionQuery: { className: 'Responses', methodName: 'create', - kind: 'Async' + kind: 'Sync' } } ] diff --git a/test/versioned/openai/chat-completions-res-api.test.js b/test/versioned/openai/chat-completions-res-api.test.js index 903aa1d81c..9875f915e8 100644 --- a/test/versioned/openai/chat-completions-res-api.test.js +++ b/test/versioned/openai/chat-completions-res-api.test.js @@ -9,6 +9,7 @@ const test = require('node:test') const assert = require('node:assert') const fs = require('node:fs') const path = require('node:path') +const { tspl } = require('@matteo.collina/tspl') const { removeModules } = require('../../lib/cache-buster') const { assertSegments, assertSpanKind, match } = require('../../lib/custom-assertions') @@ -55,6 +56,26 @@ test('responses.create', async (t) => { removeModules('openai') }) + // Note: I cannot figure out how to get the mock server to do the right thing, + // but this was failing with a different issue before + await t.test('should not crash when you call `responses.parse`', async (t) => { + const plan = tspl(t, { plan: 1 }) + const { client, agent } = t.nr + await helper.runInTransaction(agent, async (tx) => { + try { + await client.responses.parse({ + input: [{ role: 'user', content: 'You are a mathematician.' 
}] + }) + } catch (err) { + plan.match(err.message, /Body is unusable|body used already for/) + } finally { + tx.end() + } + }) + + await plan.completed + }) + await t.test('should create span on successful chat completion create', (t, end) => { const { client, agent, host, port } = t.nr helper.runInTransaction(agent, async (tx) => { diff --git a/test/versioned/openai/chat-completions.test.js b/test/versioned/openai/chat-completions.test.js index 4180b44184..402a1fdc3a 100644 --- a/test/versioned/openai/chat-completions.test.js +++ b/test/versioned/openai/chat-completions.test.js @@ -28,6 +28,7 @@ const TRACKING_METRIC = `Supportability/Nodejs/ML/OpenAI/${pkgVersion}` const responses = require('./mock-chat-api-responses') const { assertChatCompletionMessages, assertChatCompletionSummary } = require('./common-chat-api') +const { tspl } = require('@matteo.collina/tspl') test('chat.completions.create', async (t) => { t.beforeEach(async (ctx) => { @@ -57,13 +58,35 @@ test('chat.completions.create', async (t) => { removeModules('openai') }) + // Note: I cannot figure out how to get the mock server to do the right thing, + // but this was failing with a different issue before + await t.test('should not crash when you call `completions.parse`', { skip: semver.lt(pkgVersion, '5.0.0') }, async (t) => { + const plan = tspl(t, { plan: 1 }) + const { client, agent } = t.nr + await helper.runInTransaction(agent, async (tx) => { + try { + await client.chat.completions.parse({ + messages: [{ role: 'user', content: 'You are a mathematician.' 
}] + }) + } catch (err) { + plan.match(err.message, /.*Body is unusable.*/) + } finally { + tx.end() + } + }) + + await plan.completed + }) + await t.test('should create span on successful chat completion create', (t, end) => { const { client, agent, host, port } = t.nr helper.runInTransaction(agent, async (tx) => { - const results = await client.chat.completions.create({ + const prom = client.chat.completions.create({ messages: [{ role: 'user', content: 'You are a mathematician.' }] }) + const results = await prom + assert.equal(results.headers, undefined, 'should remove response headers from user result') assert.equal(results.choices[0].message.content, '1 plus 2 is 3.') @@ -138,7 +161,7 @@ test('chat.completions.create', async (t) => { }) if (semver.gte(pkgVersion, '4.12.2')) { - await t.test('should create span on successful chat completion stream create', (t, end) => { + await t.test('should create span on successful chat completion stream create', { skip: semver.lt(pkgVersion, '4.12.2') }, (t, end) => { const { client, agent, host, port } = t.nr helper.runInTransaction(agent, async (tx) => { const content = 'Streamed response'