Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions lib/subscribers/openai/chat.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ const MIN_STREAM_VERSION = '4.12.2'
class OpenAIChatCompletions extends OpenAISubscriber {
constructor({ agent, logger, channelName = 'nr_completionsCreate' }) {
super({ agent, logger, channelName })
// Subscribe to both lifecycle events: `end` (fires synchronously with the
// traced call's return value, see `end` below) and `asyncEnd` (fires when
// the underlying promise settles and the segment can be finalized).
this.events = ['asyncEnd', 'end']
}

handler(data, ctx) {
Expand All @@ -42,6 +43,32 @@ class OpenAIChatCompletions extends OpenAISubscriber {
return newCtx
}

/**
* Temporary fix as `tracePromise` wraps the promise in a native one.
* We are now wrapping `openai.chat.completions.parse` in a traceSync call
* and then wrapping the promise here so it returns the custom promise.
* OpenAI has a [custom promise](https://github.com/openai/openai-node/blob/master/src/core/api-promise.ts) that crashes applications using `openai.chat.completions.parse`
* see: https://github.com/newrelic/node-newrelic/issues/3379
* see: https://github.com/nodejs/node/issues/59936
* @param data
*/
end(data) {
const promise = data?.result
if (!promise.then) {
return promise
}

return promise.then((result) => {
data.result = result
this.channel.asyncEnd.publish(data)
return result
}).catch((err) => {
data.error = err
this.channel.asyncEnd.publish(data)
return err
})
}

asyncEnd(data) {
const ctx = this.agent.tracer.getContext()
if (!ctx?.segment || !ctx?.transaction) {
Expand Down
6 changes: 3 additions & 3 deletions lib/subscribers/openai/config.js
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ module.exports = {
functionQuery: {
className: 'Completions',
methodName: 'create',
kind: 'Async'
kind: 'Sync'
}
},
{
Expand All @@ -25,7 +25,7 @@ module.exports = {
functionQuery: {
className: 'Completions',
methodName: 'create',
kind: 'Async'
kind: 'Sync'
}
}
]
Expand All @@ -39,7 +39,7 @@ module.exports = {
functionQuery: {
className: 'Responses',
methodName: 'create',
kind: 'Async'
kind: 'Sync'
}
}
]
Expand Down
21 changes: 21 additions & 0 deletions test/versioned/openai/chat-completions-res-api.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ const test = require('node:test')
const assert = require('node:assert')
const fs = require('node:fs')
const path = require('node:path')
const { tspl } = require('@matteo.collina/tspl')

const { removeModules } = require('../../lib/cache-buster')
const { assertSegments, assertSpanKind, match } = require('../../lib/custom-assertions')
Expand Down Expand Up @@ -55,6 +56,26 @@ test('responses.create', async (t) => {
removeModules('openai')
})

// Note: the mock server could not be made to return a parseable body for this
// endpoint, so the call is expected to reject. Before the fix this scenario
// failed with a different (crashing) error — see
// https://github.com/newrelic/node-newrelic/issues/3379 — so rejecting with a
// body-reuse message instead of crashing is the behavior under test.
await t.test('should not crash when you call `responses.parse`', async (t) => {
const plan = tspl(t, { plan: 1 })
const { client, agent } = t.nr
await helper.runInTransaction(agent, async (tx) => {
try {
await client.responses.parse({
input: [{ role: 'user', content: 'You are a mathematician.' }]
})
} catch (err) {
// The exact error text varies between dependency versions, hence the
// two alternatives.
plan.match(err.message, /Body is unusable|body used already for/)
} finally {
// Always end the transaction so the harness can record/flush it.
tx.end()
}
})

await plan.completed
})

await t.test('should create span on successful chat completion create', (t, end) => {
const { client, agent, host, port } = t.nr
helper.runInTransaction(agent, async (tx) => {
Expand Down
27 changes: 25 additions & 2 deletions test/versioned/openai/chat-completions.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ const TRACKING_METRIC = `Supportability/Nodejs/ML/OpenAI/${pkgVersion}`

const responses = require('./mock-chat-api-responses')
const { assertChatCompletionMessages, assertChatCompletionSummary } = require('./common-chat-api')
const { tspl } = require('@matteo.collina/tspl')

test('chat.completions.create', async (t) => {
t.beforeEach(async (ctx) => {
Expand Down Expand Up @@ -57,13 +58,35 @@ test('chat.completions.create', async (t) => {
removeModules('openai')
})

// Note: the mock server could not be made to return a parseable body for this
// endpoint, so the call is expected to reject. Before the fix this scenario
// crashed the process (see https://github.com/newrelic/node-newrelic/issues/3379),
// so rejecting cleanly is the behavior under test. `parse` only exists on
// openai >= 5.0.0, hence the skip.
await t.test('should not crash when you call `completions.parse`', { skip: semver.lt(pkgVersion, '5.0.0') }, async (t) => {
const plan = tspl(t, { plan: 1 })
const { client, agent } = t.nr
await helper.runInTransaction(agent, async (tx) => {
try {
await client.chat.completions.parse({
messages: [{ role: 'user', content: 'You are a mathematician.' }]
})
} catch (err) {
// Match the same body-reuse variants as the `responses.parse` test; the
// original `/.*Body is unusable.*/` had redundant `.*` anchors (an
// unanchored regex already matches anywhere in the string).
plan.match(err.message, /Body is unusable|body used already for/)
} finally {
// Always end the transaction so the harness can record/flush it.
tx.end()
}
})

await plan.completed
})

await t.test('should create span on successful chat completion create', (t, end) => {
const { client, agent, host, port } = t.nr
helper.runInTransaction(agent, async (tx) => {
const results = await client.chat.completions.create({
const prom = client.chat.completions.create({
messages: [{ role: 'user', content: 'You are a mathematician.' }]
})

const results = await prom

assert.equal(results.headers, undefined, 'should remove response headers from user result')
assert.equal(results.choices[0].message.content, '1 plus 2 is 3.')

Expand Down Expand Up @@ -138,7 +161,7 @@ test('chat.completions.create', async (t) => {
})

if (semver.gte(pkgVersion, '4.12.2')) {
await t.test('should create span on successful chat completion stream create', (t, end) => {
await t.test('should create span on successful chat completion stream create', { skip: semver.lt(pkgVersion, '4.12.2') }, (t, end) => {
const { client, agent, host, port } = t.nr
helper.runInTransaction(agent, async (tx) => {
const content = 'Streamed response'
Expand Down
Loading