Commit a1204b2

feat: Support openai.responses.create api (newrelic#3139)
1 parent 1a9d9f5 commit a1204b2

20 files changed: +1746 -605 lines changed

lib/instrumentation/openai.js

Lines changed: 94 additions & 3 deletions
@@ -23,6 +23,61 @@ const { DESTINATIONS } = require('../config/attribute-filter')
 
 let TRACKING_METRIC = OPENAI.TRACKING_PREFIX
 
+/**
+ * Parses the response from OpenAI and extracts the message content and role.
+ *
+ * @param {object} response The OpenAI SDK response object
+ * @returns {{ content: string, role: string }} the message object with fields `content` and `role`
+ */
+function getMessageFromResponse(response) {
+  let content
+  let role
+  if (response?.object === 'response') {
+    content = response?.output?.[0]?.content?.[0]?.text
+    role = response?.output?.[0]?.role
+  } else {
+    content = response?.choices?.[0]?.message?.content
+    role = response?.choices?.[0]?.message?.role
+  }
+
+  return { content, role }
+}
+
+/**
+ * Parses all messages from the OpenAI request object.
+ *
+ * @param {object} request The OpenAI SDK request object
+ * @param {Shim} shim instance of shim
+ * @returns {Array<object>} an array of message objects with fields `content` and `role`
+ */
+function getMessagesFromRequest(request, shim) {
+  // There are a few different ways to pass messages to the OpenAI SDK.
+  //
+  // For langchain and `chat.completions.create`, messages are passed
+  // as an array of objects with `content` and `role` properties
+  // to the `messages` field of the request.
+  //
+  // For `responses.create`, messages are passed as an array of objects
+  // with `content` and `role` properties OR as a single string (implied
+  // to be a user message) to the `input` field of the request.
+  let messages = []
+
+  if (Array.isArray(request?.input)) {
+    // Handle array of input messages
+    messages = request.input.filter(msg => msg?.content && msg?.role)
+  } else if (typeof request?.input === 'string') {
+    // Handle single string input as a user message
+    messages = [{ content: request.input, role: 'user' }]
+  } else if (Array.isArray(request?.messages)) {
+    // Handle array of messages
+    messages = request.messages.filter(msg => msg?.content && msg?.role)
+  } else {
+    shim.logger.warn('No valid messages found in OpenAI request object.')
+  }
+
+  return messages
+}
+
 /**
  * Checks if we should skip instrumentation.
  * Currently, it checks if `ai_monitoring.enabled` is true
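
To see what the new request parsing does, here is a minimal runnable sketch of `getMessagesFromRequest` applied to both request shapes (restated with early returns for brevity); the `shim` stub and the sample requests are illustrative, not part of the commit:

// Minimal sketch; `shim` is stubbed and sample requests are hypothetical.
const shim = { logger: { warn: console.warn } }

function getMessagesFromRequest(request, shim) {
  if (Array.isArray(request?.input)) {
    return request.input.filter(msg => msg?.content && msg?.role)
  }
  if (typeof request?.input === 'string') {
    return [{ content: request.input, role: 'user' }]
  }
  if (Array.isArray(request?.messages)) {
    return request.messages.filter(msg => msg?.content && msg?.role)
  }
  shim.logger.warn('No valid messages found in OpenAI request object.')
  return []
}

// `chat.completions.create` / langchain shape: `messages` array
console.log(getMessagesFromRequest({ messages: [{ role: 'user', content: 'hi' }] }, shim))
// => [ { role: 'user', content: 'hi' } ]

// `responses.create` shape: a bare string `input` becomes a user message
console.log(getMessagesFromRequest({ input: 'hi' }, shim))
// => [ { content: 'hi', role: 'user' } ]
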
@@ -145,8 +200,13 @@ function recordChatCompletionMessages({
     withError: err != null
   })
 
-  // Only take the first response message and append to input messages
-  const messages = [...request.messages, response?.choices?.[0]?.message]
+  // Note: langchain still expects a message event even
+  // when the response is empty, so no filtering here
+  const messages = [
+    ...getMessagesFromRequest(request, shim),
+    getMessageFromResponse(response)
+  ]
+
   messages.forEach((message, index) => {
     const completionMsg = new LlmChatCompletionMessage({
       agent,
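
As a concrete illustration of the new combination (helper logic restated inline; the request and response objects are made up), a one-message chat request plus its response yields two message events, with the response message last:

// Trimmed restatements of the helpers above; sample objects are hypothetical.
const fromRequest = (req) =>
  typeof req?.input === 'string' ? [{ content: req.input, role: 'user' }] : (req?.messages ?? [])
const fromResponse = (res) =>
  res?.object === 'response'
    ? { content: res?.output?.[0]?.content?.[0]?.text, role: res?.output?.[0]?.role }
    : { content: res?.choices?.[0]?.message?.content, role: res?.choices?.[0]?.message?.role }

const request = { messages: [{ role: 'user', content: 'hi' }] }
const response = { choices: [{ message: { role: 'assistant', content: 'Hello!' } }] }

const messages = [...fromRequest(request), fromResponse(response)]
console.log(messages)
// => two entries; the forEach above assigns sequence 0 and 1, and the last
//    entry is the (possibly empty) response message that langchain expects.
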
@@ -218,6 +278,7 @@ function instrumentStream({ agent, shim, request, response, segment, transaction
       throw err
     } finally {
       chunk.choices[0].message = { role, content }
+
       // update segment duration since we want to extend the time it took to
       // handle the stream
       segment.touch()
@@ -275,7 +336,6 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
 
   // Instruments chat completion creation
   // and creates the LLM events
-  // **Note**: Currently only for promises. streams will come later
   shim.record(
     openaiClient.Chat.Completions.prototype,
     'create',
@@ -312,6 +372,37 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
     }
   )
 
+  // New API introduced in openai@5.0.0
+  // Also instruments chat completion creation
+  // and creates the LLM events
+  if (semver.gte(shim.pkgVersion, '5.0.0')) {
+    shim.record(openaiClient.Responses.prototype, 'create', function wrapCreate(shim, create, name, args) {
+      const [request] = args
+      if (request.stream) {
+        shim.logger.warn('Streaming is not supported for responses.create yet.')
+        return
+      }
+
+      return new RecorderSpec({
+        name: OPENAI.COMPLETION,
+        promise: true,
+        after({ error: err, result: response, segment, transaction }) {
+          recordChatCompletionMessages({
+            agent,
+            shim,
+            segment,
+            transaction,
+            request,
+            response,
+            err
+          })
+
+          addLlmMeta({ agent, transaction })
+        }
+      })
+    })
+  }
+
   // Instruments embedding creation
   // and creates LlmEmbedding event
   shim.record(
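
For context, application code along these lines would now be recorded by the new wrapper. This is a hedged sketch assuming openai@5+ with the agent preloaded; the model name is a placeholder:

// Hypothetical app code, e.g. run with `node -r newrelic app.js`.
const OpenAI = require('openai')
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

async function main() {
  // Non-streaming responses.create calls are instrumented (openai >= 5.0.0).
  const res = await client.responses.create({
    model: 'gpt-4o-mini', // placeholder model name
    input: 'Say hello in one word.'
  })
  console.log(res.output?.[0]?.content?.[0]?.text)
  // Requests with `stream: true` are skipped with a warning for now.
}

main().catch(console.error)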

lib/llm-events/openai/chat-completion-message.js

Lines changed: 17 additions & 5 deletions
@@ -13,7 +13,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
     request = {},
     response = {},
     index = 0,
-    message,
+    message = { content: undefined, role: undefined },
     completionId,
     transaction
   }) {
@@ -22,21 +22,33 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
     this.role = message?.role
     this.sequence = index
     this.completion_id = completionId
-    this.is_response = response?.choices?.[0]?.message?.content === message?.content
 
+    // Check if the given message is from the response.
+    // The response object differs based on the API called.
+    // If it's `responses.create`, we check against `response.output`.
+    // If it's `chat.completions.create` or langchain, we check against `response.choices`.
+    if (response?.object === 'response') {
+      this.is_response = message.content === response?.output?.[0]?.content?.[0]?.text
+    } else {
+      this.is_response = message.content === response?.choices?.[0]?.message?.content
+    }
+
+    // Assign content to the event.
+    const content = message?.content
     if (agent.config.ai_monitoring.record_content.enabled === true) {
-      this.content = message?.content
+      this.content = content
     }
 
+    // Calculate token count if the callback is available.
     const tokenCB = agent.llm?.tokenCountCallback
     if (typeof tokenCB !== 'function') {
       return
     }
 
     if (this.is_response) {
-      this.token_count = tokenCB(this['response.model'], message?.content)
+      this.token_count = tokenCB(this['response.model'], content)
     } else {
-      this.token_count = tokenCB(request.model || request.engine, message?.content)
+      this.token_count = tokenCB(request.model || request.engine, content)
     }
   }
 }
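
A small runnable sketch of the branching above (sample response objects are hypothetical) shows how the same message resolves against each response shape:

// The is_response check, extracted from the class for illustration.
function isResponseMessage(message, response) {
  if (response?.object === 'response') {
    return message.content === response?.output?.[0]?.content?.[0]?.text
  }
  return message.content === response?.choices?.[0]?.message?.content
}

const msg = { role: 'assistant', content: 'Hello!' }

console.log(isResponseMessage(msg, {
  object: 'response',
  output: [{ content: [{ text: 'Hello!' }] }]
})) // true: `responses.create` shape matches on output[0].content[0].text

console.log(isResponseMessage(msg, {
  choices: [{ message: { content: 'Something else' } }]
})) // false: `chat.completions.create` shape compares choices[0].message.content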

lib/llm-events/openai/chat-completion-summary.js

Lines changed: 13 additions & 3 deletions
@@ -10,9 +10,19 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
   constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
     super({ agent, segment, request, response, responseAttrs: true, transaction })
     this.error = withError
-    this['request.max_tokens'] = request.max_tokens
+    this['request.max_tokens'] = request.max_tokens ?? request.max_output_tokens
     this['request.temperature'] = request.temperature
-    this['response.number_of_messages'] = request?.messages?.length + response?.choices?.length
-    this['response.choices.finish_reason'] = response?.choices?.[0]?.finish_reason
+
+    if (response?.object === 'response') {
+      // `responses.create` logic
+      // request.input can be an array or a string
+      const requestLength = Array.isArray(request?.input) ? request.input.length : 1
+      this['response.number_of_messages'] = requestLength + response?.output?.length
+      this['response.choices.finish_reason'] = response?.status
+    } else {
+      // `chat.completions.create` logic
+      this['response.number_of_messages'] = request?.messages?.length + response?.choices?.length
+      this['response.choices.finish_reason'] = response?.choices?.[0]?.finish_reason
+    }
   }
 }
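
For example, in a hypothetical `responses.create` exchange a string input counts as one request message, so the summary fields come out as follows:

// Hypothetical request/response pair for the `responses.create` branch.
const request = { input: 'hi', max_output_tokens: 100 }
const response = {
  object: 'response',
  status: 'completed',
  output: [{ role: 'assistant', content: [{ text: 'Hello!' }] }]
}

const requestLength = Array.isArray(request?.input) ? request.input.length : 1
console.log(requestLength + response?.output?.length)        // 2 -> response.number_of_messages
console.log(response?.status)                                // 'completed' -> response.choices.finish_reason
console.log(request.max_tokens ?? request.max_output_tokens) // 100 -> request.max_tokens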
