@@ -23,6 +23,61 @@ const { DESTINATIONS } = require('../config/attribute-filter')
2323
2424let TRACKING_METRIC = OPENAI . TRACKING_PREFIX
2525
/**
 * Extracts the assistant message (content and role) from an OpenAI response.
 *
 * Supports both response shapes produced by the SDK:
 * - Responses API (`response.object === 'response'`): the message lives at
 *   `output[0]`, with text under `content[0].text`.
 * - Chat Completions API: the message lives at `choices[0].message`.
 *
 * @param {object} response The OpenAI SDK response object
 * @returns {{ content: string, role: string }} the message object with fields `content` and `role`
 */
function getMessageFromResponse(response) {
  if (response?.object === 'response') {
    const output = response?.output?.[0]
    return { content: output?.content?.[0]?.text, role: output?.role }
  }

  const message = response?.choices?.[0]?.message
  return { content: message?.content, role: message?.role }
}
45+
/**
 * Collects all prompt messages from an OpenAI request object.
 *
 * Messages can arrive in a few shapes depending on the API used:
 * - `chat.completions.create` (and langchain): `request.messages` is an
 *   array of objects with `content` and `role` properties.
 * - `responses.create`: `request.input` is either an array of objects with
 *   `content` and `role` properties, or a single string that is implied to
 *   be a user message.
 *
 * @param {object} request The OpenAI SDK request object
 * @param {Shim} shim instance of shim
 * @returns {Array<object>} an array of message objects with fields `content` and `role`
 */
function getMessagesFromRequest(request, shim) {
  const { input, messages } = request ?? {}

  if (Array.isArray(input)) {
    // Array of input messages; keep only well-formed entries
    return input.filter((msg) => msg?.content && msg?.role)
  }

  if (typeof input === 'string') {
    // A bare string input is treated as a single user message
    return [{ content: input, role: 'user' }]
  }

  if (Array.isArray(messages)) {
    // Classic chat-completions message array; keep only well-formed entries
    return messages.filter((msg) => msg?.content && msg?.role)
  }

  shim.logger.warn('No valid messages found in OpenAI request object.')
  return []
}
80+
2681/**
2782 * Checks if we should skip instrumentation.
2883 * Currently, it checks if `ai_monitoring.enabled` is true
@@ -145,8 +200,13 @@ function recordChatCompletionMessages({
145200 withError : err != null
146201 } )
147202
148- // Only take the first response message and append to input messages
149- const messages = [ ...request . messages , response ?. choices ?. [ 0 ] ?. message ]
203+ // Note: langchain still expects a message event even
204+ // when the response is empty, so no filtering here
205+ const messages = [
206+ ...getMessagesFromRequest ( request , shim ) ,
207+ getMessageFromResponse ( response )
208+ ]
209+
150210 messages . forEach ( ( message , index ) => {
151211 const completionMsg = new LlmChatCompletionMessage ( {
152212 agent,
@@ -218,6 +278,7 @@ function instrumentStream({ agent, shim, request, response, segment, transaction
218278 throw err
219279 } finally {
220280 chunk . choices [ 0 ] . message = { role, content }
281+
221282 // update segment duration since we want to extend the time it took to
222283 // handle the stream
223284 segment . touch ( )
@@ -275,7 +336,6 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
275336
276337 // Instruments chat completion creation
277338 // and creates the LLM events
278- // **Note**: Currently only for promises. streams will come later
279339 shim . record (
280340 openaiClient . Chat . Completions . prototype ,
281341 'create' ,
@@ -312,6 +372,37 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
312372 }
313373 )
314374
375+ // New API introduced in openai@5.0.0
376+ // Also instruments chat completion creation
377+ // and creates the LLM events
378+ if ( semver . gte ( shim . pkgVersion , '5.0.0' ) ) {
379+ shim . record ( openaiClient . Responses . prototype , 'create' , function wrapCreate ( shim , create , name , args ) {
380+ const [ request ] = args
381+ if ( request . stream ) {
382+ shim . logger . warn ( 'Streaming is not supported for responses.create yet.' )
383+ return
384+ }
385+
386+ return new RecorderSpec ( {
387+ name : OPENAI . COMPLETION ,
388+ promise : true ,
389+ after ( { error : err , result : response , segment, transaction } ) {
390+ recordChatCompletionMessages ( {
391+ agent,
392+ shim,
393+ segment,
394+ transaction,
395+ request,
396+ response,
397+ err
398+ } )
399+
400+ addLlmMeta ( { agent, transaction } )
401+ }
402+ } )
403+ } )
404+ }
405+
315406 // Instruments embedding creation
316407 // and creates LlmEmbedding event
317408 shim . record (
0 commit comments