Commit b7927b6

openai chat summary
1 parent 785524c commit b7927b6

File tree

15 files changed: +174, -158 lines


lib/llm-events-new/chat-message.js

Lines changed: 4 additions & 4 deletions
@@ -38,19 +38,19 @@ class LlmChatCompletionMessage extends LlmEvent {
     super({ agent, segment, transaction, vendor, responseModel, requestId })
 
     this.completion_id = completionId
-    if (isResponse) this.is_response = isResponse
     this.sequence = sequence
+    if (isResponse) this.is_response = isResponse
 
     if (role) {
       this.role = role
     } else {
       // If the role attribute is not available, a value of user MUST be sent for
       // requests and a value of assistant MUST be sent for responses.
-      if (sequence === 0) {
+      if (isResponse) {
+        this.role = 'assistant'
+      } else if (sequence === 0) {
         // We can assume the first message in the sequence is the request message.
         this.role = 'user'
-      } else if (this.is_response) {
-        this.role = 'assistant'
       }
     }
 
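Net effect of this hunk, beyond moving the `is_response` assignment after `sequence`: a response message that happens to be first in its sequence is now labeled `assistant` instead of `user`, because the `isResponse` check runs before the `sequence === 0` check. A minimal sketch of the behavior change, assuming `agent`, `segment`, and `transaction` are live instrumentation objects and the remaining parameters match the constructor in this diff:

const LlmChatCompletionMessage = require('./lib/llm-events-new/chat-message')

// Hypothetical values; only sequence and isResponse matter for the role logic.
const msg = new LlmChatCompletionMessage({
  agent, segment, transaction,
  vendor: 'openai',
  responseModel: 'gpt-4',
  requestId: 'req-abc',
  completionId: 'chatcmpl-123',
  sequence: 0,
  isResponse: true
})
// Before this commit: msg.role === 'user' (the sequence === 0 branch won)
// After this commit:  msg.role === 'assistant' (the isResponse branch wins)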

lib/llm-events-new/chat-summary.js

Lines changed: 51 additions & 0 deletions
@@ -2,3 +2,54 @@
  * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */
+
+const LlmEvent = require('./base')
+
+/**
+ * @property {number} request.temperature Value representing how random or
+ * deterministic the output responses should be
+ * @property {number} request.max_tokens Maximum number of tokens that can be
+ * generated in a chat completion
+ * @property {string} request.model Model name specified in the request (e.g. 'gpt-4')
+ * @property {number} response.number_of_messages Number of messages comprising a
+ * chat completion including system, user, and assistant messages
+ * @property {string} response.choices.finish_reason Reason the model stopped generating tokens (e.g. "stop")
+ * @property {string} response.organization Organization ID returned in the response or response headers
+ * @property {number} timestamp Timestamp captured at the time of the LLM request with millisecond precision
+ */
+class LlmChatCompletionSummary extends LlmEvent {
+  /**
+   * @param {object} params constructor parameters
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {object} params.segment Current segment
+   * @param {object} params.transaction Current and active transaction
+   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
+   * @param {string} params.responseModel Model name from response
+   * @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
+   * @param {string} params.requestId ID from request/response headers
+   * @param {boolean} [params.error] Set to `true` if an error occurred during creation call, omitted if no error occurred
+   * @param {string} params.responseOrg Organization ID returned in the response or response headers
+   * @param {number} params.temperature Value representing how random or
+   * deterministic the output responses should be
+   * @param {number} params.maxTokens Maximum number of tokens that can be
+   * generated in a chat completion
+   * @param {number} params.numMsgs Number of messages comprising a
+   * chat completion including system, user, and assistant messages
+   * @param {string} params.finishReason Reason the model stopped generating tokens (e.g. "stop")
+   */
+  constructor({ agent, segment, transaction, vendor, responseModel, requestModel, requestId, error,
+    responseOrg, temperature, maxTokens, numMsgs, finishReason }) {
+    super({ agent, segment, transaction, vendor, responseModel, requestId, error })
+
+    this['request.model'] = requestModel
+    this['request.max_tokens'] = maxTokens
+    this['request.temperature'] = temperature
+    this['response.number_of_messages'] = numMsgs
+    this['response.choices.finish_reason'] = finishReason
+    this['response.organization'] = responseOrg
+    this.timestamp = segment.timer.start
+    this.duration = segment.getDurationInMillis()
+  }
+}
+
+module.exports = LlmChatCompletionSummary
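For reference, this summary stores its attributes under flattened, dotted keys rather than nested objects. A usage sketch with illustrative values (`agent`, `segment`, and `transaction` are assumed instrumentation objects, not defined here):

const LlmChatCompletionSummary = require('./lib/llm-events-new/chat-summary')

const summary = new LlmChatCompletionSummary({
  agent, segment, transaction,
  vendor: 'openai',
  responseModel: 'gpt-4-0613',
  requestModel: 'gpt-4',
  requestId: 'req-abc',
  responseOrg: 'org-123',
  temperature: 0.7,
  maxTokens: 500,
  numMsgs: 3,
  finishReason: 'stop'
})
// summary['request.model']                  === 'gpt-4'
// summary['request.temperature']            === 0.7
// summary['response.number_of_messages']    === 3
// summary['response.choices.finish_reason'] === 'stop'
// summary.timestamp and summary.duration are read from the segment's timer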

lib/llm-events-new/openai/chat-message.js

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,9 @@
 const LlmChatCompletionMessage = require('../chat-message')
 const { getUsageTokens } = require('./utils')
 
+/**
+ * Encapsulates an OpenAI `LlmChatCompletionMessage` event.
+ */
 class OpenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
   constructor({ agent,
     segment,
lib/llm-events-new/openai/chat-summary.js

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2026 New Relic Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+const LlmChatCompletionSummary = require('../chat-summary')
+const { getUsageTokens } = require('./utils')
+
+/**
+ * Encapsulates an OpenAI `LlmChatCompletionSummary` event.
+ */
+class OpenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
+  constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
+    super({
+      agent,
+      segment,
+      transaction,
+      vendor: 'openai',
+      error,
+      responseModel: response?.model,
+      responseOrg: response?.headers?.['openai-organization'],
+      requestModel: request?.model,
+      requestId: response?.headers?.['x-request-id'],
+      temperature: request?.temperature,
+      maxTokens: request?.max_tokens ?? request?.max_output_tokens
+    })
+
+    if (request?.input) {
+      // `responses.create` api logic
+      // `request.input` can be an array or a string.
+      const requestLength = Array.isArray(request.input) ? request.input.length : 1
+      this['response.number_of_messages'] = requestLength + (response?.output?.length ?? 0)
+      this['response.choices.finish_reason'] = response?.status
+    } else {
+      // `chat.completions.create` api logic
+      this['response.number_of_messages'] = request?.messages?.length + response?.choices?.length
+      this['response.choices.finish_reason'] = response?.choices?.[0]?.finish_reason
+    }
+
+    this.setTokens(agent, request, response)
+    if (response.headers) {
+      // Set response.headers.*
+      this['response.headers.llmVersion'] = response.headers['openai-version']
+      this['response.headers.ratelimitLimitRequests'] = response.headers['x-ratelimit-limit-requests']
+      this['response.headers.ratelimitLimitTokens'] = response.headers['x-ratelimit-limit-tokens']
+      this['response.headers.ratelimitResetTokens'] = response.headers['x-ratelimit-reset-tokens']
+      this['response.headers.ratelimitRemainingTokens'] = response.headers['x-ratelimit-remaining-tokens']
+      this['response.headers.ratelimitRemainingRequests'] = response.headers['x-ratelimit-remaining-requests']
+    }
+  }
+
+  setTokens(agent, request, response) {
+    const tokenCB = agent.llm?.tokenCountCallback
+
+    // Prefer callback for prompt and completion tokens; if unavailable, fall back to response data.
+    if (tokenCB) {
+      const messages = request?.input || request?.messages
+
+      const promptContent = typeof messages === 'string'
+        ? messages
+        : messages?.map((msg) => msg.content).join(' ')
+
+      const completionContent = response?.output
+        ? response.output.map((resContent) => resContent.content[0].text).join(' ')
+        : response?.choices?.map((resContent) => resContent.message.content).join(' ')
+
+      if (promptContent && completionContent) {
+        this.setTokenUsageFromCallback(
+          {
+            tokenCB,
+            reqModel: request.model,
+            resModel: this['response.model'],
+            promptContent,
+            completionContent
+          }
+        )
+      }
+      return
+    }
+
+    const tokens = getUsageTokens(response)
+    this.setTokensInResponse(tokens)
+  }
+}
+
+module.exports = OpenAiLlmChatCompletionSummary
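Two things worth noting in this new file. First, `response.number_of_messages` is computed per API: the `responses.create` path counts request inputs plus `response.output` entries, while the `chat.completions.create` path sums `request.messages.length` and `response.choices.length`. Second, `setTokens` prefers an application-registered token counter and only falls back to `getUsageTokens(response)` when none exists. The callback read from `agent.llm?.tokenCountCallback` is the one applications register through the agent's `setLlmTokenCountCallback` API; a sketch of its expected shape (the counting heuristic is illustrative only, not a real tokenizer):

const newrelic = require('newrelic')

// Called once per content string with the model name; must return a token count.
// The summary uses it for both the prompt text and the completion text.
newrelic.setLlmTokenCountCallback(function countTokens(model, content) {
  return Math.ceil(content.length / 4) // placeholder heuristic
})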

lib/llm-events-new/openai/embedding.js

Lines changed: 3 additions & 0 deletions
@@ -7,6 +7,9 @@
 const LlmEmbedding = require('../embedding')
 const { getUsageTokens } = require('./utils')
 
+/**
+ * Encapsulates an OpenAI `LlmEmbedding` event.
+ */
 class OpenAiLlmEmbedding extends LlmEmbedding {
   constructor({ agent, segment, transaction, request = {}, response = {}, error = null }) {
     super({ agent,

lib/llm-events-new/openai/index.js

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2026 New Relic Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+'use strict'
+
+const LlmChatCompletionMessage = require('./chat-message')
+const LlmChatCompletionSummary = require('./chat-summary')
+const LlmEmbedding = require('./embedding')
+
+module.exports = {
+  LlmChatCompletionMessage,
+  LlmChatCompletionSummary,
+  LlmEmbedding
+}

lib/llm-events/openai/chat-completion-summary.js

Lines changed: 0 additions & 65 deletions
This file was deleted.

lib/llm-events/openai/event.js

Lines changed: 0 additions & 57 deletions
This file was deleted.

lib/llm-events/openai/index.js

Lines changed: 0 additions & 14 deletions
This file was deleted.

lib/subscribers/openai/chat.js

Lines changed: 3 additions & 4 deletions
@@ -13,8 +13,7 @@ const { AiMonitoringChatSubscriber } = require('../ai-monitoring')
 const { AI } = require('#agentlib/metrics/names.js')
 const semver = require('semver')
 const MIN_STREAM_VERSION = '4.12.2'
-const { LlmChatCompletionSummary } = require('#agentlib/llm-events/openai/index.js')
-const OpenAiLlmChatCompletionMessage = require('#agentlib/llm-events-new/openai/chat-message.js')
+const { LlmChatCompletionSummary, LlmChatCompletionMessage } = require('#agentlib/llm-events-new/openai/index.js')
 const { wrapPromise } = require('../utils')
 
 class OpenAIChatCompletions extends AiMonitoringChatSubscriber {
@@ -163,7 +162,7 @@ class OpenAIChatCompletions extends AiMonitoringChatSubscriber {
       transaction,
       request,
       response: { ...response, headers },
-      withError: !!err
+      error: !!err
     })
     return summary
   }
@@ -250,7 +249,7 @@ class OpenAIChatCompletions extends AiMonitoringChatSubscriber {
      isResponse = message.content === response?.choices?.[0]?.message?.content
    }
 
-    return new OpenAiLlmChatCompletionMessage({
+    return new LlmChatCompletionMessage({
      agent: this.agent,
      segment,
      transaction,
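These subscriber edits line up with the new event classes: both `LlmChatCompletionSummary` and `LlmChatCompletionMessage` now come from the single `llm-events-new/openai` barrel, and the summary is constructed with `error` instead of the old `withError` flag. The call site therefore reduces to something like this sketch (variables as in the surrounding hunks):

const { LlmChatCompletionSummary, LlmChatCompletionMessage } =
  require('#agentlib/llm-events-new/openai/index.js')

const summary = new LlmChatCompletionSummary({
  agent: this.agent, segment, transaction, request,
  response: { ...response, headers },
  error: !!err
})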
