Commit 1615347
OpenAI LLM event refactor
1 parent c5469ea commit 1615347

21 files changed: +566 -212 lines

lib/llm-events/base.js

Lines changed: 181 additions & 0 deletions
@@ -0,0 +1,181 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const { DESTINATIONS } = require('../config/attribute-filter')
const { makeId } = require('../util/hashes')

/**
 * The base LLM event class that contains logic and properties
 * (e.g. `trace_id`, `vendor`) that are common to all LLM events.
 *
 * @property {string} id UUID or identifier for the event
 * @property {string} request_id ID from request/response headers
 * @property {string} span_id GUID of the active span
 * @property {string} trace_id Current trace ID
 * @property {string} response.model Model name from the response
 * @property {string} vendor Lowercased vendor name, e.g. "openai"
 * @property {string} ingest_source Always set to 'Node'
 * @property {boolean|undefined} error Set to `true` if an error occurred during the creation call; omitted if no error occurred
 */
class LlmEvent {
  ingest_source = 'Node'

  /**
   *
   * @param {object} params constructor parameters to create an `LlmEvent`
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} [params.responseModel] Model name from the response
   * @param {string} [params.requestId] ID from request/response headers
   * @param {boolean} [params.error] Set to `true` if an error occurred during the creation call; omitted if no error occurred
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestId, error }) {
    this.id = makeId(32)
    this.span_id = segment?.id
    this.trace_id = transaction?.traceId
    this.vendor = vendor
    this.metadata = agent

    // Omit the `error` property if no error occurred
    if (error === true) {
      this.error = error
    }

    // If a certain attribute value is not accessible via instrumentation,
    // it can be omitted from the event.
    if (requestId) this.request_id = requestId
    if (responseModel) this['response.model'] = responseModel
  }

  // eslint-disable-next-line accessor-pairs
  set metadata(agent) {
    const transaction = agent.tracer.getTransaction()
    const attrs = transaction?.trace?.custom.get(DESTINATIONS.TRANS_SCOPE) || {}
    for (const [key, value] of Object.entries(attrs)) {
      if (key.startsWith('llm.')) {
        this[key] = value
      }
    }
  }

  /**
   * Determines if the provided token count is valid.
   * A valid token count is greater than 0 and not null.
   * @param {number} tokenCount The token count obtained from the token callback
   * @returns {boolean} Whether the token count is valid
   */
  validTokenCount(tokenCount) {
    return tokenCount !== null && tokenCount > 0
  }

  /**
   * Calculates the total token count from the prompt and completion token
   * counts set on the event.
   * @returns {number} The total token count
   */
  getTotalTokenCount() {
    return Number(this['response.usage.prompt_tokens']) + Number(this['response.usage.completion_tokens'])
  }

  /**
   * If `totalTokens` is valid, assigns it to
   * `this['response.usage.total_tokens']`.
   * @param {number} totalTokens Total tokens on the embedding message
   */
  setTokensOnEmbeddingMessage(totalTokens) {
    if (this.validTokenCount(totalTokens)) {
      this['response.usage.total_tokens'] = totalTokens
    }
  }

  /**
   * Sets the provided token counts on the LLM event.
   * Checks that both `promptTokens` and `completionTokens` are greater than
   * zero before setting, because the spec states that token counts should
   * only be set if both are present.
   * @param {object} params to the function
   * @param {number} params.promptTokens value of prompt token count
   * @param {number} params.completionTokens value of completion(s) token count
   * @param {number} params.totalTokens value of prompt + completion(s) token count
   */
  setTokensInResponse({ promptTokens, completionTokens, totalTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this['response.usage.prompt_tokens'] = promptTokens
      this['response.usage.completion_tokens'] = completionTokens
      this['response.usage.total_tokens'] = totalTokens || this.getTotalTokenCount()
    }
  }

  /**
   * Sets `token_count` to 0 on the LlmChatCompletionMessage if both prompt
   * and completion token counts are greater than zero. The spec states that
   * when token counts are set, `token_count` should be set to 0 to indicate
   * that the token calculation does not have to occur in the ingest pipeline.
   * @param {object} params to the function
   * @param {number} params.promptTokens value of prompt token count
   * @param {number} params.completionTokens value of completion(s) token count
   */
  setTokenInCompletionMessage({ promptTokens, completionTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this.token_count = 0
    }
  }

  /**
   * Calculates prompt and completion token counts using the provided callback and models.
   * If both counts are valid, sets `this.token_count` to 0.
   *
   * @param {object} options - The params object.
   * @param {Function} options.tokenCB - The token counting callback function.
   * @param {string} options.reqModel - The model used for the prompt.
   * @param {string} options.resModel - The model used for the completion.
   * @param {string} options.promptContent - The prompt content to count tokens for.
   * @param {string} options.completionContent - The completion content to count tokens for.
   * @returns {void}
   */
  setTokenFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptTokens = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionTokens = this.calculateCallbackTokens(tokenCB, resModel, completionContent)

    this.setTokenInCompletionMessage({ promptTokens, completionTokens })
  }

  /**
   * Calculates prompt and completion token counts using the provided callback and models.
   * If both counts are valid, sets the prompt, completion, and total token counts on the event.
   *
   * @param {object} options - The params object.
   * @param {Function} options.tokenCB - The token counting callback function.
   * @param {string} options.reqModel - The model used for the prompt.
   * @param {string} options.resModel - The model used for the completion.
   * @param {string} options.promptContent - The prompt content to count tokens for.
   * @param {string} options.completionContent - The completion content to count tokens for.
   * @returns {void}
   */
  setTokenUsageFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptTokens = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionTokens = this.calculateCallbackTokens(tokenCB, resModel, completionContent)
    this.setTokensInResponse({ promptTokens, completionTokens, totalTokens: promptTokens + completionTokens })
  }

  /**
   * Calculates the token count using the provided callback.
   * @param {Function} tokenCB - The token count callback function.
   * @param {string} model - The model.
   * @param {string} content - The content to calculate tokens for, such as the prompt or completion response.
   * @returns {number|undefined} - The calculated token count, or undefined if the callback is not a function.
   */
  calculateCallbackTokens(tokenCB, model, content) {
    if (typeof tokenCB === 'function') {
      return tokenCB(model, content)
    }
    return undefined
  }
}

module.exports = LlmEvent
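
A minimal usage sketch of the token helpers (not part of this commit; the `agent`, `segment`, and `transaction` stubs and the `countTokens` callback are hypothetical stand-ins for live agent objects):

'use strict'

const LlmEvent = require('./lib/llm-events/base')

// Hypothetical stubs - real instrumentation passes live agent objects.
const agent = { tracer: { getTransaction: () => null } }
const segment = { id: 'span-1' }
const transaction = { traceId: 'trace-1' }

const event = new LlmEvent({ agent, segment, transaction, vendor: 'openai' })

// Both counts valid: prompt, completion, and total usage attributes are set.
event.setTokensInResponse({ promptTokens: 10, completionTokens: 20 })
console.log(event['response.usage.total_tokens']) // 30, derived via getTotalTokenCount()

// A hypothetical callback with the (model, content) signature the base class expects.
const countTokens = (model, content) => Math.ceil(content.length / 4)
event.setTokenUsageFromCallback({
  tokenCB: countTokens,
  reqModel: 'gpt-4',
  resModel: 'gpt-4',
  promptContent: 'Hello',
  completionContent: 'Hi there'
})

Note that if either count from the callback is invalid (null, zero, or undefined), the usage attributes are left untouched, matching the spec rule that counts are only recorded when both are present.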
Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * An event that corresponds to each message (sent and received)
 * from a chat completion call, including those created by the user,
 * the assistant, and the system.
 *
 * @property {string} id ID in the format `response_id`-`sequence`,
 * or a UUID generated by the agent if no response ID is returned by the LLM
 */
module.exports = class LlmChatCompletionMessage extends LlmEvent {
  /**
   *
   * @param {object} params constructor params
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase name of vendor (e.g. 'openai')
   * @param {string} params.requestId ID associated with the request -
   * typically available in response headers
   * @param {string} params.responseId ID associated with the response, used to create `this.id`
   * @param {string} params.responseModel Model name returned in the response
   * @param {number} params.sequence Index (beginning at 0) associated with
   * each message, including the prompt and responses
   * @param {string} params.content Content of the message
   * @param {string} [params.role] Role of the message creator (e.g. `user`, `assistant`, `tool`)
   * @param {string} params.completionId ID of the `LlmChatCompletionSummary` event that
   * this message event is connected to
   * @param {boolean} [params.isResponse] `true` if a message is the result of a chat
   * completion and not an input message; omitted in `false` cases
   */
  constructor({ agent, segment, transaction, vendor, requestId, responseId, responseModel, sequence, content, role, completionId, isResponse }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId })

    this.completion_id = completionId
    this.sequence = sequence
    if (isResponse) this.is_response = isResponse

    if (role) {
      this.role = role
    } else {
      // If the role attribute is not available, a value of `user` MUST be sent
      // for requests and a value of `assistant` MUST be sent for responses.
      if (isResponse) {
        this.role = 'assistant'
      } else if (sequence === 0) {
        // We can assume the first message in the sequence is the request message.
        this.role = 'user'
      }
    }

    if (isResponse !== true) {
      // Only include the timestamp for input/request messages
      this.timestamp = segment.timer.start
    }

    if (responseId) {
      // A UUID is generated for `id` in the super constructor,
      // but use this id format if `responseId` exists
      this.id = `${responseId}-${sequence}`
    }

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.content = content
    }
  }
}
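
A hedged sketch of how prompt and response messages pair up under these rules; the require path and all stub values below are assumptions, not part of the diff:

'use strict'

const LlmChatCompletionMessage = require('./chat-completion-message') // assumed path

// Hypothetical stubs standing in for live agent objects.
const agent = {
  config: { ai_monitoring: { record_content: { enabled: true } } },
  tracer: { getTransaction: () => null }
}
const segment = { id: 'span-1', timer: { start: Date.now() } }
const transaction = { traceId: 'trace-1' }
const shared = {
  agent, segment, transaction, vendor: 'openai',
  requestId: 'req-1', responseId: 'chatcmpl-123',
  responseModel: 'gpt-4', completionId: 'summary-id'
}

// Prompt message: sequence 0 with no role provided defaults to 'user' and gets a timestamp.
const prompt = new LlmChatCompletionMessage({ ...shared, sequence: 0, content: 'Hello' })
// prompt.id === 'chatcmpl-123-0', prompt.role === 'user'

// Response message: role defaults to 'assistant'; no timestamp is set.
const reply = new LlmChatCompletionMessage({ ...shared, sequence: 1, content: 'Hi there', isResponse: true })
// reply.id === 'chatcmpl-123-1', reply.role === 'assistant', reply.is_response === true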
Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * @property {number} request.temperature Value representing how random or
 * deterministic the output responses should be
 * @property {number} request.max_tokens Maximum number of tokens that can be
 * generated in a chat completion
 * @property {string} request.model Model name specified in the request (e.g. 'gpt-4')
 * @property {number} response.number_of_messages Number of messages comprising a
 * chat completion, including system, user, and assistant messages
 * @property {string} response.choices.finish_reason Reason the model stopped generating tokens (e.g. "stop")
 * @property {string} response.organization Organization ID returned in the response or response headers
 * @property {number} timestamp Timestamp captured at the time of the LLM request, with millisecond precision
 */
module.exports = class LlmChatCompletionSummary extends LlmEvent {
  /**
   * @param {object} params constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} params.responseModel Model name from the response
   * @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
   * @param {string} params.requestId ID from request/response headers
   * @param {string} params.responseOrg Organization ID returned in the response or response headers
   * @param {number} params.temperature Value representing how random or
   * deterministic the output responses should be
   * @param {number} params.maxTokens Maximum number of tokens that can be
   * generated in a chat completion
   * @param {number} params.numMsgs Number of messages comprising a
   * chat completion, including system, user, and assistant messages
   * @param {string} params.finishReason Reason the model stopped generating tokens (e.g. "stop")
   * @param {boolean} [params.error] Set to `true` if an error occurred during the creation call; omitted if no error occurred
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestModel, requestId,
    responseOrg, temperature, maxTokens, numMsgs, finishReason, error }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId, error })

    if (requestModel) this['request.model'] = requestModel
    if (maxTokens) this['request.max_tokens'] = maxTokens
    if (temperature) this['request.temperature'] = temperature
    if (finishReason) this['response.choices.finish_reason'] = finishReason
    if (responseOrg) this['response.organization'] = responseOrg

    this['response.number_of_messages'] = numMsgs
    this.timestamp = segment.timer.start
    this.duration = segment.getDurationInMillis()
  }
}
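
A short sketch showing how absent request attributes are simply omitted from the summary event; the require path, the stubbed `getDurationInMillis`, and all example values are assumptions:

'use strict'

const LlmChatCompletionSummary = require('./chat-completion-summary') // assumed path

const agent = { tracer: { getTransaction: () => null } } // hypothetical stub
const segment = {
  id: 'span-1',
  timer: { start: Date.now() },
  getDurationInMillis: () => 125 // stubbed duration in milliseconds
}
const transaction = { traceId: 'trace-1' }

const summary = new LlmChatCompletionSummary({
  agent, segment, transaction, vendor: 'openai',
  requestModel: 'gpt-4', responseModel: 'gpt-4', requestId: 'req-1',
  temperature: 0.7, maxTokens: 256, numMsgs: 2, finishReason: 'stop'
})
// 'request.temperature', 'request.max_tokens', and 'response.choices.finish_reason'
// are present because they were provided; responseOrg was omitted, so
// 'response.organization' never appears on the event.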

lib/llm-events/embedding.js

Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * An event that captures data specific to the creation of
 * an embedding.
 *
 * @property {string} input Input to the embedding creation call
 * @property {string} request.model Model name specified in the request (e.g. `gpt-4`), can differ from `this['response.model']`
 * @property {string} response.organization Organization ID returned in the response or response headers
 * @property {number} response.usage.total_tokens Total number of tokens used for the input text
 * @property {number} duration Total time taken for the embedding call to complete, in milliseconds
 * @property {object} response.headers Vendor-specific headers, if any
 */
module.exports = class LlmEmbedding extends LlmEvent {
  /**
   *
   * @param {object} params constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} [params.requestId] ID associated with the request - typically available in response headers
   * @param {string} params.requestInput Input to the embedding creation call
   * @param {string} [params.requestModel] Model name specified in the request (e.g. 'gpt-4')
   * @param {string} [params.responseModel] Model name returned in the response (can differ from `request.model`)
   * @param {string} [params.responseOrg] Organization ID returned in the response or response headers
   * @param {string} params.vendor Lowercased name of vendor (e.g. 'openai')
   * @param {boolean} [params.error] Set to `true` if an error occurred during the creation call; omitted if no error occurred
   */
  constructor({ agent, segment, transaction, requestId, requestInput, requestModel, responseModel, responseOrg, vendor, error }) {
    super({ agent, segment, requestId, responseModel, transaction, vendor, error })
    if (requestModel) this['request.model'] = requestModel
    if (responseOrg) this['response.organization'] = responseOrg
    this.duration = segment.getDurationInMillis()

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.input = requestInput
    }
  }
}
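
And a corresponding sketch for embeddings (the stub objects and model name are illustrative only), showing both the `record_content` gate and the total-token helper inherited from the base class:

'use strict'

const LlmEmbedding = require('./lib/llm-events/embedding')

const agent = {
  config: { ai_monitoring: { record_content: { enabled: false } } }, // content capture off
  tracer: { getTransaction: () => null }
}
const segment = { id: 'span-1', getDurationInMillis: () => 42 } // hypothetical stubs
const transaction = { traceId: 'trace-1' }

const embedding = new LlmEmbedding({
  agent, segment, transaction, vendor: 'openai',
  requestInput: 'text to embed', requestModel: 'text-embedding-ada-002',
  responseModel: 'text-embedding-ada-002'
})
// With record_content disabled, `embedding.input` stays undefined.
embedding.setTokensOnEmbeddingMessage(8) // sets 'response.usage.total_tokens' to 8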
