
Commit b314096

OpenAI Llm event refactor
1 parent f339675 commit b314096

26 files changed: +680 −345 lines changed

lib/llm-events-new/base.js

Lines changed: 172 additions & 0 deletions
@@ -0,0 +1,172 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const { DESTINATIONS } = require('../config/attribute-filter')
const { makeId } = require('../util/hashes')

/**
 * The base LLM event class that contains logic and properties
 * that are common to all LLM events (e.g. `LlmChatCompletionMessage`).
 *
 * @property {string} id UUID or identifier for the event
 * @property {string} request_id ID from request/response headers
 * @property {string} span_id GUID of active span
 * @property {string} trace_id Current trace ID
 * @property {string} response.model Model name from response
 * @property {string} vendor Lowercased vendor name, e.g. "openai"
 * @property {string} ingest_source Always set to 'Node'
 * @property {boolean|undefined} error Set to `true` if an error occurred during the creation call, omitted if no error occurred
 */
class LlmEvent {
  ingest_source = 'Node'

  /**
   *
   * @param {object} params constructor parameters to create an `LlmEvent`
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} params.responseModel Model name from response
   * @param {string} params.requestId ID from request/response headers
   * @param {boolean} [params.error] Set to `true` if an error occurred during the creation call, omitted if no error occurred
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestId, error = null }) {
    this.id = makeId(36)
    this.request_id = requestId
    this.span_id = segment?.id
    this.trace_id = transaction?.traceId
    this['response.model'] = responseModel
    this.vendor = vendor
    this.metadata = agent

    if (error === true) {
      this.error = error
    }
  }

  // eslint-disable-next-line accessor-pairs
  set metadata(agent) {
    const transaction = agent.tracer.getTransaction()
    const attrs = transaction?.trace?.custom.get(DESTINATIONS.TRANS_SCOPE) || {}
    for (const [key, value] of Object.entries(attrs)) {
      if (key.startsWith('llm.')) {
        this[key] = value
      }
    }
  }

  /**
   * Determines if the provided token count is valid.
   * A valid token count is greater than 0 and not null.
   * @param {number} tokenCount The token count obtained from the token callback
   * @returns {boolean} Whether the token count is valid
   */
  validTokenCount(tokenCount) {
    return tokenCount !== null && tokenCount > 0
  }

  /**
   * Calculates the total token count from the prompt tokens and completion tokens
   * set in the event.
   * @returns {number} The total token count
   */
  getTotalTokenCount() {
    return Number(this['response.usage.prompt_tokens']) + Number(this['response.usage.completion_tokens'])
  }

  /**
   * Sets the total token count on an embedding event when the provided value is valid.
   * @param {number} totalTokens Total number of tokens used for the embedding input
   */
  setTokensOnEmbeddingMessage(totalTokens) {
    if (this.validTokenCount(totalTokens)) {
      this['response.usage.total_tokens'] = totalTokens
    }
  }

  /**
   * Sets the provided token counts on the LLM event.
   * Checks that promptTokens and completionTokens are greater than zero before setting.
   * This is because the spec states that token counts should only be set if both
   * are present.
   * @param {object} params to the function
   * @param {number} params.promptTokens value of prompt token count
   * @param {number} params.completionTokens value of completion(s) token count
   * @param {number} params.totalTokens value of prompt + completion(s) token count
   */
  setTokensInResponse({ promptTokens, completionTokens, totalTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this['response.usage.prompt_tokens'] = promptTokens
      this['response.usage.completion_tokens'] = completionTokens
      this['response.usage.total_tokens'] = totalTokens || this.getTotalTokenCount()
    }
  }

  /**
   * Sets `token_count` to 0 on the LlmChatCompletionMessage if both prompt and completion tokens are greater than zero.
   * This is because the spec states that if token counts are set, then we should set token_count to 0 to indicate
   * that the token calculation does not have to occur in the ingest pipeline.
   * @param {object} params to the function
   * @param {number} params.promptTokens value of prompt token count
   * @param {number} params.completionTokens value of completion(s) token count
   */
  setTokenInCompletionMessage({ promptTokens, completionTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this.token_count = 0
    }
  }

  /**
   * Calculates prompt and completion token counts using the provided callback and models.
   * If both counts are valid, sets this.token_count to 0.
   *
   * @param {object} options - The params object.
   * @param {Function} options.tokenCB - The token counting callback function.
   * @param {string} options.reqModel - The model used for the prompt.
   * @param {string} options.resModel - The model used for the completion.
   * @param {string} options.promptContent - The prompt content to count tokens for.
   * @param {string} options.completionContent - The completion content to count tokens for.
   * @returns {void}
   */
  setTokenFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptToken = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionToken = this.calculateCallbackTokens(tokenCB, resModel, completionContent)

    this.setTokenInCompletionMessage({ promptTokens: promptToken, completionTokens: completionToken })
  }

  /**
   * Calculates prompt and completion token counts using the provided callback and models.
   * If both counts are valid, sets the prompt, completion, and total token counts on the event.
   *
   * @param {object} options - The params object.
   * @param {Function} options.tokenCB - The token counting callback function.
   * @param {string} options.reqModel - The model used for the prompt.
   * @param {string} options.resModel - The model used for the completion.
   * @param {string} options.promptContent - The prompt content to count tokens for.
   * @param {string} options.completionContent - The completion content to count tokens for.
   * @returns {void}
   */
  setTokenUsageFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptTokens = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionTokens = this.calculateCallbackTokens(tokenCB, resModel, completionContent)
    this.setTokensInResponse({ promptTokens, completionTokens, totalTokens: promptTokens + completionTokens })
  }

  /**
   * Calculates the token count using the provided callback.
   * @param {Function} tokenCB - The token count callback function.
   * @param {string} model - The model.
   * @param {string} content - The content to calculate tokens for, such as prompt or completion response.
   * @returns {number|undefined} - The calculated token count, or undefined if the callback is not a function.
   */
  calculateCallbackTokens(tokenCB, model, content) {
    if (typeof tokenCB === 'function') {
      return tokenCB(model, content)
    }
    return undefined
  }
}

module.exports = LlmEvent
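
A minimal usage sketch, not part of this commit: the agent, segment, and transaction objects below are hand-rolled stubs that only satisfy what the LlmEvent constructor and metadata setter touch, and the require path assumes the script runs from the repository root.

// Usage sketch only: stubbed inputs, not the agent's real instrumentation objects.
const LlmEvent = require('./lib/llm-events-new/base')

const transaction = {
  traceId: 'trace-abc',
  // Custom attributes prefixed with `llm.` are copied onto the event by the metadata setter.
  trace: { custom: { get: () => ({ 'llm.conversation_id': 'conv-1' }) } }
}
const agent = {
  tracer: { getTransaction: () => transaction },
  config: { ai_monitoring: { record_content: { enabled: true } } }
}
const segment = { id: 'span-123', timer: { start: Date.now() }, getDurationInMillis: () => 42 }

const event = new LlmEvent({
  agent, segment, transaction,
  vendor: 'openai',
  responseModel: 'gpt-4o-mini',
  requestId: 'req-789'
})

// Usage is only recorded when both prompt and completion counts are valid (> 0);
// the total falls back to prompt + completion when not supplied.
event.setTokensInResponse({ promptTokens: 12, completionTokens: 30 })
console.log(event['response.usage.total_tokens']) // 42
console.log(event['llm.conversation_id']) // 'conv-1', copied from the transaction's custom attributes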

lib/llm-events-new/chat-message.js

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

const LlmEvent = require('./base')

/**
 * An event that corresponds to each message (sent and received)
 * from a chat completion call including those created by the user,
 * assistant, and the system.
 *
 * @property {string} id ID in the format `response_id`-`sequence`,
 * or a UUID generated by the agent if no response ID is returned by the LLM
 */
class LlmChatCompletionMessage extends LlmEvent {
  /**
   *
   * @param {object} params constructor params
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase name of vendor (e.g. 'openai')
   * @param {string} params.requestId ID associated with the request -
   * typically available in response headers
   * @param {string} params.responseId ID associated with the response
   * @param {string} params.responseModel Model name returned in the response
   * @param {number} params.sequence Index (beginning at 0) associated with
   * each message including the prompt and responses
   * @param {string} params.content Content of the message
   * @param {string} [params.role] Role of the message creator (e.g. `user`, `assistant`, `tool`)
   * @param {string} params.completionId ID of the `LlmChatCompletionSummary` event that
   * this message event is connected to
   * @param {boolean} [params.isResponse] `true` if a message is the result of a chat
   * completion and not an input message - omitted in `false` cases
   */
  constructor({ agent, segment, transaction, vendor, requestId, responseId, responseModel, sequence, content, role, completionId, isResponse }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId })

    this.completion_id = completionId
    this.sequence = sequence
    if (isResponse) this.is_response = isResponse

    if (role) {
      this.role = role
    } else {
      // If the role attribute is not available, a value of user MUST be sent for
      // requests and a value of assistant MUST be sent for responses.
      if (isResponse) {
        this.role = 'assistant'
      } else if (sequence === 0) {
        // We can assume the first message in the sequence is the request message.
        this.role = 'user'
      }
    }

    if (isResponse !== true) {
      // Only include for input/request messages
      this.timestamp = segment.timer.start
    }

    if (responseId) {
      // A UUID is generated for `id` in the super constructor,
      // but use this id format if responseId exists
      this.id = `${responseId}-${sequence}`
    }

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.content = content
    }
  }
}

module.exports = LlmChatCompletionMessage
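
A hedged sketch, again with stubbed inputs rather than real instrumentation objects, of the two behaviors specific to this class: role defaulting when no role is passed, and the `responseId`-`sequence` id format.

// Sketch only: minimal stubs, just enough for the constructor paths exercised here.
const LlmChatCompletionMessage = require('./lib/llm-events-new/chat-message')

const transaction = { traceId: 'trace-abc', trace: { custom: { get: () => ({}) } } }
const agent = {
  tracer: { getTransaction: () => transaction },
  config: { ai_monitoring: { record_content: { enabled: true } } }
}
const segment = { id: 'span-123', timer: { start: Date.now() } }

const responseMessage = new LlmChatCompletionMessage({
  agent, segment, transaction,
  vendor: 'openai',
  requestId: 'req-789',
  responseId: 'chatcmpl-123',
  responseModel: 'gpt-4o-mini',
  sequence: 1,
  content: 'Hello! How can I help?',
  completionId: 'summary-event-id',
  isResponse: true
})

console.log(responseMessage.id) // 'chatcmpl-123-1' (responseId-sequence)
console.log(responseMessage.role) // 'assistant' - defaulted because no role was given and isResponse is true
console.log(responseMessage.is_response) // true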

lib/llm-events-new/chat-summary.js

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

const LlmEvent = require('./base')

/**
 * @property {number} request.temperature Value representing how random or
 * deterministic the output responses should be
 * @property {number} request.max_tokens Maximum number of tokens that can be
 * generated in a chat completion
 * @property {string} request.model Model name specified in the request (e.g. 'gpt-4')
 * @property {number} response.number_of_messages Number of messages comprising a
 * chat completion including system, user, and assistant messages
 * @property {string} response.choices.finish_reason Reason the model stopped generating tokens (e.g. "stop")
 * @property {string} response.organization Organization ID returned in the response or response headers
 * @property {number} timestamp Timestamp captured at the time of the LLM request with millisecond precision
 */
class LlmChatCompletionSummary extends LlmEvent {
  /**
   * @param {object} params constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} params.responseModel Model name from response
   * @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
   * @param {string} params.requestId ID from request/response headers
   * @param {boolean} [params.error] Set to `true` if an error occurred during creation call, omitted if no error occurred
   * @param {string} params.responseOrg Organization ID returned in the response or response headers
   * @param {number} params.temperature Value representing how random or
   * deterministic the output responses should be
   * @param {number} params.maxTokens Maximum number of tokens that can be
   * generated in a chat completion
   * @param {number} params.numMsgs Number of messages comprising a
   * chat completion including system, user, and assistant messages
   * @param {string} params.finishReason Reason the model stopped generating tokens (e.g. "stop")
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestModel, requestId, error,
    responseOrg, temperature, maxTokens, numMsgs, finishReason }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId, error })

    this['request.model'] = requestModel
    this['request.max_tokens'] = maxTokens
    this['request.temperature'] = temperature
    this['response.number_of_messages'] = numMsgs
    this['response.choices.finish_reason'] = finishReason
    this['response.organization'] = responseOrg
    this.timestamp = segment.timer.start
    this.duration = segment.getDurationInMillis()
  }
}

module.exports = LlmChatCompletionSummary
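
For completeness, a small hedged sketch of constructing the summary event with the same style of stubs: `timestamp` and `duration` come straight from the segment, while the remaining attributes are copied from illustrative request/response values.

// Sketch only: illustrative values, not real OpenAI response data.
const LlmChatCompletionSummary = require('./lib/llm-events-new/chat-summary')

const transaction = { traceId: 'trace-abc', trace: { custom: { get: () => ({}) } } }
const agent = {
  tracer: { getTransaction: () => transaction },
  config: { ai_monitoring: { record_content: { enabled: true } } }
}
const segment = { id: 'span-123', timer: { start: Date.now() }, getDurationInMillis: () => 250 }

const summary = new LlmChatCompletionSummary({
  agent, segment, transaction,
  vendor: 'openai',
  requestModel: 'gpt-4',
  responseModel: 'gpt-4-0613',
  requestId: 'req-789',
  responseOrg: 'org-abc',
  temperature: 0.7,
  maxTokens: 256,
  numMsgs: 3,
  finishReason: 'stop'
})

console.log(summary['request.model']) // 'gpt-4'
console.log(summary.duration) // 250, taken from the segment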

lib/llm-events-new/embedding.js

Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

const LlmEvent = require('./base')

/**
 * An event that captures data specific to the creation of
 * an embedding.
 *
 * @property {string} input Input to the embedding creation call
 * @property {string} request.model Model name specified in the request (e.g. `gpt-4`), can differ from `this['response.model']`
 * @property {string} response.organization Organization ID returned in the response or response headers
 * @property {number} response.usage.total_tokens Total number of tokens used for input text
 * @property {number} duration Total time taken for the embedding call to complete in milliseconds
 * @property {*} response.headers Vendor-specific headers
 */
class LlmEmbedding extends LlmEvent {
  /**
   *
   * @param {object} params constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {object} params.segment Current segment
   * @param {object} params.transaction Current and active transaction
   * @param {string} params.requestId ID associated with the request - typically available in response headers
   * @param {string} params.requestInput Input to the embedding creation call
   * @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
   * @param {string} params.responseModel Model name returned in the response (can differ from `request.model`)
   * @param {string} params.responseOrg Organization ID returned in the response or response headers
   * @param {string} params.vendor Lowercased name of vendor (e.g. 'openai')
   * @param {boolean} [params.error] Set to `true` if an error occurred during creation call - omitted if no error occurred
   */
  constructor({ agent, segment, transaction, requestId, requestInput, requestModel, responseModel, responseOrg, vendor, error }) {
    super({ agent, segment, requestId, responseModel, transaction, vendor, error })
    this['request.model'] = requestModel
    this['response.organization'] = responseOrg
    this.duration = segment?.getDurationInMillis()

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.input = requestInput
    }
  }
}

module.exports = LlmEmbedding
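
A last hedged sketch with the same stubbed inputs, showing that the embedding input is only attached when `ai_monitoring.record_content.enabled` is true, and that the total token count is set separately via the base class's setTokensOnEmbeddingMessage.

// Sketch only: stubbed agent/segment/transaction, hypothetical values.
const LlmEmbedding = require('./lib/llm-events-new/embedding')

const transaction = { traceId: 'trace-abc', trace: { custom: { get: () => ({}) } } }
const agent = {
  tracer: { getTransaction: () => transaction },
  config: { ai_monitoring: { record_content: { enabled: false } } } // content recording disabled
}
const segment = { id: 'span-123', getDurationInMillis: () => 80 }

const embedding = new LlmEmbedding({
  agent, segment, transaction,
  vendor: 'openai',
  requestId: 'req-789',
  requestInput: 'text to embed',
  requestModel: 'text-embedding-3-small',
  responseModel: 'text-embedding-3-small',
  responseOrg: 'org-abc'
})

embedding.setTokensOnEmbeddingMessage(8)

console.log(embedding.input) // undefined - record_content is disabled in this stub config
console.log(embedding['response.usage.total_tokens']) // 8
console.log(embedding.duration) // 80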
