Commit 58ad008

OpenAI LLM event refactor and new base LLM classes
1 parent c5469ea commit 58ad008

21 files changed: +599 -212 lines

lib/llm-events/base.js

Lines changed: 182 additions & 0 deletions
@@ -0,0 +1,182 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const { DESTINATIONS } = require('../config/attribute-filter')
const { makeId } = require('../util/hashes')

/**
 * The base LLM event class that contains logic and properties
 * (e.g. `trace_id`, `vendor`) that are common to all LLM events.
 *
 * @property {string} id UUID or identifier for the event
 * @property {string} request_id ID from request/response headers
 * @property {string} span_id GUID of active span
 * @property {string} trace_id Current trace ID
 * @property {string} response.model Model name from response
 * @property {string} vendor Lowercased vendor name, e.g. "openai"
 * @property {string} ingest_source Always set to 'Node'
 * @property {boolean|undefined} error Set to `true` if an error occurred
 * during the creation call, omitted if no error occurred
 */
class LlmEvent {
  ingest_source = 'Node'

  /**
   * @param {object} params Constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {TraceSegment} params.segment Current segment
   * @param {Transaction} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} [params.responseModel] Model name from response
   * @param {string} [params.requestId] ID from request/response headers
   * @param {boolean} [params.error] Set to `true` if an error occurred during
   * the creation call, omitted if no error occurred
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestId, error }) {
    this.id = makeId(32)
    this.span_id = segment.id
    this.trace_id = transaction.traceId
    this.vendor = vendor
    this.metadata = agent

    // Omit the `error` property if no error occurred.
    if (error === true) {
      this.error = error
    }

    // If a certain attribute value is not accessible via instrumentation,
    // it can be omitted from the event.
    if (requestId) this.request_id = requestId
    if (responseModel) this['response.model'] = responseModel
  }

  // eslint-disable-next-line accessor-pairs
  set metadata(agent) {
    const transaction = agent.tracer.getTransaction()
    const attrs = transaction?.trace?.custom.get(DESTINATIONS.TRANS_SCOPE) || {}
    for (const [key, value] of Object.entries(attrs)) {
      if (key.startsWith('llm.')) {
        this[key] = value
      }
    }
  }

  /**
   * Determines if the provided token count is valid.
   * A valid token count is not null and greater than 0.
   * @param {number} tokenCount The token count obtained from the token callback
   * @returns {boolean} Whether the token count is valid
   */
  validTokenCount(tokenCount) {
    return tokenCount !== null && tokenCount > 0
  }

  /**
   * Calculates the total token count from the prompt and completion token
   * counts set on the event.
   * @returns {number} The total token count
   */
  getTotalTokenCount() {
    return Number(this['response.usage.prompt_tokens']) + Number(this['response.usage.completion_tokens'])
  }

  /**
   * If `totalTokens` is valid, assigns it to
   * `this['response.usage.total_tokens']`.
   * @param {number} totalTokens Total tokens on the embedding message
   */
  setTokensOnEmbeddingMessage(totalTokens) {
    if (this.validTokenCount(totalTokens)) {
      this['response.usage.total_tokens'] = totalTokens
    }
  }

  /**
   * Sets the provided token counts on the LLM event.
   * Checks that both `promptTokens` and `completionTokens` are greater than
   * zero before setting, because the spec states that token counts should
   * only be set if both are present.
   * @param {object} params Function parameters
   * @param {number} params.promptTokens Prompt token count
   * @param {number} params.completionTokens Completion(s) token count
   * @param {number} params.totalTokens Prompt + completion(s) token count
   */
  setTokensInResponse({ promptTokens, completionTokens, totalTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this['response.usage.prompt_tokens'] = promptTokens
      this['response.usage.completion_tokens'] = completionTokens
      this['response.usage.total_tokens'] = totalTokens || this.getTotalTokenCount()
    }
  }

  /**
   * Sets `token_count` to 0 on the LlmChatCompletionMessage if both prompt
   * and completion token counts are greater than zero. The spec states that
   * when token counts are set, `token_count` should be 0 to indicate that
   * the token calculation does not have to occur in the ingest pipeline.
   * @param {object} params Function parameters
   * @param {number} params.promptTokens Prompt token count
   * @param {number} params.completionTokens Completion(s) token count
   */
  setTokenInCompletionMessage({ promptTokens, completionTokens }) {
    if (this.validTokenCount(promptTokens) && this.validTokenCount(completionTokens)) {
      this.token_count = 0
    }
  }

  /**
   * Calculates prompt and completion token counts using the provided callback
   * and models. If both counts are valid, sets `this.token_count` to 0.
   *
   * @param {object} options The params object.
   * @param {Function} options.tokenCB The token counting callback function.
   * @param {string} options.reqModel The model used for the prompt.
   * @param {string} options.resModel The model used for the completion.
   * @param {string} options.promptContent The prompt content to count tokens for.
   * @param {string} options.completionContent The completion content to count tokens for.
   * @returns {void}
   */
  setTokenFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptToken = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionToken = this.calculateCallbackTokens(tokenCB, resModel, completionContent)

    this.setTokenInCompletionMessage({ promptTokens: promptToken, completionTokens: completionToken })
  }

  /**
   * Calculates prompt and completion token counts using the provided callback
   * and models. If both counts are valid, sets the prompt, completion, and
   * total token counts on the event.
   *
   * @param {object} options The params object.
   * @param {Function} options.tokenCB The token counting callback function.
   * @param {string} options.reqModel The model used for the prompt.
   * @param {string} options.resModel The model used for the completion.
   * @param {string} options.promptContent The prompt content to count tokens for.
   * @param {string} options.completionContent The completion content to count tokens for.
   * @returns {void}
   */
  setTokenUsageFromCallback({ tokenCB, reqModel, resModel, promptContent, completionContent }) {
    const promptTokens = this.calculateCallbackTokens(tokenCB, reqModel, promptContent)
    const completionTokens = this.calculateCallbackTokens(tokenCB, resModel, completionContent)
    this.setTokensInResponse({ promptTokens, completionTokens, totalTokens: promptTokens + completionTokens })
  }

  /**
   * Calculates the token count using the provided callback.
   * @param {Function} tokenCB The token count callback function.
   * @param {string} model The model.
   * @param {string} content The content to calculate tokens for, such as the prompt or completion response.
   * @returns {number|undefined} The calculated token count, or undefined if the callback is not a function.
   */
  calculateCallbackTokens(tokenCB, model, content) {
    if (typeof tokenCB === 'function') {
      return tokenCB(model, content)
    }
    return undefined
  }
}

module.exports = LlmEvent
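
To make the callback-driven token flow concrete, here is a minimal usage sketch. Everything below is hypothetical: the agent, segment, and transaction objects are hand-rolled stubs standing in for the real tracer types, and the length-based token callback is only a placeholder heuristic.

// Hypothetical usage sketch — not part of this commit.
const LlmEvent = require('./base')

const transaction = {
  traceId: 'trace-abc',
  trace: { custom: { get: () => ({ 'llm.team': 'search' }) } }
}
const agent = { tracer: { getTransaction: () => transaction } }
const segment = { id: 'span-123', timer: { start: Date.now() } }

const event = new LlmEvent({ agent, segment, transaction, vendor: 'openai' })

// A token callback receives (model, content) and returns a count; this crude
// length-based heuristic exists only to demonstrate the flow.
const tokenCB = (model, content) => Math.ceil(content.length / 4)

event.setTokenUsageFromCallback({
  tokenCB,
  reqModel: 'gpt-4',
  resModel: 'gpt-4',
  promptContent: 'What is observability?',
  completionContent: 'Observability lets you infer internal state from outputs.'
})
// Both counts are valid (not null and > 0), so the prompt, completion, and
// total usage attributes are now set on the event.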
lib/llm-events/chat-completion-message.js

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * An event that corresponds to each message (sent and received)
 * from a chat completion call, including those created by the user,
 * the assistant, and the system.
 *
 * @augments LlmEvent
 * @property {string} completion_id ID of the `LlmChatCompletionSummary` event
 * that this message event is connected to
 * @property {string} content Content of the message
 * @property {string} id ID in the format `response_id`-`sequence`,
 * or a UUID generated by the agent if no response ID is returned by the LLM
 * @property {boolean|undefined} is_response `true` if a message is the result
 * of a chat completion and not an input message, `undefined` in `false` cases
 * @property {string} role Role of the message creator (e.g. `user`, `assistant`, `tool`)
 * @property {number} sequence Index (beginning at 0) associated with
 * each message, including the prompt and responses
 * @property {number|undefined} timestamp Timestamp captured at the time of the LLM
 * request with millisecond precision, `undefined` if not a request
 */
module.exports = class LlmChatCompletionMessage extends LlmEvent {
  /**
   * @param {object} params Constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {TraceSegment} params.segment Current segment
   * @param {Transaction} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase name of vendor (e.g. 'openai')
   * @param {string} params.requestId ID associated with the request,
   * typically available in response headers
   * @param {string} params.responseId ID associated with the response, used to create `this.id`
   * @param {string} params.responseModel Model name returned in the response
   * @param {number} params.sequence Index (beginning at 0) associated with
   * each message, including the prompt and responses
   * @param {string} params.content Content of the message
   * @param {string} [params.role] Role of the message creator (e.g. `user`, `assistant`, `tool`)
   * @param {string} params.completionId ID of the `LlmChatCompletionSummary` event that
   * this message event is connected to
   * @param {boolean} params.isResponse `true` if a message is the result of a chat
   * completion and not an input message, omitted in `false` cases
   */
  constructor({ agent, segment, transaction, vendor, requestId, responseId, responseModel, sequence, content, role, completionId, isResponse }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId })

    this.completion_id = completionId
    this.sequence = sequence
    if (isResponse) this.is_response = isResponse

    if (role) {
      this.role = role
    } else {
      // If the role attribute is not available, a value of `user` MUST be sent
      // for requests and a value of `assistant` MUST be sent for responses.
      if (isResponse) {
        this.role = 'assistant'
      } else if (sequence === 0) {
        // We can assume the first message in the sequence is the request message.
        this.role = 'user'
      }
    }

    if (isResponse !== true) {
      // Only include the timestamp for input/request messages.
      this.timestamp = segment.timer.start
    }

    if (responseId) {
      // A UUID is generated for `id` in the super constructor,
      // but use this ID format when a `responseId` exists.
      this.id = `${responseId}-${sequence}`
    }

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.content = content
    }
  }
}
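
The role-defaulting and ID-formatting rules in the constructor are easiest to see side by side. A hypothetical sketch (stubbed agent internals, not code from this commit):

// Hypothetical sketch of the role defaulting — stubs, not commit code.
const LlmChatCompletionMessage = require('./chat-completion-message')

const transaction = { traceId: 't-1', trace: { custom: { get: () => ({}) } } }
const shared = {
  agent: {
    config: { ai_monitoring: { record_content: { enabled: true } } },
    tracer: { getTransaction: () => transaction }
  },
  segment: { id: 's-1', timer: { start: Date.now() } },
  transaction,
  vendor: 'openai',
  responseId: 'chatcmpl-abc123',
  completionId: 'summary-1'
}

// Request message with no explicit role: sequence 0 defaults to 'user'.
const prompt = new LlmChatCompletionMessage({ ...shared, sequence: 0, content: 'Hi' })
// prompt.role === 'user'; prompt.id === 'chatcmpl-abc123-0'; timestamp is set

// Response message with no explicit role: defaults to 'assistant'.
const reply = new LlmChatCompletionMessage({ ...shared, sequence: 1, content: 'Hello!', isResponse: true })
// reply.role === 'assistant'; reply.is_response === true; no timestamp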
lib/llm-events/chat-completion-summary.js

Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * An event that captures high-level data about the creation of a
 * chat completion, including request, response, and call information.
 *
 * @augments LlmEvent
 * @property {number} duration Total time taken for the chat completion to
 * complete, in milliseconds
 * @property {number} timestamp Timestamp captured at the time of the LLM
 * request with millisecond precision
 * @property {number} request.max_tokens Maximum number of tokens that can be
 * generated in a chat completion
 * @property {string} request.model Model name specified in the request
 * (e.g. 'gpt-4')
 * @property {number} request.temperature Value representing how random or
 * deterministic the output responses should be
 * @property {string} response.choices.finish_reason Reason the model stopped
 * generating tokens (e.g. "stop")
 * @property {number} response.number_of_messages Number of messages comprising
 * a chat completion, including system, user, and assistant messages
 * @property {string} response.organization Organization ID returned in the
 * response or response headers
 */
module.exports = class LlmChatCompletionSummary extends LlmEvent {
  /**
   * @param {object} params Constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {TraceSegment} params.segment Current segment
   * @param {Transaction} params.transaction Current and active transaction
   * @param {string} params.vendor Lowercase vendor name, e.g. "openai"
   * @param {string} params.responseModel Model name from response
   * @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
   * @param {string} params.requestId ID from request/response headers
   * @param {string} params.responseOrg Organization ID returned in the response or response headers
   * @param {number} params.temperature Value representing how random or
   * deterministic the output responses should be
   * @param {number} params.maxTokens Maximum number of tokens that can be
   * generated in a chat completion
   * @param {number} params.numMsgs Number of messages comprising a
   * chat completion, including system, user, and assistant messages
   * @param {string} params.finishReason Reason the model stopped generating tokens (e.g. "stop")
   * @param {boolean} [params.error] Set to `true` if an error occurred during the creation call, omitted if no error occurred
   */
  constructor({ agent, segment, transaction, vendor, responseModel, requestModel, requestId,
    responseOrg, temperature, maxTokens, numMsgs = 0, finishReason, error }) {
    super({ agent, segment, transaction, vendor, responseModel, requestId, error })

    if (requestModel) this['request.model'] = requestModel
    if (maxTokens) this['request.max_tokens'] = maxTokens
    if (temperature) this['request.temperature'] = temperature
    if (finishReason) this['response.choices.finish_reason'] = finishReason
    if (responseOrg) this['response.organization'] = responseOrg

    this['response.number_of_messages'] = numMsgs
    this.timestamp = segment.timer.start
    this.duration = segment.getDurationInMillis()
  }
}
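
One consequence of the truthy guards above is worth a sketch: falsy-but-meaningful request values are dropped, e.g. `temperature: 0` never becomes an attribute. Hypothetical stubs, not code from this commit:

// Hypothetical sketch — stubs, not commit code.
const LlmChatCompletionSummary = require('./chat-completion-summary')

const transaction = { traceId: 't-1', trace: { custom: { get: () => ({}) } } }
const summary = new LlmChatCompletionSummary({
  agent: { tracer: { getTransaction: () => transaction } },
  segment: { id: 's-1', timer: { start: Date.now() }, getDurationInMillis: () => 1200 },
  transaction,
  vendor: 'openai',
  requestModel: 'gpt-4',
  responseModel: 'gpt-4-0613',
  temperature: 0, // falsy, so `if (temperature)` skips it
  maxTokens: 256,
  numMsgs: 3,
  finishReason: 'stop'
})
// summary['request.max_tokens'] === 256
// summary['request.temperature'] is undefined — omitted because 0 is falsy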

lib/llm-events/embedding.js

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
/*
 * Copyright 2026 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./base')

/**
 * An event that captures data specific to the creation of an embedding.
 *
 * @augments LlmEvent
 * @property {number} duration Total time taken for the embedding call to
 * complete, in milliseconds
 * @property {string} input Input to the embedding creation call
 * @property {string} request.model Model name specified in the request (e.g. `gpt-4`),
 * can differ from `this['response.model']`
 * @property {object|undefined} response.headers Vendor-specific headers, if any;
 * will be assigned to the `LlmEmbedding` like `this['response.headers.key'] = value`
 * @property {string} response.organization Organization ID returned in the response
 * or response headers
 * @property {number} response.usage.total_tokens Total number of tokens used for
 * the input text
 */
module.exports = class LlmEmbedding extends LlmEvent {
  /**
   * @param {object} params Constructor parameters
   * @param {Agent} params.agent New Relic agent instance
   * @param {TraceSegment} params.segment Current segment
   * @param {Transaction} params.transaction Current and active transaction
   * @param {string} [params.requestId] ID associated with the request,
   * typically available in response headers
   * @param {string} params.requestInput Input to the embedding creation call
   * @param {string} [params.requestModel] Model name specified in the request
   * (e.g. 'gpt-4')
   * @param {string} [params.responseModel] Model name returned in the response
   * (can differ from `request.model`)
   * @param {string} [params.responseOrg] Organization ID returned in the response
   * or response headers
   * @param {string} params.vendor Lowercased name of vendor (e.g. 'openai')
   * @param {boolean} [params.error] Set to `true` if an error occurred during
   * the creation call, omitted if no error occurred
   */
  constructor({ agent, segment, transaction, requestId, requestInput, requestModel, responseModel, responseOrg, vendor, error }) {
    super({ agent, segment, requestId, responseModel, transaction, vendor, error })

    if (requestModel) this['request.model'] = requestModel
    if (responseOrg) this['response.organization'] = responseOrg
    this.duration = segment.getDurationInMillis()

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.input = requestInput
    }
  }
}
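
As with chat completion messages, the embedding input is only captured when `ai_monitoring.record_content.enabled` is true. A hypothetical sketch of both sides of that toggle (stubs, not code from this commit):

// Hypothetical sketch — stubs, not commit code.
const LlmEmbedding = require('./embedding')

const transaction = { traceId: 't-1', trace: { custom: { get: () => ({}) } } }
const segment = { id: 's-1', timer: { start: Date.now() }, getDurationInMillis: () => 80 }
const makeAgent = (enabled) => ({
  config: { ai_monitoring: { record_content: { enabled } } },
  tracer: { getTransaction: () => transaction }
})

const recorded = new LlmEmbedding({
  agent: makeAgent(true), segment, transaction, vendor: 'openai',
  requestInput: 'embed me', requestModel: 'text-embedding-3-small'
})
// recorded.input === 'embed me'

const redacted = new LlmEmbedding({
  agent: makeAgent(false), segment, transaction, vendor: 'openai',
  requestInput: 'embed me', requestModel: 'text-embedding-3-small'
})
// redacted.input is undefined — the input text is never attached to the event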
