
Commit f1e3a0e

refactor: Google Gen AI LLM event refactor (#3748)
1 parent 1ac00ea commit f1e3a0e

16 files changed, +249 -190 lines changed
Lines changed: 24 additions & 30 deletions
@@ -1,42 +1,36 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'
-const LlmEvent = require('./event')
-const { makeId } = require('../../util/hashes')

-module.exports = class LlmChatCompletionMessage extends LlmEvent {
-  constructor({
-    agent,
+const LlmChatCompletionMessage = require('../chat-completion-message')
+const getUsageTokens = require('./get-usage-tokens')
+
+/**
+ * Encapsulates a Google Gen AI LlmChatCompletionMessage.
+ */
+module.exports = class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
+  constructor({ agent,
     segment,
+    transaction,
     request = {},
     response = {},
-    index = 0,
-    message,
+    sequence = 0,
+    content, role,
     completionId,
-    transaction
-  }) {
-    super({ agent, segment, request, response, transaction })
-    this.id = makeId(36)
-    // message?.role is only defined if the message is
-    // a response and it is always 'model'.
-    // request messages do not have a role
-    this.role = message?.role ?? 'user'
-    this.sequence = index
-    this.completion_id = completionId
-    const responseText = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
-    this.is_response = responseText === message?.parts?.[0]?.text
-
-    if (agent.config.ai_monitoring.record_content.enabled === true) {
-      this.content = this.is_response ? message?.parts?.[0]?.text : message
-    }
-
-    // only add timestamp for request/input messages
-    if (this.is_response === false) {
-      this.timestamp = segment.timer.start
-    }
+    isResponse }) {
+    super({ agent,
+      segment,
+      transaction,
+      vendor: 'gemini',
+      sequence,
+      content,
+      role,
+      completionId,
+      isResponse,
+      responseModel: response?.modelVersion })

     this.setTokenCount(agent, request, response)
   }
@@ -66,7 +60,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
       return
     }

-    const tokens = this.getUsageTokens(response)
+    const tokens = getUsageTokens(response)
     this.setTokenInCompletionMessage(tokens)
   }
 }
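
The upshot of this refactor: id generation, role defaulting, content gating, and the request-message timestamp disappear from this file, and the caller now passes content, role, and isResponse explicitly to the shared ../chat-completion-message base class (whose implementation is not shown in this diff). A minimal construction sketch, assuming agent, segment, and transaction come from the active context; the model names and completion id are illustrative, not from the commit:

// Hedged sketch, not part of the commit: shows the new constructor contract.
const LlmChatCompletionMessage = require('./chat-completion-message')

const messageEvent = new LlmChatCompletionMessage({
  agent,        // New Relic agent instance (assumed in scope)
  segment,      // active trace segment (assumed in scope)
  transaction,  // active transaction (assumed in scope)
  request: { model: 'gemini-2.0-flash', contents: 'Tell me a joke' },
  response: { modelVersion: 'gemini-2.0-flash' },
  sequence: 0,
  content: 'Tell me a joke',
  role: 'user',
  completionId: 'chat-completion-id', // ties the message to its summary event
  isResponse: false
})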

lib/llm-events/google-genai/chat-completion-summary.js

Lines changed: 31 additions & 11 deletions
@@ -1,27 +1,47 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'
-const LlmEvent = require('./event')

-module.exports = class LlmChatCompletionSummary extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
-    super({ agent, segment, request, response, responseAttrs: true, transaction })
-    this.error = withError
+const LlmChatCompletionSummary = require('../chat-completion-summary')
+const getUsageTokens = require('./get-usage-tokens')
+
+/**
+ * Encapsulates a Google Gen AI LlmChatCompletionSummary.
+ */
+module.exports = class GoogleGenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
+  /**
+   *
+   * @param {object} params Constructor parameters
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {TraceSegment} params.segment Current segment
+   * @param {Transaction} params.transaction Current and active transaction
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @param {boolean} [params.error] Set to `true` if an error occurred
+   */
+  constructor({ agent, segment, transaction, request, response, error }) {
+    super({ agent,
+      segment,
+      transaction,
+      responseModel: response?.modelVersion,
+      requestModel: request?.model,
+      finishReason: response?.candidates?.[0]?.finishReason,
+      maxTokens: request.config?.maxOutputTokens,
+      temperature: request.config?.temperature,
+      vendor: 'gemini',
+      error })
+
     let requestMessagesLength = 0
     if (Array.isArray(request?.contents)) {
       requestMessagesLength = request.contents.length
     } else if (typeof request?.contents === 'string') {
       requestMessagesLength = 1
     }
     this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)
-    this['response.choices.finish_reason'] = response?.candidates?.[0]?.finishReason
-    this['request.max_tokens'] = request.config?.maxOutputTokens
-    this['request.temperature'] = request.config?.temperature

-    this.timestamp = segment.timer.start
     this.setTokens(agent, request, response)
   }

@@ -51,7 +71,7 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
       return
     }

-    const tokens = this.getUsageTokens(response)
+    const tokens = getUsageTokens(response)
     this.setTokensInResponse(tokens)
   }
 }
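
The message count kept in the constructor treats request.contents as either a single string (one message) or an array of messages, then adds one per response candidate. A stand-alone replica of that logic, for illustration only:

// Mirrors the `response.number_of_messages` computation in the diff above.
function numberOfMessages(request, response) {
  let requestMessagesLength = 0
  if (Array.isArray(request?.contents)) {
    requestMessagesLength = request.contents.length
  } else if (typeof request?.contents === 'string') {
    requestMessagesLength = 1
  }
  return requestMessagesLength + (response?.candidates?.length || 0)
}

numberOfMessages({ contents: ['Hi', 'Tell me a joke'] }, { candidates: [{}] }) // 3
numberOfMessages({ contents: 'Tell me a joke' }, { candidates: [{}] })         // 2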

lib/llm-events/google-genai/embedding.js

Lines changed: 25 additions & 12 deletions
@@ -1,21 +1,34 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'

-const LlmEvent = require('./event')
+const LlmEmbedding = require('../embedding')

-class LlmEmbedding extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
-    super({ agent, segment, request, response, responseAttrs: true, transaction })
-    this.error = withError
-
-    if (agent.config.ai_monitoring.record_content.enabled === true) {
-      this.input = request.contents?.toString()
-    }
+/**
+ * Encapsulates a Google Gen AI LlmEmbedding.
+ */
+module.exports = class GoogleGenAiLlmEmbedding extends LlmEmbedding {
+  /**
+   *
+   * @param {object} params Constructor params
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {TraceSegment} params.segment Current segment
+   * @param {Transaction} params.transaction Current and active transaction
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @param {boolean} [params.error] Set to true if an error occurred
+   */
+  constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
+    super({ agent,
+      segment,
+      transaction,
+      requestInput: request?.contents,
+      requestModel: request?.model,
+      responseModel: response?.modelVersion,
+      vendor: 'gemini',
+      error })
   }
 }
-
-module.exports = LlmEmbedding

lib/llm-events/google-genai/event.js

Lines changed: 0 additions & 36 deletions
This file was deleted.

Lines changed: 26 additions & 0 deletions (new file)
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2026 New Relic Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+'use strict'
+
+module.exports = getUsageTokens
+
+/**
+ * Grabs the prompt, completion, and total token count from the
+ * given response object.
+ * @param {object} response Google Gen AI response object
+ * @returns {object} { promptTokens, completionTokens, totalTokens }
+ */
+function getUsageTokens(response) {
+  const { usageMetadata } = response
+  if (!usageMetadata) {
+    return { promptTokens: 0, completionTokens: 0, totalTokens: 0 }
+  }
+  return {
+    promptTokens: Number(usageMetadata.promptTokenCount),
+    completionTokens: Number(usageMetadata.candidatesTokenCount),
+    totalTokens: Number(usageMetadata.totalTokenCount)
+  }
+}
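
This new helper, required by the message and summary events as ./get-usage-tokens, centralizes the token extraction both classes previously did via this.getUsageTokens. Illustrative calls with made-up counts:

// usageMetadata present: counts are coerced to numbers.
getUsageTokens({
  usageMetadata: { promptTokenCount: 9, candidatesTokenCount: 17, totalTokenCount: 26 }
})
// -> { promptTokens: 9, completionTokens: 17, totalTokens: 26 }

// usageMetadata absent (e.g. an errored call): zeros across the board.
getUsageTokens({})
// -> { promptTokens: 0, completionTokens: 0, totalTokens: 0 }

One edge worth noting: if usageMetadata exists but an individual count field is missing, Number(undefined) evaluates to NaN rather than 0.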
Lines changed: 4 additions & 12 deletions
@@ -1,18 +1,10 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

-'use strict'
-
-const LlmChatCompletionSummary = require('./chat-completion-summary')
-const LlmChatCompletionMessage = require('./chat-completion-message')
-const LlmEmbedding = require('./embedding')
-const LlmErrorMessage = require('../error-message')
-
 module.exports = {
-  LlmChatCompletionMessage,
-  LlmChatCompletionSummary,
-  LlmEmbedding,
-  LlmErrorMessage
+  LlmChatCompletionMessage: require('./chat-completion-message'),
+  LlmChatCompletionSummary: require('./chat-completion-summary'),
+  LlmEmbedding: require('./embedding')
 }

lib/subscribers/ai-monitoring/embedding.js

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ class AiMonitoringEmbeddingSubscriber extends AiMonitoringSubscriber {
    * @param {Context} params.ctx active context
    * @param {object} params.request request made to method on a given llm library
    * @param {object} params.response response from method on a given llm library
-   * @param {object} params.err error if present
+   * @param {object} params.err error object if present
    * returns {object} a llm embedding instance for the given LLM
    */
   createEmbedding({ ctx, request, response, err }) {

lib/subscribers/google-genai/embed-content.js

Lines changed: 11 additions & 1 deletion
@@ -14,6 +14,16 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber
     this.events = ['asyncEnd']
   }

+  /**
+   * Creates a Google Gen AI LlmEmbedding instance.
+   *
+   * @param {object} params to function
+   * @param {Context} params.ctx active context
+   * @param {object} params.request request made to method on a given llm library
+   * @param {object} params.response response from method on a given llm library
+   * @param {object} params.err error object if present
+   * @returns {LlmEmbedding} a llm embedding instance for Google Gen AI
+   */
   createEmbedding({ ctx, request, response = {}, err }) {
     const { segment, transaction } = ctx
     return new LlmEmbedding({
@@ -22,7 +32,7 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber
       transaction,
       request,
       response,
-      withError: !!err
+      error: !!err
     })
   }

lib/subscribers/google-genai/generate-content.js

Lines changed: 37 additions & 9 deletions
@@ -27,15 +27,38 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
     })
   }

-  getMessages({ request, response }) {
-    // Only take the first response message and append to input messages
+  /**
+   * Gets the request/input and response messages from the
+   * Google Gen AI request and response objects.
+   * @param {object} params function parameters
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @returns {object[]} an array of messages like { content, role }
+   */
+  getMessages({ request, response = {} }) {
     // request.contents can be a string or an array of strings
-    // response.candidates is an array of candidates (choices); we only take the first one
-    const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents]
-    const responseMessage = response?.candidates?.[0]?.content
-    return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages
+    const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
+    const messages = contents.map((item) => {
+      return { content: item, role: 'user' }
+    })
+    const responseContent = response.text ?? response.candidates?.[0]?.content?.parts?.[0]?.text
+    if (responseContent) {
+      // Do not push an empty response (likely from an error)
+      messages.push({ content: responseContent, role: 'assistant' })
+    }
+    return messages
   }

+  /**
+   * Creates a Google Gen AI LlmChatCompletionSummary instance.
+   *
+   * @param {object} params to function
+   * @param {Context} params.ctx active context
+   * @param {object} params.request request made to method
+   * @param {object} params.response response from method
+   * @param {object} [params.err] error object if present
+   * @returns {object} a llm completion summary instance for Google Gen AI
+   */
   createCompletionSummary({ ctx, request, response = {}, err }) {
     const { transaction, segment } = ctx
     return new LlmChatCompletionSummary({
@@ -44,21 +67,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
       transaction,
       request,
       response,
-      withError: !!err
+      error: !!err
     })
   }

   createCompletionMessage({ ctx, request, response, index, completionId, message }) {
     const { segment, transaction } = ctx
+
+    const isResponse = message?.content === response?.text
+
     return new LlmChatCompletionMessage({
       agent: this.agent,
       segment,
       transaction,
       request,
       response,
-      index,
+      sequence: index,
       completionId,
-      message
+      content: message.content,
+      role: message.role,
+      isResponse
     })
   }
 }
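
Taken together: getMessages now normalizes the request contents and any non-empty response text into { content, role } pairs, and createCompletionMessage flags the response entry by comparing its content against response.text. A worked example with a made-up exchange (real request/response objects come from the instrumented Google Gen AI SDK call):

// Hypothetical inputs, for illustration only.
const request = { contents: 'Tell me a joke', model: 'gemini-2.0-flash' }
const response = {
  text: 'Why did the chicken cross the road?',
  candidates: [{ content: { parts: [{ text: 'Why did the chicken cross the road?' }] } }]
}

// getMessages({ request, response }) yields:
// [
//   { content: 'Tell me a joke', role: 'user' },
//   { content: 'Why did the chicken cross the road?', role: 'assistant' }
// ]

// For the second entry, createCompletionMessage computes
//   isResponse = message?.content === response?.text  // true
// so that message is recorded as the model's response.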
