Skip to content

Commit 7ebc8ee

Browse files
committed
Google Gen AI LLM refactor
1 parent b2612ec commit 7ebc8ee

File tree

15 files changed

+198
-186
lines changed

15 files changed

+198
-186
lines changed
Lines changed: 24 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1,42 +1,36 @@
11
/*
2-
* Copyright 2025 New Relic Corporation. All rights reserved.
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

66
'use strict'
7-
const LlmEvent = require('./event')
8-
const { makeId } = require('../../util/hashes')
97

10-
module.exports = class LlmChatCompletionMessage extends LlmEvent {
11-
constructor({
12-
agent,
8+
const LlmChatCompletionMessage = require('../chat-completion-message')
9+
const getUsageTokens = require('./get-usage-tokens')
10+
11+
/**
12+
* Encapsulates a Google Gen AI LlmChatCompletionMessage.
13+
*/
14+
module.exports = class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
15+
constructor({ agent,
1316
segment,
17+
transaction,
1418
request = {},
1519
response = {},
16-
index = 0,
17-
message,
20+
sequence = 0,
21+
content, role,
1822
completionId,
19-
transaction
20-
}) {
21-
super({ agent, segment, request, response, transaction })
22-
this.id = makeId(36)
23-
// message?.role is only defined if the message is
24-
// a response and it is always 'model'.
25-
// request messages do not have a role
26-
this.role = message?.role ?? 'user'
27-
this.sequence = index
28-
this.completion_id = completionId
29-
const responseText = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
30-
this.is_response = responseText === message?.parts?.[0]?.text
31-
32-
if (agent.config.ai_monitoring.record_content.enabled === true) {
33-
this.content = this.is_response ? message?.parts?.[0]?.text : message
34-
}
35-
36-
// only add timestamp for request/input messages
37-
if (this.is_response === false) {
38-
this.timestamp = segment.timer.start
39-
}
23+
isResponse }) {
24+
super({ agent,
25+
segment,
26+
transaction,
27+
vendor: 'gemini',
28+
sequence,
29+
content,
30+
role,
31+
completionId,
32+
isResponse,
33+
responseModel: response?.modelVersion })
4034

4135
this.setTokenCount(agent, request, response)
4236
}
@@ -66,7 +60,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
6660
return
6761
}
6862

69-
const tokens = this.getUsageTokens(response)
63+
const tokens = getUsageTokens(response)
7064
this.setTokenInCompletionMessage(tokens)
7165
}
7266
}

lib/llm-events/google-genai/chat-completion-summary.js

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,37 @@
11
/*
2-
* Copyright 2025 New Relic Corporation. All rights reserved.
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

66
'use strict'
7-
const LlmEvent = require('./event')
87

9-
module.exports = class LlmChatCompletionSummary extends LlmEvent {
10-
constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
11-
super({ agent, segment, request, response, responseAttrs: true, transaction })
12-
this.error = withError
8+
const LlmChatCompletionSummary = require('../chat-completion-summary')
9+
const getUsageTokens = require('./get-usage-tokens')
10+
11+
/**
12+
* Encapsulates a Google Gen AI LlmChatCompletionSummary.
13+
*/
14+
module.exports = class GoogleGenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
15+
constructor({ agent, segment, transaction, request, response, error }) {
16+
super({ agent,
17+
segment,
18+
transaction,
19+
responseModel: response?.modelVersion,
20+
requestModel: request?.model,
21+
finishReason: response?.candidates?.[0]?.finishReason,
22+
maxTokens: request.config?.maxOutputTokens,
23+
temperature: request.config?.temperature,
24+
vendor: 'gemini',
25+
error })
26+
1327
let requestMessagesLength = 0
1428
if (Array.isArray(request?.contents)) {
1529
requestMessagesLength = request.contents.length
1630
} else if (typeof request?.contents === 'string') {
1731
requestMessagesLength = 1
1832
}
1933
this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)
20-
this['response.choices.finish_reason'] = response?.candidates?.[0]?.finishReason
21-
this['request.max_tokens'] = request.config?.maxOutputTokens
22-
this['request.temperature'] = request.config?.temperature
2334

24-
this.timestamp = segment.timer.start
2535
this.setTokens(agent, request, response)
2636
}
2737

@@ -51,7 +61,7 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
5161
return
5262
}
5363

54-
const tokens = this.getUsageTokens(response)
64+
const tokens = getUsageTokens(response)
5565
this.setTokensInResponse(tokens)
5666
}
5767
}

lib/llm-events/google-genai/embedding.js

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,24 @@
11
/*
2-
* Copyright 2025 New Relic Corporation. All rights reserved.
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

66
'use strict'
77

8-
const LlmEvent = require('./event')
8+
const LlmEmbedding = require('../embedding')
99

10-
class LlmEmbedding extends LlmEvent {
11-
constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
12-
super({ agent, segment, request, response, responseAttrs: true, transaction })
13-
this.error = withError
14-
15-
if (agent.config.ai_monitoring.record_content.enabled === true) {
16-
this.input = request.contents?.toString()
17-
}
10+
/**
11+
* Encapsulates a Google Gen AI LlmEmbedding.
12+
*/
13+
module.exports = class GoogleGenAiLlmEmbedding extends LlmEmbedding {
14+
constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
15+
super({ agent,
16+
segment,
17+
transaction,
18+
requestInput: request?.contents,
19+
requestModel: request?.model,
20+
responseModel: response?.modelVersion,
21+
vendor: 'gemini',
22+
error })
1823
}
1924
}
20-
21-
module.exports = LlmEmbedding

lib/llm-events/google-genai/event.js

Lines changed: 0 additions & 36 deletions
This file was deleted.
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
/*
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
3+
* SPDX-License-Identifier: Apache-2.0
4+
*/
5+
6+
'use strict'
7+
8+
module.exports = getUsageTokens
9+
10+
/**
11+
* Grabs the prompt, completion, and total token count from the
12+
* given response object.
13+
* @param {object} response Google Gen AI response object
14+
* @returns {object} { promptTokens, completionTokens, totalTokens }
15+
*/
16+
function getUsageTokens(response) {
17+
const promptTokens = Number(response?.usageMetadata?.promptTokenCount)
18+
const completionTokens = Number(response?.usageMetadata?.candidatesTokenCount)
19+
const totalTokens = Number(response?.usageMetadata?.totalTokenCount)
20+
return { promptTokens, completionTokens, totalTokens }
21+
}
Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,10 @@
11
/*
2-
* Copyright 2025 New Relic Corporation. All rights reserved.
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
'use strict'
7-
8-
const LlmChatCompletionSummary = require('./chat-completion-summary')
9-
const LlmChatCompletionMessage = require('./chat-completion-message')
10-
const LlmEmbedding = require('./embedding')
11-
const LlmErrorMessage = require('../error-message')
12-
136
module.exports = {
14-
LlmChatCompletionMessage,
15-
LlmChatCompletionSummary,
16-
LlmEmbedding,
17-
LlmErrorMessage
7+
LlmChatCompletionMessage: require('./chat-completion-message'),
8+
LlmChatCompletionSummary: require('./chat-completion-summary'),
9+
LlmEmbedding: require('./embedding')
1810
}

lib/subscribers/google-genai/embed-content.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber
2222
transaction,
2323
request,
2424
response,
25-
withError: !!err
25+
error: !!err
2626
})
2727
}
2828

lib/subscribers/google-genai/generate-content.js

Lines changed: 26 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
2727
})
2828
}
2929

30+
/**
31+
* Gets the request/input and response messages from the
32+
* Google Gen AI request and response objects.
33+
* @param {object} params function parameters
34+
* @param {object} params.request Google Gen AI request object
35+
* @param {object} params.response Google Gen AI response object
36+
* @returns {object[]} an array of messages like { content, role }
37+
*/
3038
getMessages({ request, response }) {
31-
// Only take the first response message and append to input messages
3239
// request.contents can be a string or an array of strings
33-
// response.candidates is an array of candidates (choices); we only take the first one
34-
const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents]
35-
const responseMessage = response?.candidates?.[0]?.content
36-
return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages
40+
const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
41+
const messages = contents.map((item) => {
42+
return { content: item, role: 'user' }
43+
})
44+
const responseContent = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
45+
if (responseContent) {
46+
// Do not push an empty response (likely from an error)
47+
messages.push({ content: responseContent, role: 'assistant' })
48+
}
49+
return messages
3750
}
3851

3952
createCompletionSummary({ ctx, request, response = {}, err }) {
@@ -44,21 +57,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
4457
transaction,
4558
request,
4659
response,
47-
withError: !!err
60+
error: !!err
4861
})
4962
}
5063

5164
createCompletionMessage({ ctx, request, response, index, completionId, message }) {
5265
const { segment, transaction } = ctx
66+
67+
const isResponse = message?.content === response?.text
68+
5369
return new LlmChatCompletionMessage({
5470
agent: this.agent,
5571
segment,
5672
transaction,
5773
request,
5874
response,
59-
index,
75+
sequence: index,
6076
completionId,
61-
message
77+
content: message.content,
78+
role: message.role,
79+
isResponse
6280
})
6381
}
6482
}

0 commit comments

Comments (0)