Skip to content

Commit ffa99fb

Browse files
committed
Gemini: add chat completion message and summary events
1 parent 1e78751 commit ffa99fb

File tree

14 files changed

+265
-80
lines changed

14 files changed

+265
-80
lines changed

lib/llm-events-new/chat-summary.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,8 +45,8 @@ class LlmChatCompletionSummary extends LlmEvent {
4545
this['request.max_tokens'] = maxTokens
4646
this['request.temperature'] = temperature
4747
this['response.number_of_messages'] = numMsgs
48-
this['response.choices.finish_reason'] = finishReason
49-
this['response.organization'] = responseOrg
48+
if (finishReason) this['response.choices.finish_reason'] = finishReason
49+
if (responseOrg) this['response.organization'] = responseOrg
5050
this.timestamp = segment.timer.start
5151
this.duration = segment.getDurationInMillis()
5252
}

lib/llm-events-new/embedding.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class LlmEmbedding extends LlmEvent {
3434
constructor({ agent, segment, transaction, requestId, requestInput, requestModel, responseModel, responseOrg, vendor, error }) {
3535
super({ agent, segment, requestId, responseModel, transaction, vendor, error })
3636
if (requestModel) this['request.model'] = requestModel
37-
if (responseModel) this['response.organization'] = responseOrg
37+
if (responseOrg) this['response.organization'] = responseOrg
3838
this.duration = segment.getDurationInMillis()
3939

4040
if (agent.config.ai_monitoring.record_content.enabled === true) {
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
/*
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
3+
* SPDX-License-Identifier: Apache-2.0
4+
*/
5+
6+
const LlmChatCompletionMessage = require('../chat-message')
7+
const { getUsageTokens } = require('./utils')
8+
9+
/**
10+
* Encapsulates a Google Gen AI LlmChatCompletionMessage.
11+
*/
12+
/**
 * Encapsulates a Google Gen AI (Gemini) LlmChatCompletionMessage event.
 */
class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
  /**
   * @param {object} params constructor parameters
   * @param {object} params.agent NR agent instance
   * @param {object} params.segment active segment
   * @param {object} params.transaction active transaction
   * @param {object} [params.request] Google Gen AI request object
   * @param {object} [params.response] Google Gen AI response object
   * @param {number} [params.sequence] index of this message within the exchange
   * @param {string} params.content message content
   * @param {string} params.role message role (e.g. 'user', 'assistant')
   * @param {string} params.completionId id of the owning completion summary
   * @param {boolean} params.isResponse whether this message came from the model
   */
  constructor({ agent, segment, transaction, request = {}, response = {}, sequence = 0, content, role, completionId, isResponse }) {
    super({
      agent,
      segment,
      transaction,
      vendor: 'gemini',
      sequence,
      content,
      role,
      completionId,
      isResponse,
      responseModel: response?.modelVersion
    })

    this.setTokenCount(agent, request, response)
  }

  /**
   * Records the token count for this message. Prefers the customer-supplied
   * `tokenCountCallback`; when no callback is configured, falls back to the
   * token counts reported in the response's usage metadata.
   *
   * @param {object} agent NR agent instance
   * @param {object} request Google Gen AI request object
   * @param {object} response Google Gen AI response object
   */
  setTokenCount(agent, request, response) {
    const tokenCB = agent.llm?.tokenCountCallback

    if (tokenCB) {
      // request.contents can be a string or an array of strings. Anything
      // else (e.g. structured Content objects) has no flat text form here, so
      // skip the callback instead of crashing — previously a non-string,
      // non-array `contents` threw a TypeError on the unguarded `.join` call.
      const contents = request?.contents
      let promptContent
      if (typeof contents === 'string') {
        promptContent = contents
      } else if (Array.isArray(contents)) {
        promptContent = contents.join(' ')
      }

      const responseParts = response?.candidates?.[0]?.content?.parts
      const completionContent = responseParts?.map((part) => part.text).join(' ')

      if (promptContent && completionContent) {
        this.setTokenFromCallback({
          tokenCB,
          reqModel: request.model,
          resModel: this['response.model'],
          promptContent,
          completionContent
        })
      }
      // When a callback is configured we deliberately never fall back to
      // usage metadata, even if the callback could not be invoked.
      return
    }

    const tokens = getUsageTokens(response)
    this.setTokenInCompletionMessage(tokens)
  }
}
65+
66+
module.exports = GoogleGenAiLlmChatCompletionMessage
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
/*
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
3+
* SPDX-License-Identifier: Apache-2.0
4+
*/
5+
6+
const LlmChatCompletionSummary = require('../chat-summary')
7+
const { getUsageTokens } = require('./utils')
8+
9+
/**
10+
* Encapsulates a Google Gen AI LlmChatCompletionSummary.
11+
*/
12+
/**
 * Encapsulates a Google Gen AI (Gemini) LlmChatCompletionSummary event.
 */
class GoogleGenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
  /**
   * @param {object} params constructor parameters
   * @param {object} params.agent NR agent instance
   * @param {object} params.segment active segment
   * @param {object} params.transaction active transaction
   * @param {object} [params.request] Google Gen AI request object
   * @param {object} [params.response] Google Gen AI response object
   * @param {boolean} params.error whether the completion errored
   */
  constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
    // Default request/response to {} and use optional chaining throughout:
    // the original dereferenced `request.config` unguarded, which threw a
    // TypeError when no request object was supplied (every sibling access
    // already used `request?.`).
    super({
      agent,
      segment,
      transaction,
      responseModel: response?.modelVersion,
      requestModel: request?.model,
      finishReason: response?.candidates?.[0]?.finishReason,
      maxTokens: request?.config?.maxOutputTokens,
      temperature: request?.config?.temperature,
      vendor: 'gemini',
      error
    })

    // request.contents can be a string (one message) or an array of messages.
    let requestMessagesLength = 0
    if (Array.isArray(request?.contents)) {
      requestMessagesLength = request.contents.length
    } else if (typeof request?.contents === 'string') {
      requestMessagesLength = 1
    }
    this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)

    this.setTokens(agent, request, response)
  }

  /**
   * Records prompt/completion token usage on this summary. Prefers the
   * customer-supplied `tokenCountCallback`; when no callback is configured,
   * falls back to the counts in the response's usage metadata.
   *
   * @param {object} agent NR agent instance
   * @param {object} request Google Gen AI request object
   * @param {object} response Google Gen AI response object
   */
  setTokens(agent, request, response) {
    const tokenCB = agent.llm?.tokenCountCallback

    if (tokenCB) {
      // request.contents can be a string or an array of strings. Anything
      // else has no flat text form here, so skip the callback instead of
      // crashing — previously a non-string, non-array `contents` threw a
      // TypeError on the unguarded `.join` call.
      const contents = request?.contents
      let promptContent
      if (typeof contents === 'string') {
        promptContent = contents
      } else if (Array.isArray(contents)) {
        promptContent = contents.join(' ')
      }

      const responseParts = response?.candidates?.[0]?.content?.parts
      const completionContent = responseParts?.map((part) => part.text).join(' ')

      if (promptContent && completionContent) {
        this.setTokenUsageFromCallback({
          tokenCB,
          reqModel: request.model,
          resModel: this['response.model'],
          promptContent,
          completionContent
        })
      }
      // When a callback is configured we deliberately never fall back to
      // usage metadata, even if the callback could not be invoked.
      return
    }

    const tokens = getUsageTokens(response)
    this.setTokensInResponse(tokens)
  }
}
66+
67+
module.exports = GoogleGenAiLlmChatCompletionSummary

lib/llm-events-new/google-genai/embedding.js

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,13 +5,17 @@
55

66
const LlmEmbedding = require('../embedding')
77

8+
/**
9+
* Encapsulates a Google Gen AI LlmEmbedding.
10+
*/
811
class GoogleGenAiLlmEmbedding extends LlmEmbedding {
912
constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
1013
super({ agent,
1114
segment,
1215
transaction,
1316
requestInput: request?.contents,
1417
requestModel: request?.model,
18+
responseModel: response?.modelVersion,
1519
vendor: 'gemini',
1620
error })
1721
}

lib/llm-events-new/google-genai/index.js

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,12 @@
55

66
'use strict'
77

8-
// const LlmChatCompletionMessage = require('./chat-message')
9-
// const LlmChatCompletionSummary = require('./chat-summary')
8+
const LlmChatCompletionMessage = require('./chat-message')
9+
const LlmChatCompletionSummary = require('./chat-summary')
1010
const LlmEmbedding = require('./embedding')
1111

1212
module.exports = {
13-
// LlmChatCompletionMessage,
14-
// LlmChatCompletionSummary,
13+
LlmChatCompletionMessage,
14+
LlmChatCompletionSummary,
1515
LlmEmbedding
1616
}
Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
/*
2+
* Copyright 2026 New Relic Corporation. All rights reserved.
3+
* SPDX-License-Identifier: Apache-2.0
4+
*/
5+
6+
/**
 * Extracts token usage counts from a Google Gen AI response's
 * `usageMetadata`.
 *
 * @param {object} [response] Google Gen AI response object
 * @returns {{promptTokens: (number|undefined), completionTokens: (number|undefined), totalTokens: (number|undefined)}}
 *   the usage counts; a count that is absent or non-numeric is reported as
 *   `undefined` (previously `Number(undefined)` produced `NaN`, which would
 *   be recorded downstream as a bogus token count).
 */
function getUsageTokens(response) {
  // Coerce to a number only when the field yields a finite value; otherwise
  // report the count as missing rather than NaN.
  const toCount = (value) => {
    const n = Number(value)
    return Number.isFinite(n) ? n : undefined
  }

  const usage = response?.usageMetadata
  return {
    promptTokens: toCount(usage?.promptTokenCount),
    completionTokens: toCount(usage?.candidatesTokenCount),
    totalTokens: toCount(usage?.totalTokenCount)
  }
}
12+
13+
module.exports = { getUsageTokens }

lib/subscribers/google-genai/generate-content.js

Lines changed: 27 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
const { AiMonitoringChatSubscriber } = require('../ai-monitoring')
77
const { AI } = require('../../metrics/names')
88
const { GEMINI } = AI
9-
const { LlmChatCompletionSummary, LlmChatCompletionMessage } = require('#agentlib/llm-events/google-genai/index.js')
9+
const { LlmChatCompletionSummary, LlmChatCompletionMessage } = require('#agentlib/llm-events-new/google-genai/index.js')
1010

1111
class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
1212
constructor({ agent, logger, channelName = 'nr_generateContentInternal' }) {
@@ -27,13 +27,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
2727
})
2828
}
2929

30+
/**
31+
* Gets the request/input and response messages from the
32+
* Google Gen AI request and response objects.
33+
* @param {object} params function parameters
34+
* @param {object} params.request Google Gen AI request object
35+
* @param {object} params.response Google Gen AI response object
36+
* @returns {object[]} an array of messages like { content, role }
37+
*/
3038
getMessages({ request, response }) {
31-
// Only take the first response message and append to input messages
3239
// request.contents can be a string or an array of strings
33-
// response.candidates is an array of candidates (choices); we only take the first one
34-
const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents]
35-
const responseMessage = response?.candidates?.[0]?.content
36-
return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages
40+
const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
41+
const messages = contents.map((item) => {
42+
return { content: item, role: 'user' }
43+
})
44+
const responseContent = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
45+
if (responseContent) {
46+
// Do not push an empty response (likely from an error)
47+
messages.push({ content: responseContent, role: 'assistant' })
48+
}
49+
return messages
3750
}
3851

3952
createCompletionSummary({ ctx, request, response = {}, err }) {
@@ -44,21 +57,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
4457
transaction,
4558
request,
4659
response,
47-
withError: !!err
60+
error: !!err
4861
})
4962
}
5063

5164
createCompletionMessage({ ctx, request, response, index, completionId, message }) {
5265
const { segment, transaction } = ctx
66+
67+
const isResponse = message?.content === response?.text
68+
5369
return new LlmChatCompletionMessage({
5470
agent: this.agent,
5571
segment,
5672
transaction,
5773
request,
5874
response,
59-
index,
75+
sequence: index,
6076
completionId,
61-
message
77+
content: message.content,
78+
role: message.role,
79+
isResponse
6280
})
6381
}
6482
}

0 commit comments

Comments
 (0)