54 changes: 24 additions & 30 deletions lib/llm-events/google-genai/chat-completion-message.js
@@ -1,42 +1,36 @@
/*
* Copyright 2025 New Relic Corporation. All rights reserved.
* Copyright 2026 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'
const LlmEvent = require('./event')
const { makeId } = require('../../util/hashes')

module.exports = class LlmChatCompletionMessage extends LlmEvent {
constructor({
agent,
const LlmChatCompletionMessage = require('../chat-completion-message')
const getUsageTokens = require('./get-usage-tokens')

/**
* Encapsulates a Google Gen AI LlmChatCompletionMessage.
*/
module.exports = class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
constructor({ agent,
segment,
transaction,
request = {},
response = {},
index = 0,
message,
sequence = 0,
content, role,
completionId,
transaction
}) {
super({ agent, segment, request, response, transaction })
this.id = makeId(36)
// message?.role is only defined if the message is
// a response and it is always 'model'.
// request messages do not have a role
this.role = message?.role ?? 'user'
this.sequence = index
this.completion_id = completionId
const responseText = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
this.is_response = responseText === message?.parts?.[0]?.text

if (agent.config.ai_monitoring.record_content.enabled === true) {
this.content = this.is_response ? message?.parts?.[0]?.text : message
}

// only add timestamp for request/input messages
if (this.is_response === false) {
this.timestamp = segment.timer.start
}
isResponse }) {
super({ agent,
segment,
transaction,
vendor: 'gemini',
sequence,
content,
role,
completionId,
isResponse,
responseModel: response?.modelVersion })

this.setTokenCount(agent, request, response)
}
@@ -66,7 +60,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
return
}

const tokens = this.getUsageTokens(response)
const tokens = getUsageTokens(response)
this.setTokenInCompletionMessage(tokens)
}
}
32 changes: 21 additions & 11 deletions lib/llm-events/google-genai/chat-completion-summary.js
@@ -1,27 +1,37 @@
/*
* Copyright 2025 New Relic Corporation. All rights reserved.
* Copyright 2026 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'
const LlmEvent = require('./event')

module.exports = class LlmChatCompletionSummary extends LlmEvent {
constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
super({ agent, segment, request, response, responseAttrs: true, transaction })
this.error = withError
const LlmChatCompletionSummary = require('../chat-completion-summary')
const getUsageTokens = require('./get-usage-tokens')

/**
* Encapsulates a Google Gen AI LlmChatCompletionSummary.
*/
module.exports = class GoogleGenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
constructor({ agent, segment, transaction, request, response, error }) {
super({ agent,
segment,
transaction,
responseModel: response?.modelVersion,
requestModel: request?.model,
finishReason: response?.candidates?.[0]?.finishReason,
maxTokens: request.config?.maxOutputTokens,
temperature: request.config?.temperature,
vendor: 'gemini',
error })

let requestMessagesLength = 0
if (Array.isArray(request?.contents)) {
requestMessagesLength = request.contents.length
} else if (typeof request?.contents === 'string') {
requestMessagesLength = 1
}
this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)
this['response.choices.finish_reason'] = response?.candidates?.[0]?.finishReason
this['request.max_tokens'] = request.config?.maxOutputTokens
this['request.temperature'] = request.config?.temperature

this.timestamp = segment.timer.start
this.setTokens(agent, request, response)
}

@@ -51,7 +61,7 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
return
}

const tokens = this.getUsageTokens(response)
const tokens = getUsageTokens(response)
this.setTokensInResponse(tokens)
}
}
27 changes: 15 additions & 12 deletions lib/llm-events/google-genai/embedding.js
@@ -1,21 +1,24 @@
/*
* Copyright 2025 New Relic Corporation. All rights reserved.
* Copyright 2026 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'

const LlmEvent = require('./event')
const LlmEmbedding = require('../embedding')

class LlmEmbedding extends LlmEvent {
constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
super({ agent, segment, request, response, responseAttrs: true, transaction })
this.error = withError

if (agent.config.ai_monitoring.record_content.enabled === true) {
this.input = request.contents?.toString()
}
/**
* Encapsulates a Google Gen AI LlmEmbedding.
*/
module.exports = class GoogleGenAiLlmEmbedding extends LlmEmbedding {
constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
super({ agent,
segment,
transaction,
requestInput: request?.contents,
requestModel: request?.model,
responseModel: response?.modelVersion,
vendor: 'gemini',
error })
}
}

module.exports = LlmEmbedding
36 changes: 0 additions & 36 deletions lib/llm-events/google-genai/event.js

This file was deleted.

21 changes: 21 additions & 0 deletions lib/llm-events/google-genai/get-usage-tokens.js
@@ -0,0 +1,21 @@
/*
* Copyright 2026 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'

module.exports = getUsageTokens

/**
* Grabs the prompt, completion, and total token count from the
* given response object.
* @param {object} response Google Gen AI response object
* @returns {object} { promptTokens, completionTokens, totalTokens }
*/
function getUsageTokens(response) {
const promptTokens = Number(response?.usageMetadata?.promptTokenCount)
const completionTokens = Number(response?.usageMetadata?.candidatesTokenCount)
const totalTokens = Number(response?.usageMetadata?.totalTokenCount)
return { promptTokens, completionTokens, totalTokens }

Contributor:
There's no need to assert that response is defined when the function requires it as a parameter. I'd also avoid the cost of "is metadata defined" by:

const { usageMetadata } = response
if (Object.prototype.toString.call(usageMetadata) !== '[object Object]') {
  return { promptTokens: 0, completionTokens: 0, totalTokens: 0 }
}

}
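
For reference, a minimal runnable sketch of the helper with the reviewer's suggested guard folded in; the early return for a missing or non-object usageMetadata is the reviewer's suggestion, not code that ships in this PR:

'use strict'

// Sketch only: getUsageTokens with the reviewer's suggested guard applied.
// The guard short-circuits once when usageMetadata is absent or not a plain
// object, instead of optional-chaining into it on every field access.
function getUsageTokens(response) {
  const { usageMetadata } = response
  if (Object.prototype.toString.call(usageMetadata) !== '[object Object]') {
    return { promptTokens: 0, completionTokens: 0, totalTokens: 0 }
  }
  return {
    promptTokens: Number(usageMetadata.promptTokenCount),
    completionTokens: Number(usageMetadata.candidatesTokenCount),
    totalTokens: Number(usageMetadata.totalTokenCount)
  }
}

// A response without usageMetadata now yields zeroed counts rather than NaN
console.log(getUsageTokens({})) // { promptTokens: 0, completionTokens: 0, totalTokens: 0 }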
16 changes: 4 additions & 12 deletions lib/llm-events/google-genai/index.js
@@ -1,18 +1,10 @@
/*
* Copyright 2025 New Relic Corporation. All rights reserved.
* Copyright 2026 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'

const LlmChatCompletionSummary = require('./chat-completion-summary')
const LlmChatCompletionMessage = require('./chat-completion-message')
const LlmEmbedding = require('./embedding')
const LlmErrorMessage = require('../error-message')

module.exports = {
LlmChatCompletionMessage,
LlmChatCompletionSummary,
LlmEmbedding,
LlmErrorMessage
LlmChatCompletionMessage: require('./chat-completion-message'),
LlmChatCompletionSummary: require('./chat-completion-summary'),
LlmEmbedding: require('./embedding')
}
2 changes: 1 addition & 1 deletion lib/subscribers/google-genai/embed-content.js
@@ -22,7 +22,7 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber
transaction,
request,
response,
withError: !!err
error: !!err

Contributor:
error: err === true


Contributor (Author):
err is an object; updated the doc block to reflect that.

})
}

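
A quick aside on the error: !!err exchange above. Assuming err is an Error instance (or undefined), as the author's reply indicates, double negation coerces it to a boolean, while strict equality against true can never match an object. A tiny sketch:

// Assumption from the author's reply: err is an Error instance (or undefined)
const err = new Error('request failed')
console.log(!!err)        // true, because any object is truthy
console.log(err === true) // false, an object never strictly equals a boolean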
34 changes: 26 additions & 8 deletions lib/subscribers/google-genai/generate-content.js
@@ -27,13 +27,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
})
}

/**
* Gets the request/input and response messages from the
* Google Gen AI request and response objects.
* @param {object} params function parameters
* @param {object} params.request Google Gen AI request object
* @param {object} params.response Google Gen AI response object
* @returns {object[]} an array of messages like { content, role }
*/
getMessages({ request, response }) {
// Only take the first response message and append to input messages
// request.contents can be a string or an array of strings
// response.candidates is an array of candidates (choices); we only take the first one
const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents]
const responseMessage = response?.candidates?.[0]?.content
return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages
const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
const messages = contents.map((item) => {
return { content: item, role: 'user' }
})
const responseContent = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text

Contributor:
The function signature and docblock indicate that response will always be defined. Is that correct?

if (responseContent) {
// Do not push an empty response (likely from an error)
messages.push({ content: responseContent, role: 'assistant' })
}
return messages
}

createCompletionSummary({ ctx, request, response = {}, err }) {
@@ -44,21 +57,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
transaction,
request,
response,
withError: !!err
error: !!err

Contributor:
error: err === true


Contributor (Author):
err is an object; updated the doc block to reflect that.

})
}

createCompletionMessage({ ctx, request, response, index, completionId, message }) {
const { segment, transaction } = ctx

const isResponse = message?.content === response?.text

return new LlmChatCompletionMessage({
agent: this.agent,
segment,
transaction,
request,
response,
index,
sequence: index,
completionId,
message
content: message.content,
role: message.role,
isResponse
})
}
}
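
To make the reworked getMessages flow concrete, here is a standalone sketch of the same mapping with a hypothetical request/response pair; the field names mirror those in the diff above, but the sample values are invented for illustration:

'use strict'

// Standalone sketch of the getMessages logic from the diff above.
// request.contents may be a string or an array; each entry becomes a user
// message, and the first candidate's text (when present) is appended as the
// assistant message.
function getMessages({ request, response }) {
  const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
  const messages = contents.map((item) => {
    return { content: item, role: 'user' }
  })
  const responseContent = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
  if (responseContent) {
    // Do not push an empty response (likely from an error)
    messages.push({ content: responseContent, role: 'assistant' })
  }
  return messages
}

// Hypothetical shapes, for illustration only
const request = { contents: 'What is the capital of France?' }
const response = { text: 'Paris' }
console.log(getMessages({ request, response }))
// [
//   { content: 'What is the capital of France?', role: 'user' },
//   { content: 'Paris', role: 'assistant' }
// ]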