54 changes: 24 additions & 30 deletions lib/llm-events/google-genai/chat-completion-message.js
@@ -1,42 +1,36 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'
-const LlmEvent = require('./event')
-const { makeId } = require('../../util/hashes')
-
-module.exports = class LlmChatCompletionMessage extends LlmEvent {
-  constructor({
-    agent,
+const LlmChatCompletionMessage = require('../chat-completion-message')
+const getUsageTokens = require('./get-usage-tokens')
+
+/**
+ * Encapsulates a Google Gen AI LlmChatCompletionMessage.
+ */
+module.exports = class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
+  constructor({ agent,
     segment,
+    transaction,
     request = {},
     response = {},
-    index = 0,
-    message,
+    sequence = 0,
+    content, role,
     completionId,
-    transaction
-  }) {
-    super({ agent, segment, request, response, transaction })
-    this.id = makeId(36)
-    // message?.role is only defined if the message is
-    // a response and it is always 'model'.
-    // request messages do not have a role
-    this.role = message?.role ?? 'user'
-    this.sequence = index
-    this.completion_id = completionId
-    const responseText = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text
-    this.is_response = responseText === message?.parts?.[0]?.text
-
-    if (agent.config.ai_monitoring.record_content.enabled === true) {
-      this.content = this.is_response ? message?.parts?.[0]?.text : message
-    }
-
-    // only add timestamp for request/input messages
-    if (this.is_response === false) {
-      this.timestamp = segment.timer.start
-    }
+    isResponse }) {
+    super({ agent,
+      segment,
+      transaction,
+      vendor: 'gemini',
+      sequence,
+      content,
+      role,
+      completionId,
+      isResponse,
+      responseModel: response?.modelVersion })

     this.setTokenCount(agent, request, response)
   }

@@ -66,7 +60,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
       return
     }

-    const tokens = this.getUsageTokens(response)
+    const tokens = getUsageTokens(response)
     this.setTokenInCompletionMessage(tokens)
   }
 }
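Aside: a minimal construction sketch of the refactored class, to show how the subscriber now passes pre-extracted content, role, and isResponse values instead of a raw message object. This is not part of the diff; agent, segment, and transaction stand in for live agent objects supplied by the instrumentation, and all literal values are illustrative.

  // Hypothetical usage sketch, not part of the PR.
  const LlmChatCompletionMessage = require('./lib/llm-events/google-genai/chat-completion-message')

  const msg = new LlmChatCompletionMessage({
    agent,        // running agent instance (assumed in scope)
    segment,      // active trace segment (assumed in scope)
    transaction,  // active transaction (assumed in scope)
    request: { model: 'gemini-2.0-flash', contents: 'Hello' },
    response: {
      modelVersion: 'gemini-2.0-flash-001',
      usageMetadata: { promptTokenCount: 1, candidatesTokenCount: 5, totalTokenCount: 6 }
    },
    sequence: 0,
    content: 'Hello',
    role: 'user',
    completionId: 'abc123',
    isResponse: false
  })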
42 changes: 31 additions & 11 deletions lib/llm-events/google-genai/chat-completion-summary.js
@@ -1,27 +1,47 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'
-const LlmEvent = require('./event')
-
-module.exports = class LlmChatCompletionSummary extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
-    super({ agent, segment, request, response, responseAttrs: true, transaction })
-    this.error = withError
+const LlmChatCompletionSummary = require('../chat-completion-summary')
+const getUsageTokens = require('./get-usage-tokens')
+
+/**
+ * Encapsulates a Google Gen AI LlmChatCompletionSummary.
+ */
+module.exports = class GoogleGenAiLlmChatCompletionSummary extends LlmChatCompletionSummary {
+  /**
+   *
+   * @param {object} params Constructor parameters
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {TraceSegment} params.segment Current segment
+   * @param {Transaction} params.transaction Current and active transaction
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @param {boolean} [params.error] Set to `true` if an error occurred
+   */
+  constructor({ agent, segment, transaction, request, response, error }) {
+    super({ agent,
+      segment,
+      transaction,
+      responseModel: response?.modelVersion,
+      requestModel: request?.model,
+      finishReason: response?.candidates?.[0]?.finishReason,
+      maxTokens: request.config?.maxOutputTokens,
+      temperature: request.config?.temperature,
+      vendor: 'gemini',
+      error })

     let requestMessagesLength = 0
     if (Array.isArray(request?.contents)) {
       requestMessagesLength = request.contents.length
     } else if (typeof request?.contents === 'string') {
       requestMessagesLength = 1
     }
     this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)
-    this['response.choices.finish_reason'] = response?.candidates?.[0]?.finishReason
-    this['request.max_tokens'] = request.config?.maxOutputTokens
-    this['request.temperature'] = request.config?.temperature

-    this.timestamp = segment.timer.start
     this.setTokens(agent, request, response)
   }

@@ -51,7 +71,7 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
       return
     }

-    const tokens = this.getUsageTokens(response)
+    const tokens = getUsageTokens(response)
     this.setTokensInResponse(tokens)
   }
 }
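To make the message-count arithmetic concrete, a small worked sketch under the request/response shapes the constructor reads (values illustrative, not from a live call):

  // { contents: ['Hi', 'How are you?'] } -> requestMessagesLength = 2
  // { contents: 'Hi' }                   -> requestMessagesLength = 1
  // { }                                  -> requestMessagesLength = 0
  //
  // With one response candidate ({ candidates: [ ... ] }):
  //   this['response.number_of_messages'] = requestMessagesLength + 1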
37 changes: 25 additions & 12 deletions lib/llm-events/google-genai/embedding.js
@@ -1,21 +1,34 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'

-const LlmEvent = require('./event')
+const LlmEmbedding = require('../embedding')

-class LlmEmbedding extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
-    super({ agent, segment, request, response, responseAttrs: true, transaction })
-    this.error = withError
-
-    if (agent.config.ai_monitoring.record_content.enabled === true) {
-      this.input = request.contents?.toString()
-    }
+/**
+ * Encapsulates a Google Gen AI LlmEmbedding.
+ */
+module.exports = class GoogleGenAiLlmEmbedding extends LlmEmbedding {
+  /**
+   *
+   * @param {object} params Constructor params
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {TraceSegment} params.segment Current segment
+   * @param {Transaction} params.transaction Current and active transaction
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @param {boolean} [params.error] Set to true if an error occurred
+   */
+  constructor({ agent, segment, transaction, request = {}, response = {}, error }) {
+    super({ agent,
+      segment,
+      transaction,
+      requestInput: request?.contents,
+      requestModel: request?.model,
+      responseModel: response?.modelVersion,
+      vendor: 'gemini',
+      error })
   }
 }
-
-module.exports = LlmEmbedding
36 changes: 0 additions & 36 deletions lib/llm-events/google-genai/event.js

This file was deleted.

26 changes: 26 additions & 0 deletions lib/llm-events/google-genai/get-usage-tokens.js
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2026 New Relic Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+'use strict'
+
+module.exports = getUsageTokens
+
+/**
+ * Grabs the prompt, completion, and total token count from the
+ * given response object.
+ * @param {object} response Google Gen AI response object
+ * @returns {object} { promptTokens, completionTokens, totalTokens }
+ */
+function getUsageTokens(response) {
+  const { usageMetadata } = response
+  if (!usageMetadata) {
+    return { promptTokens: 0, completionTokens: 0, totalTokens: 0 }
+  }
+  return {
+    promptTokens: Number(usageMetadata.promptTokenCount),
+    completionTokens: Number(usageMetadata.candidatesTokenCount),
+    totalTokens: Number(usageMetadata.totalTokenCount)
+  }
+}
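A quick sketch of the new helper's behavior against shape-matching inputs (objects are illustrative, not captured from a live API call):

  const getUsageTokens = require('./lib/llm-events/google-genai/get-usage-tokens')

  // Response carrying usage metadata:
  getUsageTokens({
    usageMetadata: { promptTokenCount: 12, candidatesTokenCount: 34, totalTokenCount: 46 }
  })
  // => { promptTokens: 12, completionTokens: 34, totalTokens: 46 }

  // Response without usage metadata falls back to zeros:
  getUsageTokens({})
  // => { promptTokens: 0, completionTokens: 0, totalTokens: 0 }

Note the zero fallback only fires when usageMetadata is absent entirely; a usageMetadata object missing an individual count yields NaN for that field, since Number(undefined) is NaN.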
16 changes: 4 additions & 12 deletions lib/llm-events/google-genai/index.js
@@ -1,18 +1,10 @@
 /*
- * Copyright 2025 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'

-const LlmChatCompletionSummary = require('./chat-completion-summary')
-const LlmChatCompletionMessage = require('./chat-completion-message')
-const LlmEmbedding = require('./embedding')
-const LlmErrorMessage = require('../error-message')
-
 module.exports = {
-  LlmChatCompletionMessage,
-  LlmChatCompletionSummary,
-  LlmEmbedding,
-  LlmErrorMessage
+  LlmChatCompletionMessage: require('./chat-completion-message'),
+  LlmChatCompletionSummary: require('./chat-completion-summary'),
+  LlmEmbedding: require('./embedding')
 }
2 changes: 1 addition & 1 deletion lib/subscribers/ai-monitoring/embedding.js
@@ -30,7 +30,7 @@ class AiMonitoringEmbeddingSubscriber extends AiMonitoringSubscriber {
    * @param {Context} params.ctx active context
    * @param {object} params.request request made to method on a given llm library
    * @param {object} params.response response from method on a given llm library
-   * @param {object} params.err error if present
+   * @param {object} params.err error object if present
    * returns {object} a llm embedding instance for the given LLM
    */
   createEmbedding({ ctx, request, response, err }) {
12 changes: 11 additions & 1 deletion lib/subscribers/google-genai/embed-content.js
@@ -14,6 +14,16 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber
     this.events = ['asyncEnd']
   }

+  /**
+   * Creates a Google Gen AI LlmEmbedding instance.
+   *
+   * @param {object} params to function
+   * @param {Context} params.ctx active context
+   * @param {object} params.request request made to method on a given llm library
+   * @param {object} params.response response from method on a given llm library
+   * @param {object} params.err error object if present
+   * @returns {LlmEmbedding} a llm embedding instance for Google Gen AI
+   */
   createEmbedding({ ctx, request, response = {}, err }) {
     const { segment, transaction } = ctx
     return new LlmEmbedding({
@@ -22,7 +32,7 @@
       transaction,
       request,
       response,
-      withError: !!err
+      error: !!err
     })
   }

Review thread on the `error: !!err` line:
  Contributor: error: err === true
  Contributor Author: err is an object, updated doc block to reflect
46 changes: 37 additions & 9 deletions lib/subscribers/google-genai/generate-content.js
@@ -27,15 +27,38 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
     })
   }

-  getMessages({ request, response }) {
-    // Only take the first response message and append to input messages
-    const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents]
-    const responseMessage = response?.candidates?.[0]?.content
-    return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages
+  /**
+   * Gets the request/input and response messages from the
+   * Google Gen AI request and response objects.
+   * @param {object} params function parameters
+   * @param {object} params.request Google Gen AI request object
+   * @param {object} params.response Google Gen AI response object
+   * @returns {object[]} an array of messages like { content, role }
+   */
+  getMessages({ request, response = {} }) {
+    // request.contents can be a string or an array of strings
+    // response.candidates is an array of candidates (choices); we only take the first one
+    const contents = Array.isArray(request.contents) ? request.contents : [request.contents]
+    const messages = contents.map((item) => {
+      return { content: item, role: 'user' }
+    })
+    const responseContent = response.text ?? response.candidates?.[0]?.content?.parts?.[0]?.text
+    if (responseContent) {
+      // Do not push an empty response (likely from an error)
+      messages.push({ content: responseContent, role: 'assistant' })
+    }
+    return messages
   }

+  /**
+   * Creates a Google Gen AI LlmChatCompletionSummary instance.
+   *
+   * @param {object} params to function
+   * @param {Context} params.ctx active context
+   * @param {object} params.request request made to method
+   * @param {object} params.response response from method
+   * @param {object} [params.err] error object if present
+   * @returns {object} a llm completion summary instance for Google Gen AI
+   */
   createCompletionSummary({ ctx, request, response = {}, err }) {
     const { transaction, segment } = ctx
     return new LlmChatCompletionSummary({
@@ -44,21 +67,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber {
       transaction,
       request,
       response,
-      withError: !!err
+      error: !!err
     })
   }

   createCompletionMessage({ ctx, request, response, index, completionId, message }) {
     const { segment, transaction } = ctx
+
+    const isResponse = message?.content === response?.text
+
     return new LlmChatCompletionMessage({
       agent: this.agent,
       segment,
       transaction,
       request,
       response,
-      index,
+      sequence: index,
       completionId,
-      message
+      content: message.content,
+      role: message.role,
+      isResponse
     })
   }
 }

Review thread on the `error: !!err` line:
  Contributor: error: err === true
  Contributor Author: err is an object, updated doc block to reflect
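To trace the new message flow end to end, a sketch of one getMessages call under the shapes the implementation reads (illustrative values; subscriber stands in for an instance of the class above):

  // Hypothetical input/output sketch, not part of the PR.
  const messages = subscriber.getMessages({
    request: { contents: ['What is 2 + 2?'] },
    response: {
      text: '4',
      candidates: [{ content: { parts: [{ text: '4' }] } }]
    }
  })
  // => [
  //   { content: 'What is 2 + 2?', role: 'user' },
  //   { content: '4', role: 'assistant' }
  // ]

createCompletionMessage then flags the entry whose content matches response.text as the response, so only the assistant message above would get isResponse = true.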