-
Notifications
You must be signed in to change notification settings - Fork 419
refactor: Google Gen AI LLM event refactor #3748
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,21 +1,24 @@ | ||
| /* | ||
| * Copyright 2025 New Relic Corporation. All rights reserved. | ||
| * Copyright 2026 New Relic Corporation. All rights reserved. | ||
| * SPDX-License-Identifier: Apache-2.0 | ||
| */ | ||
|
|
||
| 'use strict' | ||
|
|
||
| const LlmEvent = require('./event') | ||
| const LlmEmbedding = require('../embedding') | ||
|
|
||
| class LlmEmbedding extends LlmEvent { | ||
| constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) { | ||
| super({ agent, segment, request, response, responseAttrs: true, transaction }) | ||
| this.error = withError | ||
|
|
||
| if (agent.config.ai_monitoring.record_content.enabled === true) { | ||
| this.input = request.contents?.toString() | ||
| } | ||
| /** | ||
| * Encapsulates a Google Gen AI LlmEmbedding. | ||
| */ | ||
| module.exports = class GoogleGenAiLlmEmbedding extends LlmEmbedding { | ||
| constructor({ agent, segment, transaction, request = {}, response = {}, error }) { | ||
| super({ agent, | ||
| segment, | ||
| transaction, | ||
| requestInput: request?.contents, | ||
| requestModel: request?.model, | ||
| responseModel: response?.modelVersion, | ||
| vendor: 'gemini', | ||
| error }) | ||
| } | ||
| } | ||
|
|
||
| module.exports = LlmEmbedding |
This file was deleted.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,21 @@ | ||
| /* | ||
| * Copyright 2026 New Relic Corporation. All rights reserved. | ||
| * SPDX-License-Identifier: Apache-2.0 | ||
| */ | ||
|
|
||
| 'use strict' | ||
|
|
||
| module.exports = getUsageTokens | ||
|
|
||
/**
 * Grabs the prompt, completion, and total token count from the
 * given response object.
 *
 * Missing counts coerce to NaN (`Number(undefined)`), matching the
 * original behavior so downstream consumers see the same values.
 * @param {object} response Google Gen AI response object
 * @returns {object} { promptTokens, completionTokens, totalTokens }
 */
function getUsageTokens(response) {
  // Hoist the metadata lookup so we only traverse the optional chain once.
  const usage = response?.usageMetadata
  return {
    promptTokens: Number(usage?.promptTokenCount),
    completionTokens: Number(usage?.candidatesTokenCount),
    totalTokens: Number(usage?.totalTokenCount)
  }
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,18 +1,10 @@ | ||
| /* | ||
| * Copyright 2025 New Relic Corporation. All rights reserved. | ||
| * Copyright 2026 New Relic Corporation. All rights reserved. | ||
| * SPDX-License-Identifier: Apache-2.0 | ||
| */ | ||
|
|
||
| 'use strict' | ||
|
|
||
| const LlmChatCompletionSummary = require('./chat-completion-summary') | ||
| const LlmChatCompletionMessage = require('./chat-completion-message') | ||
| const LlmEmbedding = require('./embedding') | ||
| const LlmErrorMessage = require('../error-message') | ||
|
|
||
| module.exports = { | ||
| LlmChatCompletionMessage, | ||
| LlmChatCompletionSummary, | ||
| LlmEmbedding, | ||
| LlmErrorMessage | ||
| LlmChatCompletionMessage: require('./chat-completion-message'), | ||
| LlmChatCompletionSummary: require('./chat-completion-summary'), | ||
| LlmEmbedding: require('./embedding') | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,7 +22,7 @@ class GoogleGenAIEmbedContentSubscriber extends AiMonitoringEmbeddingSubscriber | |
| transaction, | ||
| request, | ||
| response, | ||
| withError: !!err | ||
| error: !!err | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. error: err === true
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. err is an object, updated doc block to reflect |
||
| }) | ||
| } | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -27,13 +27,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber { | |
| }) | ||
| } | ||
|
|
||
| /** | ||
| * Gets the request/input and response messages from the | ||
| * Google Gen AI request and response objects. | ||
| * @param {object} params function parameters | ||
| * @param {object} params.request Google Gen AI request object | ||
| * @param {object} params.response Google Gen AI response object | ||
| * @returns {object[]} an array of messages like { content, role } | ||
| */ | ||
| getMessages({ request, response }) { | ||
| // Only take the first response message and append to input messages | ||
| // request.contents can be a string or an array of strings | ||
| // response.candidates is an array of candidates (choices); we only take the first one | ||
| const inputMessages = Array.isArray(request.contents) ? request.contents : [request.contents] | ||
| const responseMessage = response?.candidates?.[0]?.content | ||
| return responseMessage !== undefined ? [...inputMessages, responseMessage] : inputMessages | ||
| const contents = Array.isArray(request.contents) ? request.contents : [request.contents] | ||
| const messages = contents.map((item) => { | ||
| return { content: item, role: 'user' } | ||
| }) | ||
| const responseContent = response?.text ?? response?.candidates?.[0]?.content?.parts?.[0]?.text | ||
|
||
| if (responseContent) { | ||
| // Do not push an empty response (likely from an error) | ||
| messages.push({ content: responseContent, role: 'assistant' }) | ||
| } | ||
| return messages | ||
| } | ||
|
|
||
| createCompletionSummary({ ctx, request, response = {}, err }) { | ||
|
|
@@ -44,21 +57,26 @@ class GoogleGenAIGenerateContentSubscriber extends AiMonitoringChatSubscriber { | |
| transaction, | ||
| request, | ||
| response, | ||
| withError: !!err | ||
| error: !!err | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
| }) | ||
| } | ||
|
|
||
| createCompletionMessage({ ctx, request, response, index, completionId, message }) { | ||
| const { segment, transaction } = ctx | ||
|
|
||
| const isResponse = message?.content === response?.text | ||
|
|
||
| return new LlmChatCompletionMessage({ | ||
| agent: this.agent, | ||
| segment, | ||
| transaction, | ||
| request, | ||
| response, | ||
| index, | ||
| sequence: index, | ||
| completionId, | ||
| message | ||
| content: message.content, | ||
| role: message.role, | ||
| isResponse | ||
| }) | ||
| } | ||
| } | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There's no need to assert that response is defined when the function requires it as a parameter. I'd also avoid the cost of "is metadata defined" by: