
Commit e2eaf2d

google-genai Gemini skeleton
1 parent 01d4fa5

10 files changed: +209 -0 lines changed
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const { geminiApiKey } = require('../../lib/symbols')
const {
  LlmChatCompletionMessage,
  LlmChatCompletionSummary,
  LlmEmbedding,
  LlmErrorMessage
} = require('../../lib/llm-events/google-genai')
const { RecorderSpec } = require('../../lib/shim/specs')
const { extractLlmContext } = require('../util/llm-utils')

const { AI } = require('../../lib/metrics/names')
const { GEMINI } = AI
let TRACKING_METRIC = GEMINI.TRACKING_PREFIX

module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
  if (agent?.config?.ai_monitoring?.enabled !== true) {
    shim.logger.debug('config.ai_monitoring.enabled is set to false. Skipping instrumentation.')
    return
  }

  // Update the tracking metric name with the version of the library
  // being instrumented. We do not have access to the version when
  // initially declaring the variable.
  TRACKING_METRIC = `${TRACKING_METRIC}/${shim.pkgVersion}`
}
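Note that the guard at the top of initialize() makes this instrumentation a no-op unless AI monitoring is enabled in the agent configuration. A minimal sketch of a newrelic.js that would satisfy both this check and the record_content checks used by the event classes below (app_name and license_key are placeholders):

// newrelic.js -- minimal sketch; values outside the ai_monitoring block are placeholders
'use strict'

exports.config = {
  app_name: ['my-app'],
  license_key: 'YOUR_LICENSE_KEY',
  ai_monitoring: {
    enabled: true, // otherwise initialize() logs and returns early
    record_content: {
      enabled: true // also consulted by the LLM event classes in this commit
    }
  }
}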
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'
const LlmEvent = require('./event')

module.exports = class LlmChatCompletionMessage extends LlmEvent {
  constructor({
    agent,
    segment,
    request = {},
    response = {},
    index = 0,
    message,
    completionId,
    transaction
  }) {
    super({ agent, segment, request, response, transaction })
    this.id = `${response.id}-${index}`
    this.role = message?.role
    this.sequence = index
    this.completion_id = completionId
    this.is_response = response?.choices?.[0]?.message?.content === message?.content

    if (agent.config.ai_monitoring.record_content.enabled === true) {
      this.content = message?.content
    }

    const tokenCB = agent.llm?.tokenCountCallback
    if (typeof tokenCB !== 'function') {
      return
    }

    if (this.is_response) {
      this.token_count = tokenCB(this['response.model'], message?.content)
    } else {
      this.token_count = tokenCB(request.model || request.engine, message?.content)
    }
  }
}
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'
const LlmEvent = require('./event')

module.exports = class LlmChatCompletionSummary extends LlmEvent {
  constructor({ agent, segment, request = {}, response = {}, withError = false, transaction }) {
    super({ agent, segment, request, response, responseAttrs: true, transaction })
    this.error = withError
    this['request.max_tokens'] = request.max_tokens
    this['request.temperature'] = request.temperature
    this['response.number_of_messages'] = request?.messages?.length + response?.choices?.length
    this['response.choices.finish_reason'] = response?.choices?.[0]?.finish_reason
  }
}
Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmEvent = require('./event')

/**
 * @typedef {object} LlmEmbeddingParams
 * @augments LlmEventParams
 * @property {string} input - The input message for the embedding call
 */
/**
 * @type {LlmEmbeddingParams}
 */
const defaultParams = {}

class LlmEmbedding extends LlmEvent {
  constructor(params = defaultParams) {
    super(params)
    const { agent, input, request = {} } = params
    const tokenCb = agent?.llm?.tokenCountCallback

    this.input = agent.config?.ai_monitoring?.record_content?.enabled
      ? input
      : undefined
    this.error = params.isError
    this.duration = params.segment.getDurationInMillis()

    // Even if not recording content, we should use the local token counting callback
    // to record token usage. Assumption: the request's model (or engine) is the
    // identifier passed to the callback for this embedding call.
    if (typeof tokenCb === 'function') {
      this.token_count = tokenCb(request.model || request.engine, input)
    }
  }
}

module.exports = LlmEmbedding
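The token_count attribute here (and on the chat completion message above) is only populated when a token-count callback has been registered on the agent. A hedged sketch of wiring one up, assuming newrelic.setLlmTokenCountCallback as the entry point; the counting heuristic is a placeholder, not a real tokenizer:

// Application code -- illustrative only; swap the heuristic for a real tokenizer
const newrelic = require('newrelic')

newrelic.setLlmTokenCountCallback(function countTokens(model, content) {
  if (!content) {
    return 0
  }
  // Rough 4-characters-per-token approximation as a stand-in
  return Math.ceil(content.length / 4)
})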
Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const BaseEvent = require('../event')
const { makeId } = require('../../util/hashes')

module.exports = class LlmEvent extends BaseEvent {
  constructor({ agent, segment, request, response, responseAttrs = false, transaction }) {
    super()

    this.id = makeId(36)
    this.appName = agent.config.applications()[0]
    this.request_id = response?.headers?.['x-request-id']
    this.trace_id = transaction?.traceId
    this.span_id = segment?.id
    this['response.model'] = response.model // TODO: request.model?
    this.vendor = 'gemini'
    this.ingest_source = 'Node'
    this.metadata = agent

    /**
     * Used by the embedding and chat completion summary events.
     * When set, the flag includes response attributes as well as
     * request attributes such as the model and API key.
     * Lastly, it includes the active span's duration.
     */
    if (responseAttrs) {
      this['request.model'] = request.model || request.engine
      this.duration = segment?.getDurationInMillis()
      this.responseAttrs(response)
    }
  }

  responseAttrs(response) {
    this['response.organization'] = response?.headers?.['google-organization']
    this['response.headers.llmVersion'] = response?.headers?.['gemini-version']
    this['response.headers.ratelimitLimitRequests'] =
      response?.headers?.['x-ratelimit-limit-requests']
    this['response.headers.ratelimitLimitTokens'] = response?.headers?.['x-ratelimit-limit-tokens']
    this['response.headers.ratelimitResetTokens'] = response?.headers?.['x-ratelimit-reset-tokens']
    this['response.headers.ratelimitRemainingTokens'] =
      response?.headers?.['x-ratelimit-remaining-tokens']
    this['response.headers.ratelimitRemainingRequests'] =
      response?.headers?.['x-ratelimit-remaining-requests']
  }
}
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
/*
 * Copyright 2025 New Relic Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const LlmChatCompletionSummary = require('./chat-completion-summary')
const LlmChatCompletionMessage = require('./chat-completion-message')
const LlmEmbedding = require('./embedding')
const LlmErrorMessage = require('../error-message')

module.exports = {
  LlmChatCompletionMessage,
  LlmChatCompletionSummary,
  LlmEmbedding,
  LlmErrorMessage
}
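As a rough sketch of how these exports fit together, the instrumentation would construct the summary and message events from the active agent, segment, and transaction plus the request/response pair. Everything below (the require path, the stub agent/segment/transaction objects, and the request/response shapes) is assumed for illustration; in the agent these come from the shim:

// Illustration only: stubbed collaborators, not the agent's real wiring
const {
  LlmChatCompletionSummary,
  LlmChatCompletionMessage
} = require('./lib/llm-events/google-genai')

const agent = {
  config: {
    applications: () => ['my-app'],
    ai_monitoring: { enabled: true, record_content: { enabled: true } }
  },
  llm: {}
}
const segment = { id: 'span-1', getDurationInMillis: () => 42 }
const transaction = { traceId: 'trace-1' }

const request = {
  model: 'gemini-2.0-flash',
  temperature: 0.5,
  max_tokens: 128,
  messages: [{ role: 'user', content: 'Hi' }]
}
const response = {
  id: 'resp-1',
  model: 'gemini-2.0-flash',
  headers: {},
  choices: [{ message: { role: 'model', content: 'Hello!' }, finish_reason: 'stop' }]
}

const summary = new LlmChatCompletionSummary({ agent, segment, request, response, transaction })
const message = new LlmChatCompletionMessage({
  agent,
  segment,
  request,
  response,
  transaction,
  index: 1,
  message: response.choices[0].message,
  completionId: summary.id
})
// summary['response.number_of_messages'] === 2, message.is_response === true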

lib/metrics/names.js

Lines changed: 6 additions & 0 deletions
@@ -174,6 +174,12 @@ const AI = {
   VECTORSTORE: 'Llm/vectorstore'
 }
 
+AI.GEMINI = {
+  TRACKING_PREFIX: `${AI.TRACKING_PREFIX}/Gemini`,
+  EMBEDDING: `${AI.EMBEDDING}/Gemini`,
+  COMPLETION: `${AI.COMPLETION}/Gemini`
+}
+
 AI.OPENAI = {
   TRACKING_PREFIX: `${AI.TRACKING_PREFIX}/OpenAI`,
   EMBEDDING: `${AI.EMBEDDING}/OpenAI/create`,
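For reference, the tracking metric built in the instrumentation above expands from these templates. The concrete prefix values below are assumptions for illustration (they mirror the agent's existing OpenAI naming), not values confirmed by this diff:

// Hypothetical prefix values, shown only to illustrate the template expansion
const AI = {
  TRACKING_PREFIX: 'Supportability/Nodejs/ML',
  EMBEDDING: 'Llm/embedding',
  COMPLETION: 'Llm/completion'
}
AI.GEMINI = {
  TRACKING_PREFIX: `${AI.TRACKING_PREFIX}/Gemini`, // 'Supportability/Nodejs/ML/Gemini'
  EMBEDDING: `${AI.EMBEDDING}/Gemini`,             // 'Llm/embedding/Gemini'
  COMPLETION: `${AI.COMPLETION}/Gemini`            // 'Llm/completion/Gemini'
}
// initialize() then appends the instrumented package version:
// `${AI.GEMINI.TRACKING_PREFIX}/${shim.pkgVersion}` -> e.g. 'Supportability/Nodejs/ML/Gemini/1.2.3'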
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
// TODO
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
// TODO
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
// TODO
