|
5 | 5 |
|
6 | 6 | 'use strict' |
7 | 7 |
|
| 8 | +const { geminiApiKey, googleGenAiHeaders } = require('../../../lib/symbols') |
8 | 9 | const { |
9 | 10 | LlmChatCompletionMessage, |
10 | 11 | LlmChatCompletionSummary, |
@@ -119,42 +120,67 @@ function recordChatCompletionMessages({ |
119 | 120 | delete response.headers |
120 | 121 | } |
121 | 122 |
|
/**
 * Attaches the Gemini api key and the response headers to the
 * currently active segment, stored under well-known symbols.
 *
 * @param {object} params input params
 * @param {Shim} params.shim instance of shim
 * @param {object} params.result result from the genai request
 * @param {string} params.apiKey api key from the genai client
 */
function decorateSegment({ shim, result, apiKey }) {
  const activeSegment = shim.getActiveSegment()
  // Nothing to decorate when no segment is active.
  if (!activeSegment) {
    return
  }

  activeSegment[geminiApiKey] = apiKey

  const fetchHeaders = result?.response?.headers
  if (fetchHeaders) {
    // Headers object (iterable of [name, value] pairs) → plain object.
    activeSegment[googleGenAiHeaders] = Object.fromEntries(fetchHeaders)
  } else {
    // Fall back to a shallow copy of a plain-object headers bag, if any.
    activeSegment[googleGenAiHeaders] = { ...result?.headers }
  }
}
| 144 | + |
122 | 145 | module.exports = function initialize(agent, googleGenAi, moduleName, shim) { |
123 | 146 | if (agent?.config?.ai_monitoring?.enabled !== true) { |
124 | | - shim.logger.debug('config.ai_monitoring.enabled is set to false. Skipping instrumentation.') |
| 147 | + shim.logger.debug('config.ai_monitoring.enabled is set to false.') |
125 | 148 | return |
126 | 149 | } |
127 | 150 | // Update the tracking metric name with the version of the library |
128 | 151 | // being instrumented. We do not have access to the version when |
129 | 152 | // initially declaring the variable. |
130 | 153 | TRACKING_METRIC = `${TRACKING_METRIC}/${shim.pkgVersion}` |
131 | 154 |
|
132 | | - const models = googleGenAi.Models |
133 | | - // TODO: why is generateContentInternal and generateContentStreamInternal |
134 | | - // exposed but not generateContent or generateContentStream? |
135 | | - |
136 | 155 | /** |
137 | 156 | * Instrumentation is only done to get the response headers and attach |
138 | 157 | * to the active segment as @google/genai hides the headers from the functions |
139 | 158 | * we are trying to instrument. |
140 | 159 | * see: https://github.com/googleapis/js-genai/blob/cd0454862b4a0251d2606eeca8500b3b76004944/src/models.ts#L200 |
141 | | - * |
142 | | - * TODO: Do we even need the headers? |
143 | 160 | */ |
144 | | - shim.wrap(models.prototype, 'processParamsForMcpUsage', function wrapProcessParamsForMcpUsage(shim, original) { |
145 | | - return async function wrappedProcessParamsForMcpUsage(...args) { |
146 | | - // Call the original function and capture the result |
147 | | - const newParams = await original.apply(this, arguments) |
148 | | - |
149 | | - // Inspect the headers in newParams |
150 | | - const headers = newParams?.config?.httpOptions?.headers |
151 | | - shim.logger.debug('Headers in newParams:', headers) |
152 | | - |
153 | | - // Return the modified newParams |
154 | | - return newParams |
| 161 | + const httpResponse = googleGenAi.HttpResponse |
| 162 | + shim.wrap(httpResponse.prototype, 'json', function wrapJson(shim, func) { |
| 163 | + return async function wrappedJson() { |
| 164 | + const response = func.apply(this, arguments) |
| 165 | + if (await response) { |
| 166 | + // TODO: this does get some headers but not 'x-goog*' |
| 167 | + const headers = this.headers |
| 168 | + if (headers) { |
| 169 | + decorateSegment({ |
| 170 | + shim, |
| 171 | + result: response, |
| 172 | + apiKey: this[geminiApiKey] |
| 173 | + }) |
| 174 | + } |
| 175 | + } |
| 176 | + return response |
155 | 177 | } |
156 | 178 | }) |
157 | 179 |
|
| 180 | + const models = googleGenAi.Models |
| 181 | + // TODO: why is generateContentInternal and generateContentStreamInternal |
| 182 | + // exposed but not generateContent or generateContentStream? |
| 183 | + |
158 | 184 | /** |
159 | 185 | * Instruments chat completion creation |
160 | 186 | * and creates the LLM events |
@@ -209,47 +235,18 @@ module.exports = function initialize(agent, googleGenAi, moduleName, shim) { |
209 | 235 | name: GEMINI.COMPLETION, |
210 | 236 | promise: true, |
211 | 237 | after({ error: err, result: response, segment, transaction }) { |
212 | | - // Symbol.asyncIterator |
213 | | - // FIXME: it's causing recursion |
214 | | - shim.wrap(response, Symbol.asyncIterator, function wrapIterator(shim, orig) { |
215 | | - const originalAsyncIterator = orig |
216 | | - return async function * wrappedIterator() { |
217 | | - let content = '' |
218 | | - let role = '' |
219 | | - let chunk |
220 | | - let err |
221 | | - try { |
222 | | - const iterator = originalAsyncIterator.apply(this, arguments) |
223 | | - for await (chunk of iterator) { |
224 | | - if (chunk.choices?.[0]?.delta?.role) { |
225 | | - role = chunk.choices[0].delta.role |
226 | | - } |
227 | | - |
228 | | - content += chunk.choices?.[0]?.delta?.content ?? '' |
229 | | - yield chunk |
230 | | - } |
231 | | - } catch (streamErr) { |
232 | | - err = streamErr |
233 | | - } finally { |
234 | | - chunk.choices[0].message = { role, content } |
235 | | - // update segment duration since we want to extend the time it took to |
236 | | - // handle the stream |
237 | | - segment.touch() |
238 | | - |
239 | | - recordChatCompletionMessages({ |
240 | | - agent: shim.agent, |
241 | | - shim, |
242 | | - segment, |
243 | | - transaction, |
244 | | - request, |
245 | | - response: chunk, |
246 | | - err |
247 | | - }) |
248 | | - |
249 | | - addLlmMeta({ agent, transaction }) |
250 | | - } |
251 | | - } |
| 238 | + // TODO: actually need to handle the stream |
| 239 | + recordChatCompletionMessages({ |
| 240 | + agent, |
| 241 | + shim, |
| 242 | + segment, |
| 243 | + transaction, |
| 244 | + request, |
| 245 | + response, |
| 246 | + err |
252 | 247 | }) |
| 248 | + |
| 249 | + addLlmMeta({ agent, transaction }) |
253 | 250 | } |
254 | 251 | }) |
255 | 252 | }) |
|
0 commit comments