Skip to content

Commit 644a06e

Browse files
committed
summary test passing
1 parent a908467 commit 644a06e

File tree

4 files changed

+30
-40
lines changed

4 files changed

+30
-40
lines changed

lib/llm-events/google-genai/embedding.js

Lines changed: 1 addition & 1 deletion
```diff
@@ -28,7 +28,7 @@ class LlmEmbedding extends LlmEvent {
       : undefined
     this.error = params.isError
     this.duration = params.segment.getDurationInMillis()
-    // TODO: idk if this is correct for token count
+    // TODO: use token callback function? see Python agent work
     this.token_count = Array.isArray(params.response?.embeddings)
       ? params.response.embeddings.reduce((sum, e) => sum + (e?.values?.length || 0), 0)
       : undefined
```

lib/llm-events/google-genai/event.js

Lines changed: 4 additions & 23 deletions
```diff
@@ -14,7 +14,7 @@ module.exports = class LlmEvent extends BaseEvent {

     this.id = makeId(36)
     this.appName = agent.config.applications()[0]
-    this.request_id = response?.headers?.['x-request-id'] // TODO: what is this in Gemini?
+    // this.request_id = response?.headers?.['x-request-id'] // TODO: what is this in Gemini?
     this.trace_id = transaction?.traceId
     this.span_id = segment?.id
     this['response.model'] = response?.modelVersion
```
```diff
@@ -23,30 +23,11 @@ module.exports = class LlmEvent extends BaseEvent {
     this.ingest_source = 'Node'
     this.metadata = agent

-    /**
-     * Used in embedding, and chat completion summary.
-     * The flag will include response attributes but also
-     * other attributes from request like model, and api key.
-     * Lastly, it includes the active span's duration.
-     */
+    // TODO: no valid response.headers found in Gemini?
     if (responseAttrs) {
       this.duration = segment?.getDurationInMillis()
-      this.responseAttrs(response)
+      // relevant headers will be prefixed with 'x-goog'?
+      // this.responseAttrs(response)
     }
   }
-
-  responseAttrs(response) {
-    // TODO: no response.headers?
-    // relevant headers will be prefixed with 'x-goog'?
-    this['response.organization'] = response?.headers?.['google-organization']
-    this['response.headers.llmVersion'] = response?.headers?.['gemini-version']
-    this['response.headers.ratelimitLimitRequests'] =
-      response?.headers?.['x-ratelimit-limit-requests']
-    this['response.headers.ratelimitLimitTokens'] = response?.headers?.['x-ratelimit-limit-tokens']
-    this['response.headers.ratelimitResetTokens'] = response?.headers?.['x-ratelimit-reset-tokens']
-    this['response.headers.ratelimitRemainingTokens'] =
-      response?.headers?.['x-ratelimit-remaining-tokens']
-    this['response.headers.ratelimitRemainingRequests'] =
-      response?.headers?.['x-ratelimit-remaining-requests']
-  }
 }
```

test/unit/llm-events/google-genai/chat-completion-summary.test.js

Lines changed: 1 addition & 1 deletion
```diff
@@ -32,7 +32,7 @@ test('should properly create a LlmChatCompletionSummary event', (t, end) => {
     segment,
     transaction: tx,
     request: req,
-    response: res // TODO: res is different
+    response: res
   })
   const expected = getExpectedResult(tx, chatSummaryEvent, 'summary')
   assert.deepEqual(chatSummaryEvent, expected)
```

test/unit/llm-events/google-genai/common.js

Lines changed: 24 additions & 15 deletions
```diff
@@ -5,8 +5,23 @@

 'use strict'
 const res = {
-  model: 'gemini-2.0-flash',
-  choices: [{ finish_reason: 'stop', message: { content: 'a lot', role: 'know-it-all' } }]
+  modelVersion: 'gemini-2.0-flash',
+  candidates: [
+    {
+      content: {
+        parts: [
+          { text: "I don't know!" }
+        ],
+        role: 'model'
+      },
+      finishReason: 'STOP'
+    }
+  ],
+  usageMetadata: {
+    promptTokenCount: 10,
+    candidatesTokenCount: 20,
+    totalTokenCount: 30
+  }
 }

 const req = {
```
```diff
@@ -29,20 +44,14 @@ function getExpectedResult(tx, event, type, completionId) {
     appName: 'New Relic for Node.js tests',
     trace_id: tx.traceId,
     span_id: spanId,
-    'response.model': 'gemini-2.0-flash',
+    'request.model': 'gemini-2.0-flash',
     vendor: 'gemini',
     ingest_source: 'Node'
   }
   const resKeys = {
     duration: child.getDurationInMillis(),
-    'request.model': 'gemini-2.0-flash',
-    'response.organization': 'new-relic',
-    'response.headers.llmVersion': '1.0.0',
-    'response.headers.ratelimitLimitRequests': '100',
-    'response.headers.ratelimitLimitTokens': '100',
-    'response.headers.ratelimitResetTokens': '100',
-    'response.headers.ratelimitRemainingTokens': '10',
-    'response.headers.ratelimitRemainingRequests': '10'
+    'response.model': 'gemini-2.0-flash',
+    // TODO: response.headers?
   }

   switch (type) {
```
```diff
@@ -56,10 +65,10 @@ function getExpectedResult(tx, event, type, completionId) {
       expected = {
         ...expected,
         ...resKeys,
-        'request.max_tokens': '1000000',
-        'request.temperature': '1.0',
-        'response.number_of_messages': 3,
-        'response.choices.finish_reason': 'stop',
+        'request.max_tokens': 1000000,
+        'request.temperature': 1.0,
+        'response.number_of_messages': 2,
+        'response.choices.finish_reason': 'STOP',
         error: false
       }
       break
```

0 commit comments

Comments (0)