Skip to content

Commit 998d3f5

Browse files
committed
appName only for langchain, makeId(32) instead of 36
1 parent 789afb4 commit 998d3f5

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

46 files changed

+197
-188
lines changed

lib/instrumentation/aws-sdk/v3/bedrock.js

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@ function isStreamingEnabled({ commandName, config }) {
7373
* @param {object} params.msg LLM event
7474
*/
7575
function recordEvent({ agent, type, msg }) {
76-
if (msg.serialize) msg.serialize() // TODO: remove
7776
const llmContext = extractLlmContext(agent)
7877
const timestamp = msg?.timestamp ?? Date.now()
7978

lib/llm-events/aws-bedrock/chat-completion-message.js

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmChatCompletionMessage = require('../chat-completion-message')
79

810
module.exports = class AwsBedrockLlmChatCompletionMessage extends LlmChatCompletionMessage {

lib/llm-events/aws-bedrock/chat-completion-summary.js

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmChatCompletionSummary = require('../chat-completion-summary')
79

810
module.exports = class AwsBedrockLlmChatCompletionSummary extends LlmChatCompletionSummary {

lib/llm-events/aws-bedrock/embedding.js

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmEmbedding = require('../embedding')
79

810
/**
@@ -15,31 +17,27 @@ module.exports = class AwsBedrockLlmEmbedding extends LlmEmbedding {
1517
* @param {Agent} params.agent New Relic agent instance
1618
* @param {object} params.segment Current segment
1719
* @param {object} params.transaction Current and active transaction
18-
* @param {string} params.requestId ID associated with the request - typically available in response headers
1920
* @param {string} params.requestInput Input to the embedding creation call
20-
* @param {string} params.requestModel Model name specified in the request (e.g. 'gpt-4')
21-
* @param {number} params.totalTokenCount Retrieved from the Bedrock response object, fallback for token calculation
22-
* @param {boolean} [params.error] Set to `true` if an error occurred during creation call - omitted if no error occurred
21+
* @param {object} params.bedrockCommand AWS Bedrock Command object, represents the request
22+
* @param {object} params.bedrockResponse AWS Bedrock Response object
23+
* @param {boolean} [params.error] Set to `true` if an error occurred during creation call
24+
* - omitted if no error occurred
2325
*/
24-
constructor({ agent, segment, transaction, requestInput, requestModel, requestId, totalTokenCount = 0, error }) {
26+
constructor({ agent, segment, transaction, requestInput, bedrockCommand, bedrockResponse, error }) {
2527
super({ agent,
2628
segment,
2729
transaction,
2830
vendor: 'bedrock',
31+
requestId: bedrockResponse?.requestId,
2932
requestInput,
30-
requestModel,
31-
requestId,
32-
responseModel: requestModel, // we can assume this in bedrock
33+
requestModel: bedrockCommand?.modelId,
34+
responseModel: bedrockCommand?.modelId, // we can assume requestModel==responseModel in bedrock
3335
error })
3436

35-
this.appName = agent.config.applications()[0] // TODO: still required?
36-
this.setTotalTokens(agent, requestInput, totalTokenCount)
37-
// TODO: bedrockResponse has headers, but they are not
38-
// in the list of `response.headers.<vendor_specific_headers>`,
39-
// still include them?
37+
this.setTotalTokens({ agent, input: requestInput, totalTokenCount: bedrockResponse?.totalTokenCount })
4038
}
4139

42-
setTotalTokens(agent, input, totalTokenCount) {
40+
setTotalTokens({ agent, input, totalTokenCount }) {
4341
const tokenCB = agent?.llm?.tokenCountCallback
4442

4543
// For embedding events, only total token count is relevant.

lib/llm-events/aws-bedrock/index.js

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,6 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
'use strict'
7-
86
module.exports = {
97
LlmChatCompletionMessage: require('./chat-completion-message'),
108
LlmChatCompletionSummary: require('./chat-completion-summary'),

lib/llm-events/base.js

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,14 +36,11 @@ class LlmEvent {
3636
* @param {boolean} [params.error] Set to `true` if an error occurred during creation call, omitted if no error occurred
3737
*/
3838
constructor({ agent, segment, transaction, vendor, responseModel, requestId, error }) {
39-
this.id = makeId(36)
39+
this.id = makeId(32)
4040
this.span_id = segment?.id
4141
this.trace_id = transaction?.traceId
4242
this.vendor = vendor
4343
this.metadata = agent
44-
// TODO: Does not appear in AIM spec, but was a
45-
// requirement for LangChain instrumentation back in 2024?
46-
// this.appName = agent.config.applications()[0]
4744

4845
// Omit `error` property if no error occurred
4946
if (error === true) {

lib/llm-events/chat-completion-message.js

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmEvent = require('./base')
79

810
/**
@@ -13,7 +15,7 @@ const LlmEvent = require('./base')
1315
* @property {string} id ID in the format `response_id`-`sequence`,
1416
* or a UUID generated by the agent if no response ID is returned by the LLM
1517
*/
16-
class LlmChatCompletionMessage extends LlmEvent {
18+
module.exports = class LlmChatCompletionMessage extends LlmEvent {
1719
/**
1820
*
1921
* @param {object} params constructor params
@@ -70,5 +72,3 @@ class LlmChatCompletionMessage extends LlmEvent {
7072
}
7173
}
7274
}
73-
74-
module.exports = LlmChatCompletionMessage

lib/llm-events/chat-completion-summary.js

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmEvent = require('./base')
79

810
/**
@@ -17,7 +19,7 @@ const LlmEvent = require('./base')
1719
* @property {string} response.organization Organization ID returned in the response or response headers
1820
* @property {number} timestamp Timestamp captured at the time of the LLM request with millisecond precision
1921
*/
20-
class LlmChatCompletionSummary extends LlmEvent {
22+
module.exports = class LlmChatCompletionSummary extends LlmEvent {
2123
/**
2224
* @param {object} params constructor parameters
2325
* @param {Agent} params.agent New Relic agent instance
@@ -52,5 +54,3 @@ class LlmChatCompletionSummary extends LlmEvent {
5254
this.duration = segment.getDurationInMillis()
5355
}
5456
}
55-
56-
module.exports = LlmChatCompletionSummary

lib/llm-events/embedding.js

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmEvent = require('./base')
79

810
/**
@@ -14,9 +16,9 @@ const LlmEvent = require('./base')
1416
* @property {string} response.organization Organization ID returned in the response or request headers
1517
* @property {number} response.usage.total_tokens Total number of tokens used for input text
1618
* @property {number} duration Total time taken for the embedding call to complete in milliseconds
17-
* @property {*} response.headers Vendor-specific headers
19+
* @property {object} response.headers Vendor-specific headers, if any
1820
*/
19-
class LlmEmbedding extends LlmEvent {
21+
module.exports = class LlmEmbedding extends LlmEvent {
2022
/**
2123
*
2224
* @param {object} params constructor parameters
@@ -42,5 +44,3 @@ class LlmEmbedding extends LlmEvent {
4244
}
4345
}
4446
}
45-
46-
module.exports = LlmEmbedding

lib/llm-events/google-genai/chat-completion-message.js

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,15 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6+
'use strict'
7+
68
const LlmChatCompletionMessage = require('../chat-completion-message')
79
const { getUsageTokens } = require('./utils')
810

911
/**
1012
* Encapsulates a Google Gen AI LlmChatCompletionMessage.
1113
*/
12-
class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
14+
module.exports = class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
1315
constructor({ agent,
1416
segment,
1517
transaction,
@@ -62,5 +64,3 @@ class GoogleGenAiLlmChatCompletionMessage extends LlmChatCompletionMessage {
6264
this.setTokenInCompletionMessage(tokens)
6365
}
6466
}
65-
66-
module.exports = GoogleGenAiLlmChatCompletionMessage

0 commit comments

Comments
 (0)