Merged
21 changes: 10 additions & 11 deletions lib/instrumentation/aws-sdk/v3/bedrock.js
@@ -4,22 +4,22 @@
  */

 'use strict'
+const StreamHandler = require('./stream-handler')
+const ConverseStreamHandler = require('./converse-stream-handler')
 const {
   LlmChatCompletionMessage,
   LlmChatCompletionSummary,
   LlmEmbedding,
-  LlmErrorMessage,
   BedrockCommand,
-  BedrockResponse,
-  StreamHandler
-} = require('../../../llm-events/aws-bedrock')
+  BedrockResponse
+} = require('#agentlib/llm-events/aws-bedrock/index.js')
+const LlmErrorMessage = require('#agentlib/llm-events/error-message.js')

 const { DESTINATIONS } = require('../../../config/attribute-filter')
 const { AI } = require('../../../metrics/names')
 const { RecorderSpec } = require('../../../shim/specs')
 const InstrumentationDescriptor = require('../../../instrumentation-descriptor')
 const { extractLlmContext } = require('../../../util/llm-utils')
-const ConverseStreamHandler = require('../../../llm-events/aws-bedrock/converse-stream-handler')

 let TRACKING_METRIC

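The `#agentlib/...` specifiers introduced here are Node.js subpath imports, not published packages; they only resolve if the agent's package.json declares a matching `imports` entry. A minimal sketch of the mapping these paths imply (the exact entry is an assumption, since package.json is not part of this diff):

  {
    "imports": {
      "#agentlib/*": "./lib/*"
    }
  }

With such an entry, require('#agentlib/llm-events/error-message.js') resolves to lib/llm-events/error-message.js from any depth in the tree, which is what allows the deep '../../../' relative requires to be dropped.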
@@ -73,7 +73,6 @@ function isStreamingEnabled({ commandName, config }) {
  * @param {object} params.msg LLM event
  */
 function recordEvent({ agent, type, msg }) {
-  msg.serialize()
   const llmContext = extractLlmContext(agent)
   const timestamp = msg?.timestamp ?? Date.now()

@@ -134,7 +133,7 @@ function recordChatCompletionMessages({
     bedrockCommand,
     transaction,
     segment,
-    isError: err !== null
+    error: err !== null
   })

   // Record context message(s)
@@ -149,7 +148,7 @@
       content: contextMessage.content,
       role: contextMessage.role,
       bedrockResponse,
-      index: i,
+      sequence: i,
       completionId: summary.id
     })
     recordEvent({ agent, type: 'LlmChatCompletionMessage', msg })
@@ -164,7 +163,7 @@
       bedrockCommand,
       bedrockResponse,
       isResponse: true,
-      index: promptContextMessages.length + i,
+      sequence: promptContextMessages.length + i,
       content,
       role: 'assistant',
       completionId: summary.id
@@ -217,9 +216,9 @@ function recordEmbeddingMessage({
     segment,
     transaction,
     bedrockCommand,
-    input: prompt.content,
+    requestInput: prompt.content,
     bedrockResponse,
-    isError: err !== null
+    error: err !== null
   }))

   for (const embedding of embeddings) {
2 changes: 1 addition & 1 deletion lib/llm-events/aws-bedrock/bedrock-command.js
@@ -5,7 +5,7 @@

 'use strict'

-const { stringifyClaudeChunkedMessage, stringifyConverseChunkedMessage } = require('./utils')
+const { stringifyClaudeChunkedMessage, stringifyConverseChunkedMessage } = require('./stringify-message')

 /**
  * Parses an AWS Bedrock command instance into a re-usable entity,
2 changes: 1 addition & 1 deletion lib/llm-events/aws-bedrock/bedrock-response.js
@@ -5,7 +5,7 @@

 'use strict'

-const { stringifyClaudeChunkedMessage, stringifyConverseChunkedMessage } = require('./utils')
+const { stringifyClaudeChunkedMessage, stringifyConverseChunkedMessage } = require('./stringify-message')

 /**
  * @typedef {object} AwsBedrockMiddlewareResponse
104 changes: 40 additions & 64 deletions lib/llm-events/aws-bedrock/chat-completion-message.js
@@ -1,82 +1,59 @@
 /*
- * Copyright 2023 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'

-const LlmEvent = require('./event')
-/**
- * @typedef {object} LlmChatCompletionParams
- * @augments LlmEventParams
- * @property {string} completionId An identifier for the completion message.
- * @property {string} content The human readable response from the LLM.
- * @property {number} [index=0] The order of the message in the conversation.
- * @property {boolean} [isResponse=false] Indicates if the message represents
- * a response from the LLM.
- * @property {object} message The message sent to the LLM.
- * @property {OutgoingMessage} request The outgoing HTTP request used in the
- * LLM conversation.
- */
-/**
- * @type {LlmChatCompletionParams}
- */
-const defaultParams = {
-  completionId: '',
-  content: '',
-  index: 0,
-  isResponse: false,
-  message: {},
-  request: {}
-}
+const LlmChatCompletionMessage = require('../chat-completion-message')

-/**
- * Represents an LLM chat completion.
- */
-class LlmChatCompletionMessage extends LlmEvent {
-  constructor(params = defaultParams) {
-    params = Object.assign({}, defaultParams, params)
-    super(params)
-
-    const { agent, content, isResponse, index, completionId, role, segment } = params
-    const recordContent = agent.config?.ai_monitoring?.record_content?.enabled
+module.exports = class AwsBedrockLlmChatCompletionMessage extends LlmChatCompletionMessage {
+  /**
+   *
+   * @param {object} params constructor parameters
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {object} params.segment Current segment
+   * @param {object} params.transaction Current and active transaction
+   * @param {object} params.bedrockCommand AWS Bedrock Command object, represents the request
+   * @param {object} params.bedrockResponse AWS Bedrock Response object
+   * @param {string} params.content Content of the message
+   * @param {string} [params.role] Role of the message creator (e.g. `user`, `assistant`, `tool`)
+   * @param {string} params.completionId ID of the `LlmChatCompletionSummary` event that
+   * this message event is connected to
+   * @param {number} params.sequence Index (beginning at 0) associated with
+   * each message including the prompt and responses
+   * @param {boolean} [params.isResponse] Indicates if this message is the response
+   */
+  constructor({ agent, segment, transaction, bedrockCommand, bedrockResponse, content, role, completionId, sequence = 0, isResponse }) {
+    super({ agent,
+      segment,
+      transaction,
+      vendor: 'bedrock',
+      content,
+      role,
+      sequence,
+      requestId: bedrockResponse?.requestId,
+      responseId: bedrockResponse?.id,
+      responseModel: bedrockCommand?.modelId, // we can assume requestModel==responseModel in bedrock
+      completionId,
+      isResponse })

-    this.is_response = isResponse
-    this.completion_id = completionId
-    this.sequence = index
-    this.content = recordContent === true ? content : undefined
-    this.role = role
-    if (this.is_response === false) {
-      // Only record for request/input messages
-      this.timestamp = segment.timer.start
-    }
-
-    this.#setId(index)
-    this.setTokenCount(agent)
+    this.setTokenCount(agent, bedrockCommand, bedrockResponse)
   }

-  #setId(index) {
-    const cmd = this.bedrockCommand
-    if (cmd.isConverse || cmd.isTitan() === true || cmd.isClaude() === true) {
-      this.id = `${this.id}-${index}`
-    } else if (cmd.isCohere() === true) {
-      this.id = `${this.bedrockResponse.id || this.id}-${index}`
-    }
-  }
-
-  setTokenCount(agent) {
+  setTokenCount(agent, bedrockCommand, bedrockResponse) {
     const tokenCB = agent?.llm?.tokenCountCallback

     if (tokenCB) {
-      const promptContent = this.bedrockCommand?.prompt?.map((msg) => msg.content).join(' ')
-      const completionContent = this.bedrockResponse?.completions?.join(' ')
+      const promptContent = bedrockCommand?.prompt?.map((msg) => msg.content).join(' ')
+      const completionContent = bedrockResponse?.completions?.join(' ')

       if (promptContent && completionContent) {
         this.setTokenFromCallback(
           {
             tokenCB,
-            reqModel: this.bedrockCommand.modelId,
-            resModel: this.bedrockCommand.modelId,
+            reqModel: bedrockCommand.modelId,
+            resModel: bedrockCommand.modelId,
             promptContent,
             completionContent
           }
@@ -85,8 +62,7 @@ class LlmChatCompletionMessage extends LlmEvent {
       return
     }

-    this.setTokenInCompletionMessage({ promptTokens: this.bedrockResponse.inputTokenCount, completionTokens: this.bedrockResponse.outputTokenCount })
+    this.setTokenInCompletionMessage({ promptTokens: bedrockResponse.inputTokenCount,
+      completionTokens: bedrockResponse.outputTokenCount })
   }
 }
-
-module.exports = LlmChatCompletionMessage
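Both versions of setTokenCount above consult `agent.llm.tokenCountCallback`, which applications register through the agent's public API. A minimal sketch of registering such a callback, assuming the documented `setLlmTokenCountCallback(callback)` API where the callback receives a model name and a content string and returns a token count:

  // Hypothetical registration; a real implementation would use a
  // model-appropriate tokenizer instead of this length heuristic.
  const newrelic = require('newrelic')

  newrelic.setLlmTokenCountCallback(function countTokens(model, content) {
    return Math.ceil(content.length / 4) // rough estimate: ~4 chars per token
  })

When no callback is registered, the code falls back to the token counts reported by Bedrock itself (inputTokenCount/outputTokenCount on the response).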
80 changes: 37 additions & 43 deletions lib/llm-events/aws-bedrock/chat-completion-summary.js
@@ -1,68 +1,62 @@
 /*
- * Copyright 2024 New Relic Corporation. All rights reserved.
+ * Copyright 2026 New Relic Corporation. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  */

 'use strict'

-const LlmEvent = require('./event')
-
-/**
- * @typedef {object} LlmChatCompletionSummaryParams
- * @augments LlmEventParams
- * @property {string} segment the segment associated with this LlmChatCompletionSummary
- * @property {boolean} isError whether this event represents an error
- */
-/**
- * @type {LlmChatCompletionSummaryParams}
- */
-const defaultParams = {}
-
-/**
- * Represents an LLM chat completion summary.
- */
-class LlmChatCompletionSummary extends LlmEvent {
-  constructor(params = defaultParams) {
-    super(params)
-
-    const { segment, isError, agent } = params
-    this.error = isError
-    this.duration = segment.getDurationInMillis()
-
-    const cmd = this.bedrockCommand
-    const res = this.bedrockResponse
-
-    this['request.max_tokens'] = cmd.maxTokens
-    this['response.choices.finish_reason'] = res.finishReason
-    this['request.temperature'] = cmd.temperature
-    this['response.number_of_messages'] = (cmd.prompt.length ?? 0) + (res.completions.length ?? 0)
-
-    this.timestamp = segment.timer.start
-    this.setTokens(agent)
+const LlmChatCompletionSummary = require('../chat-completion-summary')
+
+module.exports = class AwsBedrockLlmChatCompletionSummary extends LlmChatCompletionSummary {
+  /**
+   *
+   * @param {object} params constructor parameters
+   * @param {Agent} params.agent New Relic agent instance
+   * @param {object} params.segment Current segment
+   * @param {object} params.transaction Current and active transaction
+   * @param {object} params.bedrockCommand AWS Bedrock Command object, represents the request
+   * @param {object} params.bedrockResponse AWS Bedrock Response object
+   * @param {boolean} [params.error] Set to `true` if an error occurred during creation call, omitted if no error occurred
+   */
+  constructor({ agent, segment, transaction, bedrockCommand, bedrockResponse, error }) {
+    super({ agent,
+      segment,
+      transaction,
+      error,
+      vendor: 'bedrock',
+      requestModel: bedrockCommand?.modelId,
+      requestId: bedrockResponse?.requestId,
+      responseModel: bedrockCommand?.modelId, // we can assume requestModel==responseModel in bedrock
+      temperature: bedrockCommand.temperature,
+      maxTokens: bedrockCommand.maxTokens,
+      numMsgs: (bedrockCommand.prompt.length ?? 0) + (bedrockResponse.completions.length ?? 0),
+      finishReason: bedrockResponse?.finishReason })
+
+    this.setTokens(agent, bedrockCommand, bedrockResponse)
   }

-  setTokens(agent) {
+  setTokens(agent, bedrockCommand, bedrockResponse) {
     const tokenCB = agent?.llm?.tokenCountCallback

     // Prefer callback for prompt and completion tokens; if unavailable, fall back to response data.
     if (tokenCB) {
-      const promptContent = this.bedrockCommand?.prompt?.map((msg) => msg.content).join(' ')
-      const completionContent = this.bedrockResponse?.completions?.join(' ')
+      const promptContent = bedrockCommand?.prompt?.map((msg) => msg.content).join(' ')
+      const completionContent = bedrockResponse?.completions?.join(' ')

       this.setTokenUsageFromCallback(
         {
           tokenCB,
-          reqModel: this.bedrockCommand.modelId,
-          resModel: this.bedrockCommand.modelId,
+          reqModel: bedrockCommand.modelId,
+          resModel: bedrockCommand.modelId,
           promptContent,
           completionContent
         }
       )
       return
     }

-    this.setTokensInResponse({ promptTokens: this.bedrockResponse.inputTokenCount, completionTokens: this.bedrockResponse.outputTokenCount, totalTokens: this.bedrockResponse.totalTokenCount })
+    this.setTokensInResponse({ promptTokens: bedrockResponse.inputTokenCount,
+      completionTokens: bedrockResponse.outputTokenCount,
+      totalTokens: bedrockResponse.totalTokenCount })
   }
 }
-
-module.exports = LlmChatCompletionSummary
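Read together with the bedrock.js changes above, the new subclasses are constructed by the instrumentation roughly like this (a sketch assembled from the call sites in this diff; agent, segment, transaction, bedrockCommand, bedrockResponse, and err are in scope inside the middleware handler):

  const summary = new LlmChatCompletionSummary({
    agent,
    segment,
    transaction,
    bedrockCommand,
    bedrockResponse,
    error: err !== null // renamed from isError in this PR
  })
  recordEvent({ agent, type: 'LlmChatCompletionSummary', msg: summary })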