diff --git a/test/versioned/langchain-aws/package.json b/test/versioned/langchain-aws/package.json new file mode 100644 index 0000000000..bb8f04bd42 --- /dev/null +++ b/test/versioned/langchain-aws/package.json @@ -0,0 +1,33 @@ +{ + "name": "langchain-aws-bedrock-tests", + "targets": [ + { + "name": "@langchain/core", + "minSupported": "0.1.17", + "minAgentVersion": "11.13.0" + } + ], + "version": "0.0.0", + "private": true, + "engines": { + "node": ">=20" + }, + "tests": [ + { + "engines": { + "node": ">=20" + }, + "dependencies": { + "@langchain/aws": ">=1.1.0", + "@langchain/core": ">=1.0.0", + "@langchain/community": ">=1.0.0", + "@elastic/elasticsearch": "8.13.1" + }, + "files": [ + "runnables.test.js", + "runnables-streaming.test.js", + "vectorstore.test.js" + ] + } + ] +} \ No newline at end of file diff --git a/test/versioned/langchain-aws/runnables-streaming.test.js b/test/versioned/langchain-aws/runnables-streaming.test.js new file mode 100644 index 0000000000..3ec8e8f01f --- /dev/null +++ b/test/versioned/langchain-aws/runnables-streaming.test.js @@ -0,0 +1,117 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') +const assert = require('node:assert') + +const { removeModules } = require('../../lib/cache-buster') +const { match } = require('../../lib/custom-assertions') +const { + runStreamingEnabledTests, + runStreamingDisabledTest, + runAiMonitoringDisabledTests +} = require('../langchain/runnables-streaming') +const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../lib/aws-server-stubs') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true, + streaming: { + enabled: true + } + } +} +const createAiResponseServer = getAiResponseServer(__dirname) + +async function beforeEach({ enabled, ctx }) { + ctx.nr = {} + const { server, baseUrl } = await createAiResponseServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + ctx.nr.agent.config.ai_monitoring.streaming.enabled = enabled + + const { ChatPromptTemplate } = require('@langchain/core/prompts') + const { StringOutputParser, CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') + const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') + const { ChatBedrockConverse } = require('@langchain/aws') + const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') + ctx.nr.ChatPromptTemplate = ChatPromptTemplate + ctx.nr.CommaSeparatedListOutputParser = CommaSeparatedListOutputParser + ctx.nr.BaseCallbackHandler = BaseCallbackHandler + ctx.nr.langchainCoreVersion = require('@langchain/core/package.json').version + + // Create the BedrockRuntimeClient with our mock endpoint + const bedrockClient = new BedrockRuntimeClient({ + region: 'us-east-1', + credentials: FAKE_CREDENTIALS, + endpoint: baseUrl, + maxAttempts: 1 + }) + + ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate question {topic}']]) + ctx.nr.model = new ChatBedrockConverse({ + streaming: true, 
+ model: 'anthropic.claude-instant-v1', + region: 'us-east-1', + client: bedrockClient + }) + ctx.nr.outputParser = new StringOutputParser() +} + +async function afterEach(ctx) { + ctx.nr?.server?.destroy() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk']) +} + +test('streaming enabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runStreamingEnabledTests({ + inputData: { topic: 'streamed' }, + expectedInput: '{"topic":"streamed"}', + expectedContent: () => 'This is a test.', + errorPromptTemplate: ['assistant', 'text converse ultimate question streamed error'], + errorFromStreamEventCount: 4, + errorFromStreamLangchainEventCount: 2, + errorFromStreamAssertion: (exceptions) => { + assert.equal(exceptions.length, 2) + for (const e of exceptions) { + match(e, { + customAttributes: { + 'error.message': /Internal server error during streaming/, + completion_id: /[\w-]{36}/ + } + }) + } + } + })(t) +}) + +test('streaming disabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: false, ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runStreamingDisabledTest({ + inputData: { topic: 'streamed' }, + expectedContent: () => 'This is a test.', + streamingDisabledMessage: 'should increment streaming disabled in both langchain and bedrock' + })(t) +}) + +test('ai_monitoring disabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runAiMonitoringDisabledTests({ + inputData: { topic: 'streamed' }, + expectedContent: () => 'This is a test.' 
+ })(t) +}) diff --git a/test/versioned/langchain-aws/runnables.test.js b/test/versioned/langchain-aws/runnables.test.js new file mode 100644 index 0000000000..a82f7d63d4 --- /dev/null +++ b/test/versioned/langchain-aws/runnables.test.js @@ -0,0 +1,69 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') + +const { removeModules } = require('../../lib/cache-buster') +const { runRunnablesTests } = require('../langchain/runnables') +const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../lib/aws-server-stubs') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true + } +} +const createAiResponseServer = getAiResponseServer(__dirname) + +test.beforeEach(async (ctx) => { + ctx.nr = {} + const { server, baseUrl } = await createAiResponseServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + + const { ChatPromptTemplate } = require('@langchain/core/prompts') + const { StringOutputParser, CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') + const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') + const { ChatBedrockConverse } = require('@langchain/aws') + const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') + ctx.nr.ChatPromptTemplate = ChatPromptTemplate + ctx.nr.CommaSeparatedListOutputParser = CommaSeparatedListOutputParser + ctx.nr.BaseCallbackHandler = BaseCallbackHandler + ctx.nr.langchainCoreVersion = require('@langchain/core/package.json').version + + // Create the BedrockRuntimeClient with our mock endpoint + const bedrockClient = new BedrockRuntimeClient({ + region: 'us-east-1', + credentials: FAKE_CREDENTIALS, + endpoint: baseUrl, + maxAttempts: 1 + }) + + ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate {topic}']]) + ctx.nr.model = new 
ChatBedrockConverse({ + model: 'anthropic.claude-3-haiku-20240307-v1:0', + region: 'us-east-1', + client: bedrockClient + }) + ctx.nr.outputParser = new StringOutputParser() +}) + +test.afterEach(async (ctx) => { + ctx.nr?.server?.destroy() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk']) +}) + +runRunnablesTests({ + inputData: { topic: 'question' }, + expectedInput: '{"topic":"question"}', + expectedOutput: 'This is a test.', + errorPromptTemplate: ['assistant', 'text converse ultimate question error'], + errorEventCount: 5, + arrayParserOutput: '["This is a test."]' +}) diff --git a/test/versioned/langchain-aws/vectorstore.test.js b/test/versioned/langchain-aws/vectorstore.test.js new file mode 100644 index 0000000000..458e4000b9 --- /dev/null +++ b/test/versioned/langchain-aws/vectorstore.test.js @@ -0,0 +1,81 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') + +const { removeModules } = require('../../lib/cache-buster') +const { runVectorstoreTests } = require('../langchain/vectorstore') +const { Document } = require('@langchain/core/documents') +const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../lib/aws-server-stubs') +const params = require('../../lib/params') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true + } +} +const createAiResponseServer = getAiResponseServer(__dirname) + +test.beforeEach(async (ctx) => { + ctx.nr = {} + const { server, baseUrl } = await createAiResponseServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + + const { BedrockEmbeddings } = require('@langchain/aws') + const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') + ctx.nr.langchainCoreVersion = 
require('@langchain/core/package.json').version + + const { Client } = require('@elastic/elasticsearch') + const clientArgs = { + client: new Client({ + node: `http://${params.elastic_host}:${params.elastic_port}` + }), + indexName: 'test_langchain_aws_vectorstore' + } + const { ElasticVectorSearch } = require('@langchain/community/vectorstores/elasticsearch') + + // Create the BedrockRuntimeClient with our mock endpoint + const bedrockClient = new BedrockRuntimeClient({ + region: 'us-east-1', + credentials: FAKE_CREDENTIALS, + endpoint: baseUrl, + maxAttempts: 1 + }) + + ctx.nr.embedding = new BedrockEmbeddings({ + model: 'amazon.titan-embed-text-v1', + region: 'us-east-1', + client: bedrockClient, + maxRetries: 0 + }) + const docs = [ + new Document({ + metadata: { id: '2' }, + pageContent: 'embed text amazon token count callback response' + }) + ] + const vectorStore = new ElasticVectorSearch(ctx.nr.embedding, clientArgs) + await vectorStore.deleteIfExists() + await vectorStore.addDocuments(docs) + ctx.nr.vs = vectorStore +}) + +test.afterEach(async (ctx) => { + await ctx.nr?.vs?.deleteIfExists() + ctx.nr?.server?.destroy() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk', '@elastic', '@langchain/community']) +}) + +runVectorstoreTests({ + searchQuery: 'embed text amazon token count callback response', + expectedQuery: 'embed text amazon token count callback response', + expectedPageContent: 'embed text amazon token count callback response' +}) diff --git a/test/versioned/langchain-openai/package.json b/test/versioned/langchain-openai/package.json new file mode 100644 index 0000000000..9a110080e0 --- /dev/null +++ b/test/versioned/langchain-openai/package.json @@ -0,0 +1,34 @@ +{ + "name": "langchain-openai-tests", + "targets": [ + { + "name": "@langchain/core", + "minSupported": "0.1.17", + "minAgentVersion": "11.13.0" + } + ], + "version": "0.0.0", + 
"private": true, + "engines": { + "node": ">=20" + }, + "tests": [ + { + "engines": { + "node": ">=20" + }, + "dependencies": { + "@langchain/core": ">=1.0.0", + "@langchain/community": ">=1.0.0", + "@langchain/openai": ">=1.0.0", + "openai": "4.90.0", + "@elastic/elasticsearch": "8.13.1" + }, + "files": [ + "runnables.test.js", + "runnables-streaming.test.js", + "vectorstore.test.js" + ] + } + ] +} \ No newline at end of file diff --git a/test/versioned/langchain-openai/runnables-streaming.test.js b/test/versioned/langchain-openai/runnables-streaming.test.js new file mode 100644 index 0000000000..f5da21bd40 --- /dev/null +++ b/test/versioned/langchain-openai/runnables-streaming.test.js @@ -0,0 +1,120 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') +const assert = require('node:assert') + +const { removeModules } = require('../../lib/cache-buster') +const { match } = require('../../lib/custom-assertions') +const { + runStreamingEnabledTests, + runStreamingDisabledTest, + runAiMonitoringDisabledTests +} = require('../langchain/runnables-streaming') +const createOpenAIMockServer = require('../openai/mock-server') +const mockResponses = require('../openai/mock-chat-api-responses') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true, + streaming: { + enabled: true + } + } +} + +async function beforeEach({ enabled, ctx }) { + ctx.nr = {} + const { host, port, server } = await createOpenAIMockServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + ctx.nr.agent.config.ai_monitoring.streaming.enabled = enabled + + const { ChatPromptTemplate } = require('@langchain/core/prompts') + const { StringOutputParser, CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') + const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') + const { 
ChatOpenAI } = require('@langchain/openai') + ctx.nr.ChatPromptTemplate = ChatPromptTemplate + ctx.nr.CommaSeparatedListOutputParser = CommaSeparatedListOutputParser + ctx.nr.BaseCallbackHandler = BaseCallbackHandler + ctx.nr.langchainCoreVersion = require('@langchain/core/package.json').version + + ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', '{topic} response']]) + ctx.nr.model = new ChatOpenAI({ + streaming: true, + apiKey: 'fake-key', + maxRetries: 0, + configuration: { + baseURL: `http://${host}:${port}` + } + }) + ctx.nr.outputParser = new StringOutputParser() +} + +async function afterEach(ctx) { + ctx.nr?.server?.close() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', 'openai']) +} + +test('streaming enabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runStreamingEnabledTests({ + inputData: { topic: 'Streamed' }, + expectedInput: '{"topic":"Streamed"}', + expectedContent: () => mockResponses.get('Streamed response').streamData, + errorPromptTemplate: ['assistant', '{topic} stream'], + errorFromInputAssertion: (exceptions) => { + for (const e of exceptions) { + const str = Object.prototype.toString.call(e.customAttributes) + assert.equal(str, '[object LlmErrorMessage]') + } + }, + errorFromStreamAssertion: (exceptions) => { + for (const e of exceptions) { + // skip the socket error as it is not related to LLM + // this started occurring when openai used undici as the HTTP client + if (e.error.code === 'UND_ERR_SOCKET') { + continue + } + const str = Object.prototype.toString.call(e.customAttributes) + assert.equal(str, '[object LlmErrorMessage]') + match(e, { + customAttributes: { + 'error.message': /(?:Premature close)|(?:terminated)/, + completion_id: /\w{32}/ + } + }) + } + } + })(t) +}) + +test('streaming disabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: false, 
ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runStreamingDisabledTest({ + inputData: { topic: 'Streamed' }, + expectedContent: () => mockResponses.get('Streamed response').streamData, + streamingDisabledMessage: 'should increment streaming disabled in both langchain and openai' + })(t) +}) + +test('ai_monitoring disabled', async (t) => { + t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) + t.afterEach((ctx) => afterEach(ctx)) + + await runAiMonitoringDisabledTests({ + inputData: { topic: 'Streamed' }, + expectedContent: () => mockResponses.get('Streamed response').streamData + })(t) +}) diff --git a/test/versioned/langchain-openai/runnables.test.js b/test/versioned/langchain-openai/runnables.test.js new file mode 100644 index 0000000000..6a2b545384 --- /dev/null +++ b/test/versioned/langchain-openai/runnables.test.js @@ -0,0 +1,65 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') +const assert = require('node:assert') + +const { removeModules } = require('../../lib/cache-buster') +const { runRunnablesTests } = require('../langchain/runnables') +const createOpenAIMockServer = require('../openai/mock-server') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true + } +} + +test.beforeEach(async (ctx) => { + ctx.nr = {} + const { host, port, server } = await createOpenAIMockServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + + const { ChatPromptTemplate } = require('@langchain/core/prompts') + const { StringOutputParser, CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') + const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') + const { ChatOpenAI } = require('@langchain/openai') + ctx.nr.ChatPromptTemplate = ChatPromptTemplate + ctx.nr.CommaSeparatedListOutputParser = CommaSeparatedListOutputParser + 
ctx.nr.BaseCallbackHandler = BaseCallbackHandler + ctx.nr.langchainCoreVersion = require('@langchain/core/package.json').version + + ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'You are a {topic}.']]) + ctx.nr.model = new ChatOpenAI({ + apiKey: 'fake-key', + maxRetries: 0, + configuration: { + baseURL: `http://${host}:${port}` + } + }) + ctx.nr.outputParser = new StringOutputParser() +}) + +test.afterEach(async (ctx) => { + ctx.nr?.server?.close() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', 'openai']) +}) + +runRunnablesTests({ + inputData: { topic: 'scientist' }, + arrayParserOutput: '["212 degrees Fahrenheit is equal to 100 degrees Celsius."]', + errorPromptTemplate: ['assistant', 'Invalid API key.'], + errorAssertion: (exceptions) => { + for (const e of exceptions) { + const str = Object.prototype.toString.call(e.customAttributes) + assert.equal(str, '[object LlmErrorMessage]') + } + } +}) diff --git a/test/versioned/langchain-openai/vectorstore.test.js b/test/versioned/langchain-openai/vectorstore.test.js new file mode 100644 index 0000000000..8f8803f03c --- /dev/null +++ b/test/versioned/langchain-openai/vectorstore.test.js @@ -0,0 +1,76 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') +const assert = require('node:assert') + +const { removeModules } = require('../../lib/cache-buster') +const { runVectorstoreTests } = require('../langchain/vectorstore') +const { Document } = require('@langchain/core/documents') +const createOpenAIMockServer = require('../openai/mock-server') +const params = require('../../lib/params') +const helper = require('../../lib/agent_helper') + +const config = { + ai_monitoring: { + enabled: true + } +} + +test.beforeEach(async (ctx) => { + ctx.nr = {} + const { host, port, server } = await createOpenAIMockServer() + ctx.nr.server = server + ctx.nr.agent = helper.instrumentMockedAgent(config) + + const { OpenAIEmbeddings } = require('@langchain/openai') + ctx.nr.langchainCoreVersion = require('@langchain/core/package.json').version + + const { Client } = require('@elastic/elasticsearch') + const clientArgs = { + client: new Client({ + node: `http://${params.elastic_host}:${params.elastic_port}` + }), + indexName: 'test_langchain_openai_vectorstore' + } + const { ElasticVectorSearch } = require('@langchain/community/vectorstores/elasticsearch') + + ctx.nr.embedding = new OpenAIEmbeddings({ + apiKey: 'fake-key', + configuration: { + baseURL: `http://${host}:${port}` + } + }) + const docs = [ + new Document({ + metadata: { id: '2' }, + pageContent: 'This is an embedding test.' 
+ }) + ] + const vectorStore = new ElasticVectorSearch(ctx.nr.embedding, clientArgs) + await vectorStore.deleteIfExists() + await vectorStore.addDocuments(docs) + ctx.nr.vs = vectorStore +}) + +test.afterEach(async (ctx) => { + await ctx.nr?.vs?.deleteIfExists() + ctx.nr?.server?.close() + helper.unloadAgent(ctx.nr.agent) + // bust the require-cache so it can re-instrument + removeModules(['@langchain/core', 'openai', '@elastic', '@langchain/community']) +}) + +runVectorstoreTests({ + searchQuery: 'This is an embedding test.', + errorAssertion: (exceptions) => { + for (const e of exceptions) { + const str = Object.prototype.toString.call(e.customAttributes) + assert.equal(str, '[object LlmErrorMessage]') + } + } +}) diff --git a/test/versioned/langchain/bedrock/runnables-streaming.test.js b/test/versioned/langchain/bedrock/runnables-streaming.test.js deleted file mode 100644 index 23969bbedf..0000000000 --- a/test/versioned/langchain/bedrock/runnables-streaming.test.js +++ /dev/null @@ -1,598 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') -const path = require('node:path') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind, match } = require('../../../lib/custom-assertions') -const { - assertLangChainChatCompletionMessages, - assertLangChainChatCompletionSummary, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { version: pkgVersion } = require('@langchain/core/package.json') -const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../../lib/aws-server-stubs') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true, - streaming: { - enabled: true - } - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') -const createAiResponseServer = getAiResponseServer(path.join(__dirname, '../')) - -function consumeStreamChunk() { - // A no-op function used to consume chunks of a stream. 
-} - -async function beforeEach({ enabled, ctx }) { - ctx.nr = {} - const { server, baseUrl } = await createAiResponseServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - ctx.nr.agent.config.ai_monitoring.streaming.enabled = enabled - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const { StringOutputParser } = require('@langchain/core/output_parsers') - const { ChatBedrockConverse } = require('@langchain/aws') - const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') - - // Create the BedrockRuntimeClient with our mock endpoint - const bedrockClient = new BedrockRuntimeClient({ - region: 'us-east-1', - credentials: FAKE_CREDENTIALS, - endpoint: baseUrl, - maxAttempts: 1 - }) - - ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate question {topic}']]) - ctx.nr.model = new ChatBedrockConverse({ - streaming: true, - model: 'anthropic.claude-instant-v1', - region: 'us-east-1', - client: bedrockClient - }) - ctx.nr.outputParser = new StringOutputParser() -} - -async function afterEach(ctx) { - ctx.nr?.server?.destroy() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk']) -} - -test('streaming enabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) - }) - - await t.test('should create langchain events for every stream call', (t, end) => { - const { agent, prompt, model, outputParser } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let 
content = '' - for await (const chunk of stream) { - content += chunk - } - - assert.equal(content, 'This is a test.') - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - - tx.end() - end() - }) - }) - - await t.test( - 'should increment tracking metric for each langchain chat prompt event', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - } - - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model + parser', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - content += chunk - } - assert.equal(content, 'This is a test.') - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 
'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model', - (t, end) => { - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - // No parser, so have to look at content for text string - content += chunk?.content - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call with parser that returns an array as output', - (t, end) => { - const { CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const parser = new CommaSeparatedListOutputParser() - - const input = { topic: 'streamed' } - const 
options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(parser) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test('should add runId when a callback handler exists', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - let runId - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart(...args) { - runId = args?.[2] - } - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - assert.equal(langchainEvents[0][1].request_id, runId) - - tx.end() - end() - }) - }) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model + parser with callback', - 
(t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart() {} - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - - let content = '' - for await (const chunk of stream) { - content += chunk - } - assert.equal(content, 'This is a test.') - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0], - withCallback: cbHandler - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - withCallback: cbHandler, - input: '{"topic":"streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test('should not create langchain events when not in a transaction', async (t) => { - const { agent, prompt, outputParser, model } = t.nr - - const input = { topic: 'streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create langchain events') - }) - - await t.test('should add llm attribute to transaction', (t, end) => { - const { agent, prompt, 
model } = t.nr - - const input = { topic: 'streamed' } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) - }) - - await t.test('should create span on successful runnables create', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'streamed' } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/stream'], { exact: false }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/stream', kind: 'internal' }] }) - end() - }) - }) - - // testing JSON.stringify on request (input) during creation of LangChainCompletionMessage event - await t.test( - 'should use empty string for content property on completion message event when invalid input is used - circular reference', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - input.myself = input - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - - const msgEventEmptyContent = langChainMessageEvents.filter( - (event) => event[1].content === '' - ) - - assert.equal( - msgEventEmptyContent.length, - 
1, - 'should have 1 event with empty content property' - ) - - tx.end() - end() - }) - } - ) - - await t.test('should create error events from input', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([ - ['assistant', 'tell me short joke about {topic}'] - ]) - const { agent, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - await chain.stream('') - } catch (error) { - assert.ok(error) - } - - // No bedrock events as it errors before talking to LLM - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 2, 'should create 2 events') - - const summary = events.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - assert.ok(e.customAttributes?.['error.message'], 'error.message should be set') - } - - tx.end() - end() - }) - }) - - await t.test('should create error events when stream fails', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate question streamed error']]) - const { agent, model, outputParser } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - const stream = await chain.stream({ topic: 'bad' }) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - } - } catch (error) { - assert.ok(error) - } - - // We should get 3xLangChain and 1xLLM events. 
- const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 4, 'should create 4 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - assert.equal(langchainEvents.length, 2, 'should create 2 langchain events') - const summary = langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - assert.equal(exceptions.length, 2) - for (const e of exceptions) { - match(e, { - customAttributes: { - 'error.message': /Internal server error during streaming/, - completion_id: /[\w-]{36}/ - } - }) - } - tx.end() - end() - }) - }) -}) - -test('streaming disabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: false, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test( - 'should not create llm events when `ai_monitoring.streaming.enabled` is false', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let content = '' - for await (const chunk of stream) { - content += chunk - } - assert.equal(content, 'This is a test.') - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create llm events when streaming is disabled') - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - const streamingDisabled = agent.metrics.getOrCreateMetric( - 'Supportability/Nodejs/ML/Streaming/Disabled' - ) - assert.equal( - 
streamingDisabled.callCount, - 2, - 'should increment streaming disabled in both langchain and bedrock' - ) - - tx.end() - end() - }) - } - ) -}) diff --git a/test/versioned/langchain/bedrock/runnables.test.js b/test/versioned/langchain/bedrock/runnables.test.js deleted file mode 100644 index 59c5fb30c8..0000000000 --- a/test/versioned/langchain/bedrock/runnables.test.js +++ /dev/null @@ -1,456 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') -const path = require('node:path') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../../lib/custom-assertions') -const { - assertLangChainChatCompletionMessages, - assertLangChainChatCompletionSummary, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { version: pkgVersion } = require('@langchain/core/package.json') -const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../../lib/aws-server-stubs') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') -const createAiResponseServer = getAiResponseServer(path.join(__dirname, '../')) - -test.beforeEach(async (ctx) => { - ctx.nr = {} - const { server, baseUrl } = await createAiResponseServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const { StringOutputParser } = require('@langchain/core/output_parsers') - const { ChatBedrockConverse } = require('@langchain/aws') - const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') - - // Create the BedrockRuntimeClient with our mock endpoint - const bedrockClient = new 
BedrockRuntimeClient({ - region: 'us-east-1', - credentials: FAKE_CREDENTIALS, - endpoint: baseUrl, - maxAttempts: 1 - }) - - ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate {topic}']]) - ctx.nr.model = new ChatBedrockConverse({ - model: 'anthropic.claude-3-haiku-20240307-v1:0', - region: 'us-east-1', - client: bedrockClient - }) - ctx.nr.outputParser = new StringOutputParser() -}) - -test.afterEach(async (ctx) => { - ctx.nr?.server?.destroy() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk']) -}) - -test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) -}) - -test('should create langchain events for every invoke call', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - - tx.end() - end() - }) -}) - -test('should increment tracking metric for each langchain chat prompt event', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = 
prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - - tx.end() - end() - }) -}) - -test('should support custom attributes on the LLM events', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - const api = helper.getAgentApi() - helper.runInTransaction(agent, async (tx) => { - api.withLlmCustomAttributes({ 'llm.contextAttribute': 'someValue' }, async () => { - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - const events = agent.customEventAggregator.events.toArray() - - const [[, message]] = events - assert.equal(message['llm.contextAttribute'], 'someValue') - - tx.end() - end() - }) - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model + parser', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: 
'{"topic":"question"}', - output: 'This is a test.' - }) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model', (t, end) => { - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"question"}', - output: 'This is a test.' 
- }) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call with parser that returns an array as output', (t, end) => { - const { CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const parser = new CommaSeparatedListOutputParser() - - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(parser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"question"}', - output: '["This is a test."]' - }) - - tx.end() - end() - }) -}) - -test('should add runId when a callback handler exists', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - let runId - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart(...args) { - runId = args?.[2] - } - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const 
langchainEvents = filterLangchainEvents(events) - assert.equal(langchainEvents[0][1].request_id, runId) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model + parser with callback', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart() {} - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'question' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0], - withCallback: cbHandler - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - withCallback: cbHandler, - input: '{"topic":"question"}', - output: 'This is a test.' 
- }) - - tx.end() - end() - }) -}) - -test('should not create langchain events when not in a transaction', async (t) => { - const { agent, prompt, outputParser, model } = t.nr - - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create langchain events') -}) - -test('should add llm attribute to transaction', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - await chain.invoke(input, options) - - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) -}) - -test('should create span on successful runnables create', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'question' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const result = await chain.invoke(input, options) - - assert.ok(result) - assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/invoke'], { exact: false }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/invoke', kind: 'internal' }] }) - end() - }) -}) - -// testing JSON.stringify on request (input) during creation of LangChainCompletionMessage event -test('should use empty string for content property on completion message event when invalid input is used - circular reference', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) 
=> { - const input = { topic: 'question' } - input.myself = input - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - - const msgEventEmptyContent = langChainMessageEvents.filter((event) => event[1].content === '') - - assert.equal(msgEventEmptyContent.length, 1, 'should have 1 event with empty content property') - - tx.end() - end() - }) -}) - -test('should create error events', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([['assistant', 'text converse ultimate question error']]) - const { agent, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - await chain.invoke('') - } catch (error) { - assert.ok(error) - } - - // We should still get the same 3xLangChain and 2xLLM events as in the - // success case: - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 5, 'should create 5 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - const summary = langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - assert.equal(exceptions.length, 2) - for (const e of exceptions) { - assert.ok(e.customAttributes?.['error.message']) - } - - tx.end() - end() - }) -}) diff --git 
a/test/versioned/langchain/bedrock/vectorstore.test.js b/test/versioned/langchain/bedrock/vectorstore.test.js deleted file mode 100644 index 9644938172..0000000000 --- a/test/versioned/langchain/bedrock/vectorstore.test.js +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') -const path = require('node:path') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../../lib/custom-assertions') -const { - assertLangChainVectorSearch, - assertLangChainVectorSearchResult, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { Document } = require('@langchain/core/documents') -const { FAKE_CREDENTIALS, getAiResponseServer } = require('../../../lib/aws-server-stubs') -const params = require('../../../lib/params') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') -const { tspl } = require('@matteo.collina/tspl') -const createAiResponseServer = getAiResponseServer(path.join(__dirname, '../')) - -test.beforeEach(async (ctx) => { - ctx.nr = {} - const { server, baseUrl } = await createAiResponseServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - const { BedrockEmbeddings } = require('@langchain/aws') - const { BedrockRuntimeClient } = require('@aws-sdk/client-bedrock-runtime') - - const { Client } = require('@elastic/elasticsearch') - const clientArgs = { - client: new Client({ - node: `http://${params.elastic_host}:${params.elastic_port}` - }) - } - const { ElasticVectorSearch } = require('@langchain/community/vectorstores/elasticsearch') - - // Create the BedrockRuntimeClient with our mock 
endpoint - const bedrockClient = new BedrockRuntimeClient({ - region: 'us-east-1', - credentials: FAKE_CREDENTIALS, - endpoint: baseUrl, - maxAttempts: 1 - }) - - ctx.nr.embedding = new BedrockEmbeddings({ - model: 'amazon.titan-embed-text-v1', - region: 'us-east-1', - client: bedrockClient, - maxRetries: 0 - }) - const docs = [ - new Document({ - metadata: { id: '2' }, - pageContent: 'embed text amazon token count callback response' - }) - ] - const vectorStore = new ElasticVectorSearch(ctx.nr.embedding, clientArgs) - await vectorStore.deleteIfExists() - await vectorStore.addDocuments(docs) - ctx.nr.vs = vectorStore -}) - -test.afterEach(async (ctx) => { - ctx.nr?.server?.destroy() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', '@langchain/aws', '@aws-sdk', '@elastic', '@langchain/community']) -}) - -test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) -}) - -test('should create vectorstore events for every similarity search call', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - await vs.similaritySearch('embed text amazon token count callback response', 1) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 3, 'should create 3 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 2, 'should create 2 langchain events') - - tx.end() - end() - }) -}) - -test('should create span on successful vectorstore create', (t, end) => { - const { agent, vs } = t.nr - helper.runInTransaction(agent, async (tx) => { - const result = await vs.similaritySearch('embed text amazon token count callback response', 1) - assert.ok(result) - 
assertSegments(tx.trace, tx.trace.root, ['Llm/vectorstore/Langchain/similaritySearch'], { - exact: false - }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/vectorstore/Langchain/similaritySearch', kind: 'internal' }] }) - end() - }) -}) - -test('should increment tracking metric for each langchain vectorstore event', async (t) => { - const plan = tspl(t, { plan: 1 }) - const { agent, vs } = t.nr - - await helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('embed text amazon token count callback response', 1) - - // `@langchain/community` and `@langchain/aws` have diverged on the `@langchain/core` - // version. Find the right one that has a call count - - for (const metric in agent.metrics._metrics.unscoped) { - if (metric.startsWith('Supportability/Nodejs/ML/Langchain')) { - plan.equal(agent.metrics._metrics.unscoped[metric].callCount > 0, true) - } - } - tx.end() - }) - await plan.completed -}) - -test('should create vectorstore events for every similarity search call with embeddings', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - await vs.similaritySearch('embed text amazon token count callback response', 1) - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - - const vectorSearchResultEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmVectorSearchResult' - ) - - const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch') - - assertLangChainVectorSearch({ - tx, - vectorSearch: vectorSearchEvents[0], - responseDocumentSize: 1, - expectedQuery: 'embed text amazon token count callback response' - }) - assertLangChainVectorSearchResult({ - tx, - vectorSearchResult: vectorSearchResultEvents, - vectorSearchId: vectorSearchEvents[0][1].id, - expectedPageContent: 'embed text amazon token count callback response' - }) - - tx.end() - end() - }) -}) - -test('should create 
only vectorstore search event for similarity search call with embeddings and invalid metadata filter', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - // search for documents with invalid filter - await vs.similaritySearch('embed text amazon token count callback response', 1, { - a: 'some filter' - }) - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - - const vectorSearchResultEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmVectorSearchResult' - ) - - const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch') - - // there are no documents in vector store with that filter - assert.equal(vectorSearchResultEvents.length, 0, 'should have 0 events') - assertLangChainVectorSearch({ - tx, - vectorSearch: vectorSearchEvents[0], - responseDocumentSize: 0, - expectedQuery: 'embed text amazon token count callback response' - }) - - tx.end() - end() - }) -}) - -test('should not create vectorstore events when not in a transaction', async (t) => { - const { agent, vs } = t.nr - - await vs.similaritySearch('embed text amazon token count callback response', 1) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create vectorstore events') -}) - -test('should add llm attribute to transaction', (t, end) => { - const { agent, vs } = t.nr - - helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('embed text amazon token count callback response', 1) - - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) -}) - -test('should create error events', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - try { - await vs.similaritySearch('Embedding not allowed.', 1) - } catch (error) { - assert.ok(error) - } - - const 
events = agent.customEventAggregator.events.toArray() - // Only LlmEmbedding and LlmVectorSearch events will be created - // LangChainVectorSearchResult event won't be created since there was an error - assert.equal(events.length, 2, 'should create 2 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 1, 'should create 1 langchain vectorsearch event') - assert.equal(langchainEvents[0][1].error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - assert.ok(e?.customAttributes?.['error.message']) - } - - tx.end() - end() - }) -}) diff --git a/test/versioned/langchain/helpers/custom-tool.js b/test/versioned/langchain/custom-tool.js similarity index 100% rename from test/versioned/langchain/helpers/custom-tool.js rename to test/versioned/langchain/custom-tool.js diff --git a/test/versioned/langchain/openai/runnables-streaming.test.js b/test/versioned/langchain/openai/runnables-streaming.test.js deleted file mode 100644 index 6581fb042b..0000000000 --- a/test/versioned/langchain/openai/runnables-streaming.test.js +++ /dev/null @@ -1,763 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind, match } = require('../../../lib/custom-assertions') -const { findSegment } = require('../../../lib/metrics_helper') -const { - assertLangChainChatCompletionMessages, - assertLangChainChatCompletionSummary, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { version: pkgVersion } = require('@langchain/core/package.json') -const createOpenAIMockServer = require('../../openai/mock-server') -const mockResponses = require('../../openai/mock-chat-api-responses') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true, - streaming: { - enabled: true - } - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') - -function consumeStreamChunk() { - // A no-op function used to consume chunks of a stream. 
-} - -async function beforeEach({ enabled, ctx }) { - ctx.nr = {} - const { host, port, server } = await createOpenAIMockServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - ctx.nr.agent.config.ai_monitoring.streaming.enabled = enabled - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const { StringOutputParser } = require('@langchain/core/output_parsers') - const { ChatOpenAI } = require('@langchain/openai') - - ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', '{topic} response']]) - ctx.nr.model = new ChatOpenAI({ - streaming: true, - apiKey: 'fake-key', - maxRetries: 0, - configuration: { - baseURL: `http://${host}:${port}` - } - }) - ctx.nr.outputParser = new StringOutputParser() -} - -async function afterEach(ctx) { - ctx.nr?.server?.close() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', 'openai']) -} - -test('streaming enabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) - }) - - await t.test('should create langchain events for every stream call', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const { streamData: expectedContent } = mockResponses.get('Streamed response') - assert.equal(content, expectedContent) - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const 
langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - - tx.end() - end() - }) - }) - - await t.test( - 'should increment tracking metric for each langchain chat prompt event', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - } - - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model + parser', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"Streamed"}', - 
output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model', - (t, end) => { - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - // Have to look at content because there's no parser - content += chunk?.content - } - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"Streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should create langchain events for every stream call with parser that returns an array as output', - (t, end) => { - const { CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const parser = new CommaSeparatedListOutputParser() - - const input = { topic: 'Streamed' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(parser) - const stream = await chain.stream(input, options) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const events = 
agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - input: '{"topic":"Streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test('should add runId when a callback handler exists', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - let runId - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart(...args) { - runId = args?.[2] - } - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - assert.equal(langchainEvents[0][1].request_id, runId) - - tx.end() - end() - }) - }) - - await t.test( - 'should create langchain events for every stream call on chat prompt + model + parser with callback', - (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart() {} - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - 
const input = { topic: 'Streamed' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0], - withCallback: cbHandler - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - withCallback: cbHandler, - input: '{"topic":"Streamed"}', - output: content - }) - - tx.end() - end() - }) - } - ) - - await t.test('should not create langchain events when not in a transaction', async (t) => { - const { agent, prompt, outputParser, model } = t.nr - - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create langchain events') - }) - - await t.test('should add llm attribute to transaction', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'Streamed' } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const attributes = 
tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) - }) - - await t.test('should create span on successful runnables create', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'Streamed' } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/stream'], { exact: false }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/stream', kind: 'internal' }] }) - end() - }) - }) - - // testing JSON.stringify on request (input) during creation of LangChainCompletionMessage event - await t.test( - 'should use empty string for content property on completion message event when invalid input is used - circular reference', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - input.myself = input - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - - const msgEventEmptyContent = langChainMessageEvents.filter( - (event) => event[1].content === '' - ) - - assert.equal( - msgEventEmptyContent.length, - 1, - 'should have 1 event with empty content property' - ) - - tx.end() - end() - }) - } - ) - - await t.test('should create error events from input', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([ - 
['assistant', 'tell me short joke about {topic}'] - ]) - const { agent, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - await chain.stream('') - } catch (error) { - assert.ok(error) - } - - // No openai events as it errors before talking to LLM - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 2, 'should create 2 events') - - const summary = events.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - const str = Object.prototype.toString.call(e.customAttributes) - assert.equal(str, '[object LlmErrorMessage]') - } - - tx.end() - end() - }) - }) - - await t.test('should create error events when stream fails', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([['assistant', '{topic} stream']]) - const { agent, model, outputParser } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - const stream = await chain.stream({ topic: 'bad' }) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - // no-op - } - } catch (error) { - assert.ok(error) - } - - // We should still get the same 3xLangChain and 3xLLM events as in the - // success case: - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - const summary = langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - 
- // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - // skip the socket error as it is not related to LLM - // this started occurring when openai used undici as the HTTP client - if (e.error.code === 'UND_ERR_SOCKET') { - continue - } - const str = Object.prototype.toString.call(e.customAttributes) - assert.equal(str, '[object LlmErrorMessage]') - match(e, { - customAttributes: { - 'error.message': /(?:Premature close)|(?:terminated)/, - completion_id: /\w{32}/ - } - }) - } - tx.end() - end() - }) - }) -}) - -test('streaming disabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: false, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test( - 'should not create llm events when `ai_monitoring.streaming.enabled` is false', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const { streamData: expectedContent } = mockResponses.get('Streamed response') - assert.equal(content, expectedContent) - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create llm events when streaming is disabled') - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - const streamingDisabled = agent.metrics.getOrCreateMetric( - 'Supportability/Nodejs/ML/Streaming/Disabled' - ) - assert.equal( - streamingDisabled.callCount, - 2, - 'should increment streaming disabled in both langchain and openai' - ) - - tx.end() - end() - }) - } - ) -}) - 
-test('ai_monitoring disabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test( - 'should not create llm events when `ai_monitoring.enabled` is false', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const { streamData: expectedContent } = mockResponses.get('Streamed response') - assert.equal(content, expectedContent) - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled') - - tx.end() - end() - }) - } - ) - - await t.test( - 'should not create segment when `ai_monitoring.enabled` is false', - (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - } - - const segment = findSegment(tx.trace, tx.trace.root, 'Llm/chain/Langchain/stream') - assert.equal(segment, undefined, 'should not create Llm/chain/Langchain/stream segment when ai_monitoring is disabled') - - tx.end() - end() - }) - } - ) -}) - -test('both ai_monitoring and streaming disabled', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: false, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test( - 'should not create llm events when both `ai_monitoring.enabled` and `ai_monitoring.streaming.enabled` are false', - (t, end) => { - const { agent, prompt, outputParser, 
model } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - let content = '' - for await (const chunk of stream) { - content += chunk - } - - const { streamData: expectedContent } = mockResponses.get('Streamed response') - assert.equal(content, expectedContent) - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create llm events when both configs are disabled') - - tx.end() - end() - }) - } - ) -}) - -test('streaming enabled - edge cases', async (t) => { - t.beforeEach((ctx) => beforeEach({ enabled: true, ctx })) - t.afterEach((ctx) => afterEach(ctx)) - - await t.test( - 'should handle metadata properly during stream processing', - (t, end) => { - const { agent, prompt, model, outputParser } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - const options = { - metadata: { streamKey: 'streamValue', anotherKey: 'anotherValue' }, - tags: ['stream-tag1', 'stream-tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input, options) - for await (const chunk of stream) { - consumeStreamChunk(chunk) - } - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - const [[, summary]] = langChainSummaryEvents - assert.equal(summary['metadata.streamKey'], 'streamValue') - assert.equal(summary['metadata.anotherKey'], 'anotherValue') - - const tags = summary.tags.split(',') - assert.ok(tags.includes('stream-tag1')) - assert.ok(tags.includes('stream-tag2')) - - tx.end() - end() - }) - } - ) - - await t.test( - 'should properly extend segment duration on each stream 
iteration', - (t, end) => { - const { agent, prompt, model, outputParser } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'Streamed' } - - const chain = prompt.pipe(model).pipe(outputParser) - const stream = await chain.stream(input) - - const [segment] = tx.trace.getChildren(tx.trace.root.id) - assert.equal(segment.name, 'Llm/chain/Langchain/stream', 'should find the Langchain stream segment') - - let chunkCount = 0 - for await (const chunk of stream) { - consumeStreamChunk(chunk) - chunkCount++ - } - - // Segment should have been touched multiple times during streaming - assert.ok(chunkCount > 1, 'should have received multiple chunks') - assert.ok(segment.timer.hrDuration) - - tx.end() - end() - }) - } - ) -}) diff --git a/test/versioned/langchain/openai/runnables.test.js b/test/versioned/langchain/openai/runnables.test.js deleted file mode 100644 index 4713dc0f56..0000000000 --- a/test/versioned/langchain/openai/runnables.test.js +++ /dev/null @@ -1,511 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../../lib/custom-assertions') -const { findSegment } = require('../../../lib/metrics_helper') -const { - assertLangChainChatCompletionMessages, - assertLangChainChatCompletionSummary, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { version: pkgVersion } = require('@langchain/core/package.json') -const createOpenAIMockServer = require('../../openai/mock-server') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') - -test.beforeEach(async (ctx) => { - ctx.nr = {} - const { host, port, server } = await createOpenAIMockServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const { StringOutputParser } = require('@langchain/core/output_parsers') - const { ChatOpenAI } = require('@langchain/openai') - - ctx.nr.prompt = ChatPromptTemplate.fromMessages([['assistant', 'You are a {topic}.']]) - ctx.nr.model = new ChatOpenAI({ - apiKey: 'fake-key', - maxRetries: 0, - configuration: { - baseURL: `http://${host}:${port}` - } - }) - ctx.nr.outputParser = new StringOutputParser() -}) - -test.afterEach(async (ctx) => { - ctx.nr?.server?.close() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', 'openai']) -}) - -test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) -}) - -test('should create langchain events 
for every invoke call', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - const result = await chain.invoke(input, options) - assert.ok(result) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - - tx.end() - end() - }) -}) - -test('should increment tracking metric for each langchain chat prompt event', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const metrics = agent.metrics.getOrCreateMetric( - `Supportability/Nodejs/ML/Langchain/${pkgVersion}` - ) - assert.equal(metrics.callCount > 0, true) - - tx.end() - end() - }) -}) - -test('should support custom attributes on the LLM events', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - const api = helper.getAgentApi() - helper.runInTransaction(agent, async (tx) => { - api.withLlmCustomAttributes({ 'llm.contextAttribute': 'someValue' }, async () => { - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - const events = agent.customEventAggregator.events.toArray() - - const [[, message]] = events - 
assert.equal(message['llm.contextAttribute'], 'someValue') - - tx.end() - end() - }) - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model + parser', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1] - }) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model', (t, end) => { - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - 
assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1] - }) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call with parser that returns an array as output', (t, end) => { - const { CommaSeparatedListOutputParser } = require('@langchain/core/output_parsers') - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const parser = new CommaSeparatedListOutputParser() - - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(parser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0] - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - output: '["212 degrees Fahrenheit is equal to 100 degrees Celsius."]' - }) - - tx.end() - end() - }) -}) - -test('should add runId when a callback handler exists', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - let runId - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart(...args) { - runId = args?.[2] - } - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = 
prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - assert.equal(langchainEvents[0][1].request_id, runId) - - tx.end() - end() - }) -}) - -test('should create langchain events for every invoke call on chat prompt + model + parser with callback', (t, end) => { - const { BaseCallbackHandler } = require('@langchain/core/callbacks/base') - const cbHandler = BaseCallbackHandler.fromMethods({ - handleChainStart() {} - }) - - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { - metadata: { key: 'value', hello: 'world' }, - callbacks: [cbHandler], - tags: ['tag1', 'tag2'] - } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - assertLangChainChatCompletionSummary({ - tx, - chatSummary: langChainSummaryEvents[0], - withCallback: cbHandler - }) - - assertLangChainChatCompletionMessages({ - tx, - chatMsgs: langChainMessageEvents, - chatSummary: langChainSummaryEvents[0][1], - withCallback: cbHandler - }) - - tx.end() - end() - }) -}) - -test('should not create langchain events when not in a transaction', async (t) => { - const { agent, prompt, outputParser, model } = t.nr - - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = 
agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create langchain events') -}) - -test('should add llm attribute to transaction', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - await chain.invoke(input, options) - - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) -}) - -test('should create span on successful runnables create', (t, end) => { - const { agent, prompt, model } = t.nr - - const input = { topic: 'scientist' } - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model) - const result = await chain.invoke(input, options) - - assert.ok(result) - assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/invoke'], { exact: false }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/invoke', kind: 'internal' }] }) - end() - }) -}) - -// testing JSON.stringify on request (input) during creation of LangChainCompletionMessage event -test('should use empty string for content property on completion message event when invalid input is used - circular reference', (t, end) => { - const { agent, prompt, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - input.myself = input - const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } - - const chain = prompt.pipe(model).pipe(outputParser) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - - const langchainEvents = filterLangchainEvents(events) - const langChainMessageEvents = 
filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionMessage' - ) - - const msgEventEmptyContent = langChainMessageEvents.filter((event) => event[1].content === '') - - assert.equal(msgEventEmptyContent.length, 1, 'should have 1 event with empty content property') - - tx.end() - end() - }) -}) - -test('should create error events', (t, end) => { - const { ChatPromptTemplate } = require('@langchain/core/prompts') - const prompt = ChatPromptTemplate.fromMessages([['assistant', 'Invalid API key.']]) - const { agent, outputParser, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const chain = prompt.pipe(model).pipe(outputParser) - - try { - await chain.invoke('') - } catch (error) { - assert.ok(error) - } - - // We should still get the same 3xLangChain and 3xLLM events as in the - // success case: - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 6, 'should create 6 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') - const summary = langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] - assert.equal(summary.error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - const str = Object.prototype.toString.call(e.customAttributes) - assert.equal(str, '[object LlmErrorMessage]') - } - - tx.end() - end() - }) -}) - -test('should not create llm runnable events when ai_monitoring is disabled', (t, end) => { - const { agent, prompt, model } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const chain = prompt.pipe(model) - await chain.invoke(input) - - const events = agent.customEventAggregator.events.toArray() - 
assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled') - - tx.end() - end() - }) -}) - -test('should not create segment when ai_monitoring is disabled', (t, end) => { - const { agent, prompt, model } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const chain = prompt.pipe(model) - const result = await chain.invoke(input) - assert.ok(result, 'should not mess up result') - - const segment = findSegment(tx.trace, tx.trace.root, 'Llm/chain/Langchain/stream') - assert.equal(segment, undefined, 'should not create Llm/chain/Langchain/stream segment when ai_monitoring is disabled') - - tx.end() - end() - }) -}) - -test('should handle metadata and tags properly', (t, end) => { - const { agent, prompt, model } = t.nr - - helper.runInTransaction(agent, async (tx) => { - const input = { topic: 'scientist' } - const options = { - metadata: { customKey: 'customValue', anotherKey: 'anotherValue' }, - tags: ['custom-tag1', 'custom-tag2', 'custom-tag3'] - } - - const chain = prompt.pipe(model) - await chain.invoke(input, options) - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - const langChainSummaryEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmChatCompletionSummary' - ) - - const [[, summary]] = langChainSummaryEvents - assert.equal(summary['metadata.customKey'], 'customValue') - assert.equal(summary['metadata.anotherKey'], 'anotherValue') - - const tags = summary.tags.split(',') - assert.ok(tags.includes('custom-tag1')) - assert.ok(tags.includes('custom-tag2')) - assert.ok(tags.includes('custom-tag3')) - - tx.end() - end() - }) -}) diff --git a/test/versioned/langchain/openai/vectorstore.test.js b/test/versioned/langchain/openai/vectorstore.test.js deleted file mode 100644 index 4f3030cb16..0000000000 --- a/test/versioned/langchain/openai/vectorstore.test.js 
+++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2024 New Relic Corporation. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict' - -const test = require('node:test') -const assert = require('node:assert') - -const { removeModules } = require('../../../lib/cache-buster') -const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../../lib/custom-assertions') -const { findSegment } = require('../../../lib/metrics_helper') -const { - assertLangChainVectorSearch, - assertLangChainVectorSearchResult, - filterLangchainEvents, - filterLangchainEventsByType -} = require('../common') -const { Document } = require('@langchain/core/documents') -const createOpenAIMockServer = require('../../openai/mock-server') -const params = require('../../../lib/params') -const helper = require('../../../lib/agent_helper') - -const config = { - ai_monitoring: { - enabled: true - } -} -const { DESTINATIONS } = require('../../../../lib/config/attribute-filter') -const { tspl } = require('@matteo.collina/tspl') - -test.beforeEach(async (ctx) => { - ctx.nr = {} - const { host, port, server } = await createOpenAIMockServer() - ctx.nr.server = server - ctx.nr.agent = helper.instrumentMockedAgent(config) - const { OpenAIEmbeddings } = require('@langchain/openai') - - const { Client } = require('@elastic/elasticsearch') - const clientArgs = { - client: new Client({ - node: `http://${params.elastic_host}:${params.elastic_port}` - }) - } - const { ElasticVectorSearch } = require('@langchain/community/vectorstores/elasticsearch') - - ctx.nr.embedding = new OpenAIEmbeddings({ - apiKey: 'fake-key', - configuration: { - baseURL: `http://${host}:${port}` - } - }) - const docs = [ - new Document({ - metadata: { id: '2' }, - pageContent: 'This is an embedding test.' 
- }) - ] - const vectorStore = new ElasticVectorSearch(ctx.nr.embedding, clientArgs) - await vectorStore.deleteIfExists() - await vectorStore.addDocuments(docs) - ctx.nr.vs = vectorStore -}) - -test.afterEach(async (ctx) => { - ctx.nr?.server?.close() - helper.unloadAgent(ctx.nr.agent) - // bust the require-cache so it can re-instrument - removeModules(['@langchain/core', 'openai', '@elastic', '@langchain/community']) -}) - -test('should log tracking metrics', function(t) { - const { agent } = t.nr - const { version } = require('@langchain/core/package.json') - assertPackageMetrics({ agent, pkg: '@langchain/core', version }) -}) - -test('should create vectorstore events for every similarity search call', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - await vs.similaritySearch('This is an embedding test.', 1) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 3, 'should create 3 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 2, 'should create 2 langchain events') - - tx.end() - end() - }) -}) - -test('should create span on successful vectorstore create', (t, end) => { - const { agent, vs } = t.nr - helper.runInTransaction(agent, async (tx) => { - const result = await vs.similaritySearch('This is an embedding test.', 1) - assert.ok(result) - assertSegments(tx.trace, tx.trace.root, ['Llm/vectorstore/Langchain/similaritySearch'], { - exact: false - }) - tx.end() - assertSpanKind({ agent, segments: [{ name: 'Llm/vectorstore/Langchain/similaritySearch', kind: 'internal' }] }) - end() - }) -}) - -test('should increment tracking metric for each langchain vectorstore event', async (t) => { - const plan = tspl(t, { plan: 1 }) - const { agent, vs } = t.nr - - await helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('This 
is an embedding test.', 1) - - // `@langchain/community` and `@langchain/openai` have diverged on the `@langchain/core` - // version. Find the right one that has a call count - - for (const metric in agent.metrics._metrics.unscoped) { - if (metric.startsWith('Supportability/Nodejs/ML/Langchain')) { - plan.equal(agent.metrics._metrics.unscoped[metric].callCount > 0, true) - } - } - tx.end() - }) - await plan.completed -}) - -test('should create vectorstore events for every similarity search call with embeddings', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - await vs.similaritySearch('This is an embedding test.', 1) - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - - const vectorSearchResultEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmVectorSearchResult' - ) - - const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch') - - assertLangChainVectorSearch({ - tx, - vectorSearch: vectorSearchEvents[0], - responseDocumentSize: 1 - }) - assertLangChainVectorSearchResult({ - tx, - vectorSearchResult: vectorSearchResultEvents, - vectorSearchId: vectorSearchEvents[0][1].id - }) - - tx.end() - end() - }) -}) - -test('should create only vectorstore search event for similarity search call with embeddings and invalid metadata filter', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - // search for documents with invalid filter - await vs.similaritySearch('This is an embedding test.', 1, { - a: 'some filter' - }) - - const events = agent.customEventAggregator.events.toArray() - const langchainEvents = filterLangchainEvents(events) - - const vectorSearchResultEvents = filterLangchainEventsByType( - langchainEvents, - 'LlmVectorSearchResult' - ) - - const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch') - - // there are no 
documents in vector store with that filter - assert.equal(vectorSearchResultEvents.length, 0, 'should have 0 events') - assertLangChainVectorSearch({ - tx, - vectorSearch: vectorSearchEvents[0], - responseDocumentSize: 0 - }) - - tx.end() - end() - }) -}) - -test('should not create vectorstore events when not in a transaction', async (t) => { - const { agent, vs } = t.nr - - await vs.similaritySearch('This is an embedding test.', 1) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create vectorstore events') -}) - -test('should add llm attribute to transaction', (t, end) => { - const { agent, vs } = t.nr - - helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('This is an embedding test.', 1) - - const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) - assert.equal(attributes.llm, true) - - tx.end() - end() - }) -}) - -test('should create error events', (t, end) => { - const { agent, vs } = t.nr - - helper.runInNamedTransaction(agent, async (tx) => { - try { - await vs.similaritySearch('Embedding not allowed.', 1) - } catch (error) { - assert.ok(error) - } - - const events = agent.customEventAggregator.events.toArray() - // Only LlmEmbedding and LlmVectorSearch events will be created - // LangChainVectorSearchResult event won't be created since there was an error - assert.equal(events.length, 2, 'should create 2 events') - - const langchainEvents = events.filter((event) => { - const [, chainEvent] = event - return chainEvent.vendor === 'langchain' - }) - - assert.equal(langchainEvents.length, 1, 'should create 1 langchain vectorsearch event') - assert.equal(langchainEvents[0][1].error, true) - - // But, we should also get two error events: 1xLLM and 1xLangChain - const exceptions = tx.exceptions - for (const e of exceptions) { - const str = Object.prototype.toString.call(e.customAttributes) - assert.equal(str, '[object LlmErrorMessage]') - } - - tx.end() - end() - }) -}) 
- -test('should not create llm vectorstore events when ai_monitoring is disabled', (t, end) => { - const { agent, vs } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('This is an embedding test.', 1) - - const events = agent.customEventAggregator.events.toArray() - assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled') - - tx.end() - end() - }) -}) - -test('should not create segment when ai_monitoring is disabled', (t, end) => { - const { agent, vs } = t.nr - agent.config.ai_monitoring.enabled = false - - helper.runInTransaction(agent, async (tx) => { - await vs.similaritySearch('This is an embedding test.', 1) - - const segment = findSegment(tx.trace, tx.trace.root, 'Llm/vectorstore/Langchain/similaritySearch') - assert.equal(segment, undefined, 'should not create Llm/vectorstore/Langchain/similaritySearch segment when ai_monitoring is disabled') - - tx.end() - end() - }) -}) diff --git a/test/versioned/langchain/package.json b/test/versioned/langchain/package.json index 8f55fbcf92..baa2c87064 100644 --- a/test/versioned/langchain/package.json +++ b/test/versioned/langchain/package.json @@ -17,23 +17,11 @@ "engines": { "node": ">=20" }, - "comment": "`@langchain/core` is the only dep under test. The rest are to help seed tests and assert real-life scenarios. 
In the future, feel free to change semver ranges of those packages.", "dependencies": { - "@langchain/openai": ">=1.0.0", - "@langchain/core": ">=1.0.0", - "openai": "4.90.0", - "@langchain/community": ">=1.0.0", - "@elastic/elasticsearch": "8.13.1", - "@langchain/aws": ">=1.1.0" + "@langchain/core": ">=1.0.0" }, "files": [ - "tools.test.js", - "openai/runnables.test.js", - "openai/runnables-streaming.test.js", - "openai/vectorstore.test.js", - "bedrock/runnables.test.js", - "bedrock/runnables-streaming.test.js", - "bedrock/vectorstore.test.js" + "tools.test.js" ] } ] diff --git a/test/versioned/langchain/runnables-streaming.js b/test/versioned/langchain/runnables-streaming.js new file mode 100644 index 0000000000..b8d4ded6bf --- /dev/null +++ b/test/versioned/langchain/runnables-streaming.js @@ -0,0 +1,771 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const assert = require('node:assert') + +const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../lib/custom-assertions') +const { findSegment } = require('../../lib/metrics_helper') +const { + assertLangChainChatCompletionMessages, + assertLangChainChatCompletionSummary, + filterLangchainEvents, + filterLangchainEventsByType +} = require('./common') +const { DESTINATIONS } = require('../../../lib/config/attribute-filter') +const helper = require('../../lib/agent_helper') + +function consumeStreamChunk() { + // A no-op function used to consume chunks of a stream. 
+} + +/** + * Runs the common runnables-streaming tests for streaming enabled + * @param {object} config Configuration for the test suite + * @param {object} config.inputData The input data to pass to stream calls + * @param {string} [config.expectedInput] Expected input string for assertions + * @param {Function} [config.expectedContent] Function to get expected content (optional) + * @param {object} [config.chunkContentAccess] How to access content from chunks without parser (e.g., 'chunk?.content') + * @param {object} [config.errorFromInputAssertion] Custom assertion for error from input test + * @param {object} [config.errorFromStreamAssertion] Custom assertion for error from stream test + * @param {number} [config.errorFromStreamEventCount] Expected event count for error from stream test + * @param {number} [config.errorFromStreamLangchainEventCount] Expected langchain event count for error from stream test + * @param {Array} [config.errorPromptTemplate] The prompt template to trigger errors + */ +function runStreamingEnabledTests(config) { + const { + inputData, + expectedInput, + expectedContent, + chunkContentAccess = (chunk) => chunk?.content, + errorFromInputAssertion, + errorFromStreamAssertion, + errorFromStreamEventCount = 6, + errorFromStreamLangchainEventCount = 3, + errorPromptTemplate + } = config + + return async (t) => { + await t.test('should log tracking metrics', function(t) { + const { agent, langchainCoreVersion } = t.nr + assertPackageMetrics({ agent, pkg: '@langchain/core', version: langchainCoreVersion }) + }) + + await t.test('should create langchain events for every stream call', (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + let content = '' + for await (const chunk of stream) { + content += chunk + } + + if (expectedContent) { + const 
expected = expectedContent() + assert.equal(content, expected) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 6, 'should create 6 events') + + const langchainEvents = events.filter((event) => { + const [, chainEvent] = event + return chainEvent.vendor === 'langchain' + }) + + assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') + + tx.end() + end() + }) + }) + + await t.test( + 'should increment tracking metric for each langchain chat prompt event', + (t, end) => { + const { agent, prompt, outputParser, model, langchainCoreVersion } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const metrics = agent.metrics.getOrCreateMetric( + `Supportability/Nodejs/ML/Langchain/${langchainCoreVersion}` + ) + assert.equal(metrics.callCount > 0, true) + + tx.end() + end() + }) + } + ) + + await t.test( + 'should create langchain events for every stream call on chat prompt + model + parser', + (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input, options) + let content = '' + for await (const chunk of stream) { + content += chunk + } + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + 
chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1], + output: content + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + } + ) + + await t.test( + 'should create langchain events for every stream call on chat prompt + model', + (t, end) => { + const { agent, prompt, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model) + const stream = await chain.stream(input, options) + let content = '' + for await (const chunk of stream) { + content += chunkContentAccess(chunk) + } + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1], + output: content + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + } + ) + + await t.test( + 'should create langchain events for every stream call with parser that returns an array as output', + (t, end) => { + const { agent, prompt, model, CommaSeparatedListOutputParser } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const parser = new CommaSeparatedListOutputParser() + + const input = inputData + const options = { metadata: { key: 'value', 
hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(parser) + const stream = await chain.stream(input, options) + let content = '' + for await (const chunk of stream) { + content += chunk + } + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1], + output: content + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + } + ) + + await t.test('should add runId when a callback handler exists', (t, end) => { + const { BaseCallbackHandler } = t.nr + let runId + const cbHandler = BaseCallbackHandler.fromMethods({ + handleChainStart(...args) { + runId = args?.[2] + } + }) + + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { key: 'value', hello: 'world' }, + callbacks: [cbHandler], + tags: ['tag1', 'tag2'] + } + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input, options) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + assert.equal(langchainEvents[0][1].request_id, runId) + + tx.end() + end() + }) + }) + + await t.test( + 'should create langchain events for every stream call on chat prompt + model + parser with callback', + (t, end) => 
{ + const { BaseCallbackHandler } = t.nr + const cbHandler = BaseCallbackHandler.fromMethods({ + handleChainStart() {} + }) + + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { key: 'value', hello: 'world' }, + callbacks: [cbHandler], + tags: ['tag1', 'tag2'] + } + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input, options) + + let content = '' + for await (const chunk of stream) { + content += chunk + } + + if (expectedContent) { + const expected = expectedContent() + assert.equal(content, expected) + } + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0], + withCallback: cbHandler + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1], + withCallback: cbHandler, + output: content + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + } + ) + + await t.test('should not create langchain events when not in a transaction', async (t) => { + const { agent, prompt, outputParser, model } = t.nr + + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 0, 'should not create langchain events') + }) + + await t.test('should add llm 
attribute to transaction', (t, end) => { + const { agent, prompt, model } = t.nr + + const input = inputData + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) + assert.equal(attributes.llm, true) + + tx.end() + end() + }) + }) + + await t.test('should create span on successful runnables create', (t, end) => { + const { agent, prompt, model } = t.nr + + const input = inputData + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/stream'], { exact: false }) + tx.end() + assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/stream', kind: 'internal' }] }) + end() + }) + }) + + await t.test( + 'should use empty string for content property on completion message event when invalid input is used - circular reference', + (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = { ...inputData } + input.myself = input + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + + const msgEventEmptyContent = langChainMessageEvents.filter( + (event) => event[1].content === '' + ) + + assert.equal( + msgEventEmptyContent.length, + 1, + 'should have 1 event with empty content property' + ) + + tx.end() + end() + }) + } + ) + + 
await t.test('should create error events from input', (t, end) => { + const { ChatPromptTemplate } = t.nr + const prompt = ChatPromptTemplate.fromMessages([ + ['assistant', 'tell me short joke about {topic}'] + ]) + const { agent, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model).pipe(outputParser) + + try { + await chain.stream('') + } catch (error) { + assert.ok(error) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 2, 'should create 2 events') + + const summary = events.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] + assert.equal(summary.error, true) + + const exceptions = tx.exceptions + if (errorFromInputAssertion) { + errorFromInputAssertion(exceptions) + } else { + for (const e of exceptions) { + assert.ok(e.customAttributes?.['error.message'], 'error.message should be set') + } + } + + tx.end() + end() + }) + }) + + await t.test('should create error events when stream fails', (t, end) => { + const { ChatPromptTemplate } = t.nr + const prompt = ChatPromptTemplate.fromMessages([[errorPromptTemplate[0], errorPromptTemplate[1]]]) + const { agent, model, outputParser } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model).pipe(outputParser) + + try { + const stream = await chain.stream({ topic: 'bad' }) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + } catch (error) { + assert.ok(error) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, errorFromStreamEventCount, `should create ${errorFromStreamEventCount} events`) + + const langchainEvents = events.filter((event) => { + const [, chainEvent] = event + return chainEvent.vendor === 'langchain' + }) + assert.equal(langchainEvents.length, errorFromStreamLangchainEventCount, `should create ${errorFromStreamLangchainEventCount} langchain events`) + const summary = 
langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] + assert.equal(summary.error, true) + + const exceptions = tx.exceptions + if (errorFromStreamAssertion) { + errorFromStreamAssertion(exceptions) + } + + tx.end() + end() + }) + }) + + await t.test( + 'should handle metadata properly during stream processing', + (t, end) => { + const { agent, prompt, model, outputParser } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { streamKey: 'streamValue', anotherKey: 'anotherValue' }, + tags: ['stream-tag1', 'stream-tag2'] + } + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input, options) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const events = agent.customEventAggregator.events.toArray() + const langchainEvents = filterLangchainEvents(events) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + const [[, summary]] = langChainSummaryEvents + assert.equal(summary['metadata.streamKey'], 'streamValue') + assert.equal(summary['metadata.anotherKey'], 'anotherValue') + + const tags = summary.tags.split(',') + assert.ok(tags.includes('stream-tag1')) + assert.ok(tags.includes('stream-tag2')) + + tx.end() + end() + }) + } + ) + + await t.test( + 'should properly extend segment duration on each stream iteration', + (t, end) => { + const { agent, prompt, model, outputParser } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + + const [segment] = tx.trace.getChildren(tx.trace.root.id) + assert.equal(segment.name, 'Llm/chain/Langchain/stream', 'should find the Langchain stream segment') + + let chunkCount = 0 + for await (const chunk of stream) { + consumeStreamChunk(chunk) + chunkCount++ + } + + // Segment should have been 
touched multiple times during streaming + assert.ok(chunkCount > 1, 'should have received multiple chunks') + assert.ok(segment.timer.hrDuration) + + tx.end() + end() + }) + } + ) + } +} + +/** + * Runs the streaming disabled test + * @param {object} config Configuration for the test suite + * @param {object} config.inputData The input data to pass to stream calls + * @param {Function} [config.expectedContent] Function to get expected content + * @param {string} [config.streamingDisabledMessage] Custom message for streaming disabled metric + */ +function runStreamingDisabledTest(config) { + const { + inputData, + expectedContent, + streamingDisabledMessage = 'should increment streaming disabled' + } = config + + return async (t) => { + await t.test( + 'should not create llm events when `ai_monitoring.streaming.enabled` is false', + (t, end) => { + const { agent, prompt, outputParser, model, langchainCoreVersion } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + let content = '' + for await (const chunk of stream) { + content += chunk + } + + if (expectedContent) { + const expected = expectedContent() + assert.equal(content, expected) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 0, 'should not create llm events when streaming is disabled') + const metrics = agent.metrics.getOrCreateMetric( + `Supportability/Nodejs/ML/Langchain/${langchainCoreVersion}` + ) + assert.equal(metrics.callCount > 0, true) + const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) + assert.equal(attributes.llm, true) + const streamingDisabled = agent.metrics.getOrCreateMetric( + 'Supportability/Nodejs/ML/Streaming/Disabled' + ) + assert.equal( + streamingDisabled.callCount, + 2, + streamingDisabledMessage + ) + + tx.end() + end() + }) + } + ) + + await t.test( + 'should not create segment when 
`ai_monitoring.streaming.enabled` is false', + (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + agent.config.ai_monitoring.enabled = false + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const segment = findSegment(tx.trace, tx.trace.root, 'Llm/chain/Langchain/stream') + assert.equal(segment, undefined, 'should not create Llm/chain/Langchain/stream segment when ai_monitoring is disabled') + + tx.end() + end() + }) + } + ) + } +} + +/** + * Runs the ai_monitoring disabled tests + * @param {object} config Configuration for the test suite + * @param {object} config.inputData The input data to pass to stream calls + * @param {Function} [config.expectedContent] Function to get expected content + */ +function runAiMonitoringDisabledTests(config) { + const { inputData, expectedContent } = config + + function consumeStreamChunk() { + // A no-op function used to consume chunks of a stream. 
+ } + + return async (t) => { + await t.test( + 'should not create llm events when `ai_monitoring.enabled` is false', + (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + agent.config.ai_monitoring.enabled = false + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + let content = '' + for await (const chunk of stream) { + content += chunk + } + + if (expectedContent) { + const expected = expectedContent() + assert.equal(content, expected) + } + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled') + + tx.end() + end() + }) + } + ) + + await t.test( + 'should not create segment when `ai_monitoring.enabled` is false', + (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + agent.config.ai_monitoring.enabled = false + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + + const chain = prompt.pipe(model).pipe(outputParser) + const stream = await chain.stream(input) + for await (const chunk of stream) { + consumeStreamChunk(chunk) + } + + const segment = findSegment(tx.trace, tx.trace.root, 'Llm/chain/Langchain/stream') + assert.equal(segment, undefined, 'should not create Llm/chain/Langchain/stream segment when ai_monitoring is disabled') + + tx.end() + end() + }) + } + ) + } +} + +module.exports = { + runStreamingEnabledTests, + runStreamingDisabledTest, + runAiMonitoringDisabledTests +} diff --git a/test/versioned/langchain/runnables.js b/test/versioned/langchain/runnables.js new file mode 100644 index 0000000000..d812c7e5ab --- /dev/null +++ b/test/versioned/langchain/runnables.js @@ -0,0 +1,539 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const test = require('node:test') +const assert = require('node:assert') + +const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../lib/custom-assertions') +const { findSegment } = require('../../lib/metrics_helper') +const { + assertLangChainChatCompletionMessages, + assertLangChainChatCompletionSummary, + filterLangchainEvents, + filterLangchainEventsByType +} = require('./common') +const { DESTINATIONS } = require('../../../lib/config/attribute-filter') +const helper = require('../../lib/agent_helper') + +/** + * Runs the common runnables test suite + * @param {object} config Configuration for the test suite + * @param {object} config.inputData The input data to pass to invoke calls + * @param {string} [config.expectedInput] Expected input string for assertions + * @param {string} [config.expectedOutput] Expected output string for assertions + * @param {string} [config.errorPromptTemplate] The prompt template to trigger errors + * @param {number} [config.errorEventCount] Expected event count during errors + * @param {object} [config.errorAssertion] Custom error assertion function + * @param {object} [config.arrayParserOutput] Expected output for array parser test + */ +function runRunnablesTests(config) { + const { + inputData, + expectedInput, + expectedOutput, + errorPromptTemplate, + errorEventCount = 6, + errorAssertion, + arrayParserOutput + } = config + + test('should log tracking metrics', function(t) { + const { agent, langchainCoreVersion } = t.nr + assertPackageMetrics({ agent, pkg: '@langchain/core', version: langchainCoreVersion }) + }) + + test('should create langchain events for every invoke call', (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = 
prompt.pipe(model).pipe(outputParser) + const result = await chain.invoke(input, options) + assert.ok(result) + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 6, 'should create 6 events') + + const langchainEvents = events.filter((event) => { + const [, chainEvent] = event + return chainEvent.vendor === 'langchain' + }) + + assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') + + tx.end() + end() + }) + }) + + test('should increment tracking metric for each langchain chat prompt event', (t, end) => { + const { agent, prompt, outputParser, model, langchainCoreVersion } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const metrics = agent.metrics.getOrCreateMetric( + `Supportability/Nodejs/ML/Langchain/${langchainCoreVersion}` + ) + assert.equal(metrics.callCount > 0, true) + + tx.end() + end() + }) + }) + + test('should support custom attributes on the LLM events', (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + const api = helper.getAgentApi() + helper.runInTransaction(agent, async (tx) => { + api.withLlmCustomAttributes({ 'llm.contextAttribute': 'someValue' }, async () => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + const events = agent.customEventAggregator.events.toArray() + + const [[, message]] = events + assert.equal(message['llm.contextAttribute'], 'someValue') + + tx.end() + end() + }) + }) + }) + + test('should create langchain events for every invoke call on chat prompt + model + parser', (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + 
helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1] + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + if (expectedOutput) { + messageAssertions.output = expectedOutput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + }) + + test('should create langchain events for every invoke call on chat prompt + model', (t, end) => { + const { agent, prompt, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: 
langChainSummaryEvents[0][1] + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + if (expectedOutput) { + messageAssertions.output = expectedOutput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + }) + + test('should create langchain events for every invoke call with parser that returns an array as output', (t, end) => { + const { agent, prompt, model, CommaSeparatedListOutputParser } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const parser = new CommaSeparatedListOutputParser() + + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(parser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0] + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1] + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + if (arrayParserOutput) { + messageAssertions.output = arrayParserOutput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + }) + + test('should add runId when a callback handler exists', (t, end) => { + const { BaseCallbackHandler } = t.nr + let runId + const cbHandler = BaseCallbackHandler.fromMethods({ + handleChainStart(...args) { + runId = args?.[2] + } + }) + + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { key: 
'value', hello: 'world' }, + callbacks: [cbHandler], + tags: ['tag1', 'tag2'] + } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + assert.equal(langchainEvents[0][1].request_id, runId) + + tx.end() + end() + }) + }) + + test('should create langchain events for every invoke call on chat prompt + model + parser with callback', (t, end) => { + const { BaseCallbackHandler } = t.nr + const cbHandler = BaseCallbackHandler.fromMethods({ + handleChainStart() {} + }) + + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { key: 'value', hello: 'world' }, + callbacks: [cbHandler], + tags: ['tag1', 'tag2'] + } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + assertLangChainChatCompletionSummary({ + tx, + chatSummary: langChainSummaryEvents[0], + withCallback: cbHandler + }) + + const messageAssertions = { + tx, + chatMsgs: langChainMessageEvents, + chatSummary: langChainSummaryEvents[0][1], + withCallback: cbHandler + } + + if (expectedInput) { + messageAssertions.input = expectedInput + } + + if (expectedOutput) { + messageAssertions.output = expectedOutput + } + + assertLangChainChatCompletionMessages(messageAssertions) + + tx.end() + end() + }) + }) + + test('should not create langchain events when not in a transaction', async (t) => { + const { agent, prompt, outputParser, model } = t.nr + + const input = 
inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, 0, 'should not create langchain events') + }) + + test('should add llm attribute to transaction', (t, end) => { + const { agent, prompt, model } = t.nr + + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model) + await chain.invoke(input, options) + + const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT) + assert.equal(attributes.llm, true) + + tx.end() + end() + }) + }) + + test('should create span on successful runnables create', (t, end) => { + const { agent, prompt, model } = t.nr + + const input = inputData + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model) + const result = await chain.invoke(input, options) + + assert.ok(result) + assertSegments(tx.trace, tx.trace.root, ['Llm/chain/Langchain/invoke'], { exact: false }) + tx.end() + assertSpanKind({ agent, segments: [{ name: 'Llm/chain/Langchain/invoke', kind: 'internal' }] }) + end() + }) + }) + + test('should use empty string for content property on completion message event when invalid input is used - circular reference', (t, end) => { + const { agent, prompt, outputParser, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = { ...inputData } + input.myself = input + const options = { metadata: { key: 'value', hello: 'world' }, tags: ['tag1', 'tag2'] } + + const chain = prompt.pipe(model).pipe(outputParser) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + + const 
langchainEvents = filterLangchainEvents(events) + const langChainMessageEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionMessage' + ) + + const msgEventEmptyContent = langChainMessageEvents.filter((event) => event[1].content === '') + + assert.equal(msgEventEmptyContent.length, 1, 'should have 1 event with empty content property') + + tx.end() + end() + }) + }) + + test('should create error events', (t, end) => { + const { ChatPromptTemplate, agent, outputParser, model } = t.nr + const prompt = ChatPromptTemplate.fromMessages([[errorPromptTemplate[0], errorPromptTemplate[1]]]) + + helper.runInTransaction(agent, async (tx) => { + const chain = prompt.pipe(model).pipe(outputParser) + + try { + await chain.invoke('') + } catch (error) { + assert.ok(error) + } + + // We should still get events as in the success case: + const events = agent.customEventAggregator.events.toArray() + assert.equal(events.length, errorEventCount, `should create ${errorEventCount} events`) + + const langchainEvents = events.filter((event) => { + const [, chainEvent] = event + return chainEvent.vendor === 'langchain' + }) + assert.equal(langchainEvents.length, 3, 'should create 3 langchain events') + const summary = langchainEvents.find((e) => e[0].type === 'LlmChatCompletionSummary')?.[1] + assert.equal(summary.error, true) + + // But, we should also get error events: 1xLLM and 1xLangChain + const exceptions = tx.exceptions + if (errorAssertion) { + errorAssertion(exceptions) + } else { + for (const e of exceptions) { + assert.ok(e?.customAttributes?.['error.message']) + } + } + + tx.end() + end() + }) + }) + + test('should not create llm runnable events when ai_monitoring is disabled', (t, end) => { + const { agent, prompt, model } = t.nr + agent.config.ai_monitoring.enabled = false + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const chain = prompt.pipe(model) + await chain.invoke(input) + + const events = 
agent.customEventAggregator.events.toArray() + assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled') + + tx.end() + end() + }) + }) + + test('should not create segment when ai_monitoring is disabled', (t, end) => { + const { agent, prompt, model } = t.nr + agent.config.ai_monitoring.enabled = false + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const chain = prompt.pipe(model) + const result = await chain.invoke(input) + assert.ok(result, 'should not mess up result') + + const segment = findSegment(tx.trace, tx.trace.root, 'Llm/chain/Langchain/stream') + assert.equal(segment, undefined, 'should not create Llm/chain/Langchain/stream segment when ai_monitoring is disabled') + + tx.end() + end() + }) + }) + + test('should handle metadata and tags properly', (t, end) => { + const { agent, prompt, model } = t.nr + + helper.runInTransaction(agent, async (tx) => { + const input = inputData + const options = { + metadata: { customKey: 'customValue', anotherKey: 'anotherValue' }, + tags: ['custom-tag1', 'custom-tag2', 'custom-tag3'] + } + + const chain = prompt.pipe(model) + await chain.invoke(input, options) + + const events = agent.customEventAggregator.events.toArray() + const langchainEvents = filterLangchainEvents(events) + const langChainSummaryEvents = filterLangchainEventsByType( + langchainEvents, + 'LlmChatCompletionSummary' + ) + + const [[, summary]] = langChainSummaryEvents + assert.equal(summary['metadata.customKey'], 'customValue') + assert.equal(summary['metadata.anotherKey'], 'anotherValue') + + const tags = summary.tags.split(',') + assert.ok(tags.includes('custom-tag1')) + assert.ok(tags.includes('custom-tag2')) + assert.ok(tags.includes('custom-tag3')) + + tx.end() + end() + }) + }) +} + +module.exports = { + runRunnablesTests +} diff --git a/test/versioned/langchain/tools.test.js b/test/versioned/langchain/tools.test.js index d98d03c614..da542c5367 100644 --- 
a/test/versioned/langchain/tools.test.js +++ b/test/versioned/langchain/tools.test.js @@ -24,7 +24,7 @@ const { DESTINATIONS } = require('../../../lib/config/attribute-filter') test.beforeEach((ctx) => { ctx.nr = {} ctx.nr.agent = helper.instrumentMockedAgent(config) - const TestTool = require('./helpers/custom-tool') + const TestTool = require('./custom-tool') const tool = new TestTool({ baseUrl }) @@ -36,7 +36,7 @@ test.afterEach((ctx) => { helper.unloadAgent(ctx.nr.agent) // bust the require-cache so it can re-instrument removeModules(['@langchain/core']) - removeMatchedModules(/helpers\/custom-tool\.js$/) + removeMatchedModules(/custom-tool\.js$/) }) test('should log tracking metrics', function(t) { diff --git a/test/versioned/langchain/vectorstore.js b/test/versioned/langchain/vectorstore.js new file mode 100644 index 0000000000..2ea01fe7cd --- /dev/null +++ b/test/versioned/langchain/vectorstore.js @@ -0,0 +1,277 @@ +/* + * Copyright 2025 New Relic Corporation. All rights reserved. 
 * SPDX-License-Identifier: Apache-2.0
 */

'use strict'

const test = require('node:test')
const assert = require('node:assert')

const { assertPackageMetrics, assertSegments, assertSpanKind } = require('../../lib/custom-assertions')
const { findSegment } = require('../../lib/metrics_helper')
const {
  assertLangChainVectorSearch,
  assertLangChainVectorSearchResult,
  filterLangchainEvents,
  filterLangchainEventsByType
} = require('./common')
const { DESTINATIONS } = require('../../../lib/config/attribute-filter')
const { tspl } = require('@matteo.collina/tspl')
const helper = require('../../lib/agent_helper')

/**
 * Runs the common vectorstore test suite. Intended to be shared by
 * provider-specific versioned tests, which set up `t.nr` (agent, `vs`
 * vector store instance, langchainCoreVersion) in their own beforeEach.
 *
 * @param {object} config Configuration for the test suite
 * @param {string} config.searchQuery The query string to use for similarity search
 * @param {string} [config.expectedQuery] The expected query in assertions (defaults to searchQuery)
 * @param {string} [config.expectedPageContent] The expected page content in vector search results
 * @param {Function} [config.errorAssertion] Custom assertion invoked with `tx.exceptions` during the error test
 */
function runVectorstoreTests(config) {
  const {
    searchQuery,
    expectedQuery = searchQuery,
    expectedPageContent,
    errorAssertion
  } = config

  test('should log tracking metrics', function(t) {
    const { agent, langchainCoreVersion } = t.nr
    assertPackageMetrics({ agent, pkg: '@langchain/core', version: langchainCoreVersion })
  })

  test('should create vectorstore events for every similarity search call', (t, end) => {
    const { agent, vs } = t.nr

    helper.runInNamedTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      // 3 events total: 1 LlmEmbedding plus the 2 langchain events below
      const events = agent.customEventAggregator.events.toArray()
      assert.equal(events.length, 3, 'should create 3 events')

      const langchainEvents = events.filter((event) => {
        const [, chainEvent] = event
        return chainEvent.vendor === 'langchain'
      })

      assert.equal(langchainEvents.length, 2, 'should create 2 langchain events')

      tx.end()
      end()
    })
  })

  test('should create span on successful vectorstore create', (t, end) => {
    const { agent, vs } = t.nr
    helper.runInTransaction(agent, async (tx) => {
      const result = await vs.similaritySearch(searchQuery, 1)
      assert.ok(result)
      assertSegments(tx.trace, tx.trace.root, ['Llm/vectorstore/Langchain/similaritySearch'], {
        exact: false
      })
      tx.end()
      // span kind is only assertable after the transaction has ended
      assertSpanKind({ agent, segments: [{ name: 'Llm/vectorstore/Langchain/similaritySearch', kind: 'internal' }] })
      end()
    })
  })

  test('should increment tracking metric for each langchain vectorstore event', async (t) => {
    const plan = tspl(t, { plan: 1 })
    const { agent, vs } = t.nr

    await helper.runInTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      // `@langchain/community` and provider packages have diverged on the `@langchain/core`
      // version. Find the right one that has a call count

      for (const metric in agent.metrics._metrics.unscoped) {
        if (metric.startsWith('Supportability/Nodejs/ML/Langchain')) {
          plan.equal(agent.metrics._metrics.unscoped[metric].callCount > 0, true)
        }
      }
      tx.end()
    })
    await plan.completed
  })

  test('should create vectorstore events for every similarity search call with embeddings', (t, end) => {
    const { agent, vs } = t.nr

    helper.runInNamedTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      const events = agent.customEventAggregator.events.toArray()
      const langchainEvents = filterLangchainEvents(events)

      const vectorSearchResultEvents = filterLangchainEventsByType(
        langchainEvents,
        'LlmVectorSearchResult'
      )

      const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch')

      const vectorSearchAssertions = {
        tx,
        vectorSearch: vectorSearchEvents[0],
        responseDocumentSize: 1
      }

      if (expectedQuery) {
        vectorSearchAssertions.expectedQuery = expectedQuery
      }

      assertLangChainVectorSearch(vectorSearchAssertions)

      // result events must link back to the originating search event's id
      const vectorSearchResultAssertions = {
        tx,
        vectorSearchResult: vectorSearchResultEvents,
        vectorSearchId: vectorSearchEvents[0][1].id
      }

      if (expectedPageContent) {
        vectorSearchResultAssertions.expectedPageContent = expectedPageContent
      }

      assertLangChainVectorSearchResult(vectorSearchResultAssertions)

      tx.end()
      end()
    })
  })

  test('should create only vectorstore search event for similarity search call with embeddings and invalid metadata filter', (t, end) => {
    const { agent, vs } = t.nr

    helper.runInNamedTransaction(agent, async (tx) => {
      // search for documents with invalid filter
      await vs.similaritySearch(searchQuery, 1, {
        a: 'some filter'
      })

      const events = agent.customEventAggregator.events.toArray()
      const langchainEvents = filterLangchainEvents(events)

      const vectorSearchResultEvents = filterLangchainEventsByType(
        langchainEvents,
        'LlmVectorSearchResult'
      )

      const vectorSearchEvents = filterLangchainEventsByType(langchainEvents, 'LlmVectorSearch')

      // there are no documents in vector store with that filter
      assert.equal(vectorSearchResultEvents.length, 0, 'should have 0 events')

      const vectorSearchAssertions = {
        tx,
        vectorSearch: vectorSearchEvents[0],
        responseDocumentSize: 0
      }

      if (expectedQuery) {
        vectorSearchAssertions.expectedQuery = expectedQuery
      }

      assertLangChainVectorSearch(vectorSearchAssertions)

      tx.end()
      end()
    })
  })

  test('should not create vectorstore events when not in a transaction', async (t) => {
    const { agent, vs } = t.nr

    await vs.similaritySearch(searchQuery, 1)

    const events = agent.customEventAggregator.events.toArray()
    assert.equal(events.length, 0, 'should not create vectorstore events')
  })

  test('should add llm attribute to transaction', (t, end) => {
    const { agent, vs } = t.nr

    helper.runInTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT)
      assert.equal(attributes.llm, true)

      tx.end()
      end()
    })
  })

  test('should create error events', (t, end) => {
    const { agent, vs } = t.nr

    helper.runInNamedTransaction(agent, async (tx) => {
      try {
        // 'Embedding not allowed.' is a sentinel query the mock embedding
        // backend rejects — presumably set up by the provider's server stub;
        // verify against the suite's beforeEach if this changes.
        await vs.similaritySearch('Embedding not allowed.', 1)
      } catch (error) {
        assert.ok(error)
      }

      const events = agent.customEventAggregator.events.toArray()
      // Only LlmEmbedding and LlmVectorSearch events will be created
      // LangChainVectorSearchResult event won't be created since there was an error
      assert.equal(events.length, 2, 'should create 2 events')

      const langchainEvents = events.filter((event) => {
        const [, chainEvent] = event
        return chainEvent.vendor === 'langchain'
      })

      assert.equal(langchainEvents.length, 1, 'should create 1 langchain vectorsearch event')
      assert.equal(langchainEvents[0][1].error, true)

      // But, we should also get two error events: 1xLLM and 1xLangChain
      const exceptions = tx.exceptions
      if (errorAssertion) {
        errorAssertion(exceptions)
      } else {
        for (const e of exceptions) {
          assert.ok(e?.customAttributes?.['error.message'])
        }
      }

      tx.end()
      end()
    })
  })

  test('should not create llm vectorstore events when ai_monitoring is disabled', (t, end) => {
    const { agent, vs } = t.nr
    agent.config.ai_monitoring.enabled = false

    helper.runInTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      const events = agent.customEventAggregator.events.toArray()
      assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled')

      tx.end()
      end()
    })
  })

  test('should not create segment when ai_monitoring is disabled', (t, end) => {
    const { agent, vs } = t.nr
    agent.config.ai_monitoring.enabled = false

    helper.runInTransaction(agent, async (tx) => {
      await vs.similaritySearch(searchQuery, 1)

      const segment = findSegment(tx.trace, tx.trace.root, 'Llm/vectorstore/Langchain/similaritySearch')
      assert.equal(segment, undefined, 'should not create Llm/vectorstore/Langchain/similaritySearch segment when ai_monitoring is disabled')

      tx.end()
      end()
    })
  })
}

module.exports = {
  runVectorstoreTests
}