
Commit a0a7055

fix: langchainjs works with langfuse at 2510
1 parent ac1553f commit a0a7055

3 files changed: +100 -58 lines changed
File 1 of 3 (19 additions, 57 deletions)
@@ -1,13 +1,18 @@
-// import '@dotenvx/dotenvx/config';
+import '@dotenvx/dotenvx/config';
 import './langfuse-instrumentation';
 
 import { HumanMessage } from '@langchain/core/messages';
 import { ChatOpenAI } from '@langchain/openai';
+import { CallbackHandler } from '@langfuse/langchain';
 
-import { startActiveObservation, startObservation } from '@langfuse/tracing';
+import { langfuseSpanProcessor } from './langfuse-instrumentation';
 
-// import { NodeSDK } from "@opentelemetry/sdk-node";
-// import { LangfuseSpanProcessor } from "@langfuse/otel";
+// Initialize the Langfuse callback handler with tracing configuration
+const langfuseHandler = new CallbackHandler({
+  sessionId: 'user-session-123', // Track user session
+  userId: 'user-abc', // Track user identity
+  tags: ['langchain-test'], // Add searchable tags
+});
 
 const model = new ChatOpenAI({
   model: 'qwen/qwen3-vl-4b',
@@ -18,60 +23,17 @@ const model = new ChatOpenAI({
   // temperature: 0.5,
 });
 
-// import { CallbackHandler } from "@langfuse/langchain";
-// const langfuseHandler = new CallbackHandler();
-
 console.log(';; langfuse ', process.env['LANGFUSE_PUBLIC_KEY']);
 
-// const sdk = new NodeSDK({
-//   spanProcessors: [new LangfuseSpanProcessor()],
-// });
-
-// sdk.start();
-
-// await startActiveObservation("user-request", async (span) => {
-//   span.update({
-//     input: { query: "What is the capital of France?" },
-//   });
-
-//   // This generation will automatically be a child of "user-request"
-//   const generation = startObservation(
-//     "llm-call",
-//     {
-//       model: "gpt-4",
-//       input: [{ role: "user", content: "What is the capital of France?" }],
-//     },
-//     { asType: "generation" },
-//   );
-
-//   // ... LLM call logic ...
-//   // refers to https://github.com/calcajack3/auto-tool-eval/blob/main/src/utils/utils.ts
+const messages = [
+  new HumanMessage('give a brief intro to codemirror in less than 80 words'),
+];
 
-// const messages = [
-//   new HumanMessage('give an brief intro to codemirror in less than 80 words '),
-// ];
-
-// const res = await model.invoke(messages, {
-//   // callbacks: [langfuseHandler]
-// });
-// console.log('res');
-
-//   generation
-//     .update({
-//       output: { content: "The capital of France is Paris." },
-//     })
-//     .end();
-
-//   span.update({ output: "Successfully answered." });
-// });
-
-async function main() {
-  await startActiveObservation('my-first-trace', async (span) => {
-    span.update({
-      input: 'Hello, Langfuse!',
-      output: 'This is my first trace!',
-    });
-  });
-}
+const res = await model.invoke(messages, {
+  callbacks: [langfuseHandler],
+  // Name for the trace (if no active span)
+  runName: 'joke-generator',
+});
+console.log(';; res ', res.content);
 
-main();
+await langfuseSpanProcessor.forceFlush();
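
The rewrite above leans entirely on the CallbackHandler to create the trace, with runName naming it when no span is active. Below is a minimal sketch, not part of the commit, of how the call could instead be nested under an explicit parent span via startActiveObservation (the same API the new tracing example further down uses); whether the CallbackHandler attaches its trace to the active OpenTelemetry context this way is an assumption to verify against the Langfuse docs:

import { startActiveObservation } from '@langfuse/tracing';

// Hypothetical variant: give the LangChain run an explicit parent span
await startActiveObservation('chat-request', async (span) => {
  span.update({ input: { prompt: 'intro to codemirror' } });
  const res = await model.invoke(messages, { callbacks: [langfuseHandler] });
  span.update({ output: res.content });
});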
File 2 of 3 (11 additions, 1 deletion)
@@ -1,8 +1,18 @@
 import { NodeSDK } from '@opentelemetry/sdk-node';
 import { LangfuseSpanProcessor } from '@langfuse/otel';
 
+// Export the processor to be able to flush it later
+// This is important for ensuring all spans are sent to Langfuse
+export const langfuseSpanProcessor = new LangfuseSpanProcessor({
+  publicKey: process.env.LANGFUSE_PUBLIC_KEY!,
+  secretKey: process.env.LANGFUSE_SECRET_KEY!,
+  baseUrl: process.env.LANGFUSE_HOST ?? 'https://us.cloud.langfuse.com', // Default to cloud if not specified
+  environment: process.env.NODE_ENV ?? 'development', // Default to development if not specified
+});
+
+// Initialize the OpenTelemetry SDK with our Langfuse processor
 const sdk = new NodeSDK({
-  spanProcessors: [new LangfuseSpanProcessor()],
+  spanProcessors: [langfuseSpanProcessor],
 });
 
 sdk.start();
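
Since NodeSDK exports spans in batches, a process that exits early can drop them. A minimal sketch of a shutdown hook one might add inside this same instrumentation file, where sdk and langfuseSpanProcessor are both in scope; this is a hypothetical addition, not in the commit:

// Hypothetical addition: flush buffered spans and stop the SDK on termination
process.on('SIGTERM', async () => {
  await langfuseSpanProcessor.forceFlush(); // push any buffered spans to Langfuse
  await sdk.shutdown(); // shut down the OpenTelemetry SDK cleanly
  process.exit(0);
});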
File 3 of 3 (70 additions, 0 deletions)
@@ -0,0 +1,70 @@
+import './langfuse-instrumentation';
+
+// Import necessary functions from the tracing package
+import {
+  startActiveObservation,
+  startObservation,
+  updateActiveTrace,
+  updateActiveObservation,
+} from '@langfuse/tracing';
+import { langfuseSpanProcessor } from './langfuse-instrumentation';
+
+// Start a new span with automatic context management
+await startActiveObservation('context-manager', async (span) => {
+  // Log the initial user query
+  span.update({
+    input: { query: 'What is the capital of France?' },
+  });
+
+  // Create a new generation span that will automatically be a child of "context-manager"
+  const generation = startObservation(
+    'llm-call',
+    {
+      model: 'gpt-4',
+      input: [{ role: 'user', content: 'What is the capital of France?' }],
+    },
+    { asType: 'generation' },
+  );
+
+  // ... LLM call logic would go here ...
+
+  // Update the generation with token usage statistics
+  generation.update({
+    usageDetails: {
+      input: 10, // Number of input tokens
+      output: 5, // Number of output tokens
+      cache_read_input_tokens: 2, // Tokens read from cache
+      some_other_token_count: 10, // Custom token metric
+      total: 17, // Optional: automatically calculated if not provided
+    },
+  });
+
+  // End the generation with the LLM response
+  generation
+    .update({
+      output: { content: 'The capital of France is Paris.' },
+    })
+    .end();
+
+  // Example user information
+  const user = { id: 'user-5678', name: 'Jane Doe', sessionId: '123' };
+
+  // Add an optional log level of type warning to the active observation
+  updateActiveObservation({
+    level: 'WARNING',
+    statusMessage: 'This is a warning',
+  });
+
+  // Update the trace with user context
+  updateActiveTrace({
+    userId: user.id,
+    sessionId: user.sessionId,
+    metadata: { userName: user.name },
+  });
+
+  // Mark the span as complete with final output
+  span.update({ output: 'Successfully answered.' });
+});
+
+// Ensure all spans are sent to Langfuse
+await langfuseSpanProcessor.forceFlush();
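
One caveat with the file above: the final forceFlush() only runs if the traced block resolves, so an exception inside the callback would skip it and buffered spans could be lost. A minimal sketch of a hypothetical try/finally guard, not part of the commit:

// Hypothetical variant: make sure the flush runs even when the traced work throws
try {
  await startActiveObservation('context-manager', async (span) => {
    // ... traced work as in the file above ...
  });
} finally {
  await langfuseSpanProcessor.forceFlush(); // always push buffered spans
}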
