index.ts
import {
  AIMessage,
  BaseMessage,
  HumanMessage,
  SystemMessage,
} from "@langchain/core/messages";
import { ChatAnthropic } from "@langchain/anthropic";
import { StateGraph } from "@langchain/langgraph";
import { MemorySaver, Annotation } from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import wxflows from "@wxflows/sdk/langchain";
import "dotenv/config";
(async () => {
  // Define the graph state
  // See here for more info: https://langchain-ai.github.io/langgraphjs/how-tos/define-state/
  const StateAnnotation = Annotation.Root({
    messages: Annotation<BaseMessage[]>({
      reducer: (x, y) => x.concat(y),
    }),
  });

  const toolClient = new wxflows({
    endpoint: process.env.WXFLOWS_ENDPOINT,
    apikey: process.env.WXFLOWS_APIKEY,
  });
  const tools = await toolClient.lcTools;
  const toolNode = new ToolNode(tools);

  const model = new ChatAnthropic({
    model: "claude-3-5-sonnet-20240620",
    temperature: 0,
  }).bindTools(tools);
  // Define the function that determines whether to continue or not
  // We can extract the state typing via `StateAnnotation.State`
  function shouldContinue(state: typeof StateAnnotation.State) {
    const messages = state.messages;
    const lastMessage = messages[messages.length - 1] as AIMessage;
    // If the LLM makes a tool call, then we route to the "tools" node
    if (lastMessage.tool_calls?.length) {
      return "tools";
    }
    // Otherwise, we stop (reply to the user)
    return "__end__";
  }
  // Define the function that calls the model
  async function callModel(state: typeof StateAnnotation.State) {
    const messages = state.messages;
    const response = await model.invoke(messages);
    // We return a list, because this will get added to the existing list
    return { messages: [response] };
  }
  // Define a new graph
  const workflow = new StateGraph(StateAnnotation)
    .addNode("agent", callModel)
    .addNode("tools", toolNode)
    .addEdge("__start__", "agent")
    .addConditionalEdges("agent", shouldContinue)
    .addEdge("tools", "agent");

  // Initialize memory to persist state between graph runs
  const checkpointer = new MemorySaver();

  // Finally, compile the graph into a LangChain Runnable.
  // Note that we're (optionally) passing the memory when compiling the graph.
  const app = workflow.compile({ checkpointer });
  // Use the Runnable
  const finalState = await app.invoke(
    {
      messages: [
        new SystemMessage(
          "You are a helpful assistant that only uses the available tools and does not answer questions from pre-trained data. Only perform a single tool call to retrieve all the information you need."
        ),
        new HumanMessage(
          "Search for information about the book Escape by James Patterson"
        ),
      ],
    },
    { configurable: { thread_id: "42" } }
  );
  console.log(finalState.messages);
  console.log(finalState.messages[finalState.messages.length - 1].content);

  // You can reuse the same `thread_id` to ask follow-up questions; the
  // conversation context is retained via the saved state (i.e. the stored
  // list of messages).
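  // For example (a minimal sketch; this follow-up question is illustrative and
  // not part of the original run): invoking again with thread_id "42" makes the
  // checkpointer restore the earlier messages, and the reducer appends the new
  // question, so the model sees the full conversation history.
  const followUpState = await app.invoke(
    {
      messages: [new HumanMessage("Who is the publisher of that book?")],
    },
    { configurable: { thread_id: "42" } }
  );
  console.log(followUpState.messages[followUpState.messages.length - 1].content);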
})();