Skip to content

Commit 02d9435

Browse files
committed
feat: langchain + local LM Studio
1 parent 0516ff5 commit 02d9435

14 files changed

+185
-22
lines changed

.env.example

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
GOOGLE_API_KEY="AIzaSxxxxx"
2+
GROQ_API_KEY="gsk_U9xxxxxx"
3+
TAVILY_API_KEY="tvly-dev-Wb8Naxxxxxx"
4+
5+
LANGSMITH_TRACING="true"
6+
LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
7+
LANGSMITH_API_KEY="lsv2_pt_c3fxxxxxx"
8+
LANGSMITH_PROJECT="pr-standard-yesxxxxxx"

README.md

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,35 @@
44
55
## features
66

7-
- llm with groq api
8-
- tool call: tavily api
9-
- structured output
7+
- examples(mostly with langgraph)
8+
- llm with groq api
9+
- ✨ llm with a local model, tested with LM Studio
10+
- tool call: tavily api
11+
- structured output
12+
13+
- RAG
14+
- simple
15+
- generate_query_or_respond
16+
- memory: chat history
17+
- docs grading
1018

1119
## quickstart
1220

1321
```shell
1422
npm i
1523

24+
# Option 1: use local llm, configure the `baseURL` in code then run
25+
npx tsx ./langchain/chain-groq1-chat-local-mini.ts
26+
27+
# Option 2: use groq api, configure the `groq_api_key` first
28+
cp .env.example .env
1629
npx tsx ./server/chain-groq1-starter.ts
1730
```
1831

32+
# roadmap
33+
- [ ] `graph.stream` does not work with a local llm
34+
# notes
35+
- examples in python: https://github.com/uptonking/langchain-langgraph-play
36+
37+
# license
38+
MIT
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import { HumanMessage } from '@langchain/core/messages';
2+
import { ChatOpenAI } from '@langchain/openai';
3+
4+
// refers to https://github.com/calcajack3/auto-tool-eval/blob/main/src/utils/utils.ts
5+
const model = new ChatOpenAI({
6+
model: 'qwen/qwen3-4b-2507',
7+
configuration: {
8+
baseURL: 'http://localhost:1234/v1',
9+
apiKey: 'not-needed',
10+
},
11+
temperature: 0.5,
12+
});
13+
14+
const messages = [
15+
new HumanMessage('give an brief intro to langchain in less than 80 words '),
16+
];
17+
18+
const res = await model.invoke(messages);
19+
console.log(res);
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
import '@dotenvx/dotenvx/config';
2+
3+
import { HumanMessage } from '@langchain/core/messages';
4+
5+
import { initChatModel } from 'langchain/chat_models/universal';
6+
7+
// ❌ not working
8+
const model = await initChatModel('qwen/qwen3-4b-2507', {
9+
modelProvider: 'openai',
10+
baseUrl: 'http://localhost:1234/v1',
11+
apiKey: 'not-needed',
12+
temperature: 0,
13+
});
14+
15+
const messages = [
16+
// new SystemMessage('Translate the following from English into Chinese'),
17+
// new HumanMessage('what day is it today?'),
18+
new HumanMessage('give an brief intro to reactjs in less than 80 words '),
19+
];
20+
21+
const res = await model.invoke(messages);
22+
console.log(res);
23+
24+
// const stream = await model.stream(messages);
25+
26+
// const chunks: AIMessageChunk[] = [];
27+
// for await (const chunk of stream) {
28+
// chunks.push(chunk);
29+
// console.log(`${chunk.content}`);
30+
// }
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
import '@dotenvx/dotenvx/config';
2+
3+
import {
4+
AIMessageChunk,
5+
HumanMessage,
6+
SystemMessage,
7+
} from '@langchain/core/messages';
8+
import { ChatGroq } from '@langchain/groq';
9+
10+
const model = new ChatGroq({
11+
model: 'meta-llama/llama-4-scout-17b-16e-instruct',
12+
temperature: 0,
13+
});
14+
15+
const messages = [
16+
new SystemMessage('Translate the following from English into Chinese'),
17+
new HumanMessage('what day is it today?'),
18+
];
19+
20+
// const res = await model.invoke(messages);
21+
// console.log(res);
22+
23+
const stream = await model.stream(messages);
24+
25+
const chunks: AIMessageChunk[] = [];
26+
for await (const chunk of stream) {
27+
chunks.push(chunk);
28+
console.log(`${chunk.content}`);
29+
}
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
import '@dotenvx/dotenvx/config';
2+
3+
import { HumanMessage } from '@langchain/core/messages';
4+
5+
import { initChatModel } from 'langchain/chat_models/universal';
6+
7+
const model = await initChatModel('llama-3.1-8b-instant', {
8+
modelProvider: 'groq',
9+
temperature: 0,
10+
});
11+
12+
const messages = [
13+
// new SystemMessage('Translate the following from English into Chinese'),
14+
// new HumanMessage('what day is it today?'),
15+
new HumanMessage('give an brief intro to reactjs in less than 80 words '),
16+
];
17+
18+
const res = await model.invoke(messages);
19+
console.log(res);
20+
21+
// const stream = await model.stream(messages);
22+
23+
// const chunks: AIMessageChunk[] = [];
24+
// for await (const chunk of stream) {
25+
// chunks.push(chunk);
26+
// console.log(`${chunk.content}`);
27+
// }

server/chain-groq2-structured-output.ts renamed to langchain/chain-groq2-structured-output.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,8 @@ const structuredLlm = model.withStructuredOutput({
3434
required: ['briefDescription', 'details'],
3535
},
3636
});
37-
// @ts-expect-error llm-topic
3837
const res = await structuredLlm.invoke('introduce sort algorithms', {
38+
// @ts-expect-error llm-topic
3939
name: 'computerTopic',
4040
});
4141

File renamed without changes.
File renamed without changes.

0 commit comments

Comments
 (0)