This repository was archived by the owner on Sep 12, 2024. It is now read-only.

Commit 653aa69
chore: update example
1 parent 9fa5b4e

27 files changed: +234 −27 lines

example/js/langchain/langchain.js

Lines changed: 29 additions & 0 deletions
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { LLamaEmbeddings } from "llama-node/dist/extensions/langchain.js";
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

// embedding: true is required here so the backend exposes embeddings
// for the LangChain adapter below.
const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: true,
    useMmap: true,
};

llama.load(config);

const run = async () => {
    // Load the docs into the vector store
    const vectorStore = await MemoryVectorStore.fromTexts(
        ["Hello world", "Bye bye", "hello nice world"],
        [{ id: 2 }, { id: 1 }, { id: 3 }],
        new LLamaEmbeddings({ maxConcurrency: 1 }, llama)
    );

    // Search for the most similar document
    const resultOne = await vectorStore.similaritySearch("hello world", 1);
    console.log(resultOne);
};

run();
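A note on what this wires together: LLamaEmbeddings implements LangChain's Embeddings interface, so the locally loaded model can back any LangChain vector store. As a minimal follow-up sketch (assuming the vectorStore built inside run() above), the standard LangChain vector-store method similaritySearchWithScore returns each match together with its similarity score; the score scale depends on the store implementation:

    // Sketch: top two matches plus scores, placed inside run() above.
    const withScores = await vectorStore.similaritySearchWithScore("hello world", 2);
    for (const [doc, score] of withScores) {
        console.log(score.toFixed(3), doc.pageContent);
    }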

example/js/llama-cpp/embedding.js

Lines changed: 30 additions & 0 deletions
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

// embedding: true makes the model return an embedding vector
// instead of running a completion.
const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: true,
    useMmap: true,
};

llama.load(config);

const prompt = `Who is the president of the United States?`;

const params = {
    nThreads: 4,
    nTokPredict: 2048,
    topK: 40,
    topP: 0.1,
    temp: 0.2,
    repeatPenalty: 1,
    prompt,
};

// Prints the embedding vector (an array of floats) for the prompt.
llama.getEmbedding(params).then(console.log);
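getEmbedding resolves to a plain array of floats (the llama-rs example below serializes it straight to JSON), which makes ad-hoc comparisons easy. As a hedged sketch, the cosine helper below is illustrative, not part of llama-node; it reuses the params object from the example above:

    // Hypothetical helper: cosine similarity of two equal-length vectors.
    const cosine = (a, b) => {
        let dot = 0, na = 0, nb = 0;
        for (let i = 0; i < a.length; i++) {
            dot += a[i] * b[i];
            na += a[i] * a[i];
            nb += b[i] * b[i];
        }
        return dot / (Math.sqrt(na) * Math.sqrt(nb));
    };

    const a = await llama.getEmbedding({ ...params, prompt: "I like dogs" });
    const b = await llama.getEmbedding({ ...params, prompt: "I like cats" });
    console.log(cosine(a, b)); // closer to 1 means more similar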

example/js/llama-cpp/llama-cpp.js

Lines changed: 37 additions & 0 deletions
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: false,
    useMmap: true,
};

llama.load(config);

// Vicuna-style chat template; the stop sequence cuts generation off
// before the model starts writing the next "### Human" turn.
const template = `How are you?`;
const prompt = `### Human:

${template}

### Assistant:`;

llama.createCompletion({
    nThreads: 4,
    nTokPredict: 2048,
    topK: 40,
    topP: 0.1,
    temp: 0.2,
    repeatPenalty: 1,
    stopSequence: "### Human",
    prompt,
}, (response) => {
    // Stream each generated token to stdout as it arrives.
    process.stdout.write(response.token);
});
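createCompletion streams token by token through the callback. If you want the full response as one string, a hedged pattern (assuming createCompletion returns a promise that settles when generation ends, which matches llama-node's typings of this era; verify against your installed version) is to accumulate tokens and await the call:

    // Sketch: collect the streamed tokens into a single string.
    const completeToString = async (params) => {
        let text = "";
        await llama.createCompletion(params, (response) => {
            text += response.token;
        });
        return text;
    };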

example/js/llama-cpp/tokenize.js

Lines changed: 21 additions & 0 deletions
import { LLama } from "llama-node";
import { LLamaCpp } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-1.1-q4_1.bin");
const llama = new LLama(LLamaCpp);

const config = {
    path: model,
    enableLogging: true,
    nCtx: 1024,
    nParts: -1,
    seed: 0,
    f16Kv: false,
    logitsAll: false,
    vocabOnly: false,
    useMlock: false,
    embedding: false,
    useMmap: true,
};

llama.load(config);

const content = "how are you?";

// Prints the token ids the model assigns to `content`.
llama.tokenize({ content, nCtx: 2048 }).then(console.log);
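Tokenization is mostly useful for budgeting: a prompt has to fit in the nCtx window configured at load time. A minimal sketch, assuming tokenize resolves to an array of token ids as the console.log above suggests:

    // Sketch: check whether a prompt fits the context window before completing.
    const fitsContext = async (prompt, nCtx = 1024) => {
        const tokens = await llama.tokenize({ content: prompt, nCtx });
        return tokens.length <= nCtx;
    };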

example/js/llama-rs/embedding.js

Lines changed: 30 additions & 0 deletions
import { LLama } from "llama-node";
import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
import path from "path";
import fs from "fs";

const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");
const llama = new LLama(LLamaRS);

llama.load({ path: model });

// Embed a prompt and write the resulting vector to a JSON file.
const getWordEmbeddings = async (prompt, file) => {
    const data = await llama.getEmbedding({
        prompt,
        numPredict: 128,
        temp: 0.2,
        topP: 1,
        topK: 40,
        repeatPenalty: 1,
        repeatLastN: 64,
        seed: 0,
    });
    console.log(prompt, data);
    await fs.promises.writeFile(
        path.resolve(process.cwd(), file),
        JSON.stringify(data)
    );
};

const run = async () => {
    const dog1 = `My favourite animal is the dog`;
    await getWordEmbeddings(dog1, "./example/semantic-compare/dog1.json");

    const dog2 = `I have just adopted a cute dog`;
    await getWordEmbeddings(dog2, "./example/semantic-compare/dog2.json");

    const cat1 = `My favourite animal is the cat`;
    await getWordEmbeddings(cat1, "./example/semantic-compare/cat1.json");
};

run();
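The three JSON files written here (dog1.json, dog2.json, cat1.json) are exactly the inputs that the semantic-compare script further down imports and compares.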

example/js/llama-rs/llama-rs.js

Lines changed: 27 additions & 0 deletions
import { LLama } from "llama-node";
import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
import path from "path";

const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");
const llama = new LLama(LLamaRS);

llama.load({ path: model });

// Alpaca instruction template: the model was fine-tuned on this exact layout.
const template = `how are you`;
const prompt = `Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:

${template}

### Response:`;

llama.createCompletion({
    prompt,
    numPredict: 128,
    temp: 0.2,
    topP: 1,
    topK: 40,
    repeatPenalty: 1,
    repeatLastN: 64,
    seed: 0,
    feedPrompt: true,
}, (response) => {
    // Stream each generated token to stdout as it arrives.
    process.stdout.write(response.token);
});
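Note that the parameter names differ between backends: the llama.cpp examples above use nTokPredict for the generation budget, while the llama-rs backend uses numPredict plus repeatLastN and feedPrompt (which, as the name suggests, feeds the prompt through the model before sampling starts). The two inference APIs are otherwise shaped the same: a params object plus a streaming token callback.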

example/js/llama-rs/semantic-compare/cat1.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.
example/js/llama-rs/semantic-compare/compare.js

Lines changed: 16 additions & 0 deletions
import * as tf from "@tensorflow/tfjs-node";
import dog1 from "./dog1.json";
import dog2 from "./dog2.json";
import cat1 from "./cat1.json";

const dog1Tensor = tf.tensor(dog1);
const dog2Tensor = tf.tensor(dog2);
const cat1Tensor = tf.tensor(cat1);

// Cosine similarity: dot(a, b) / (|a| * |b|), in [-1, 1]; higher is more similar.
const compareCosineSimilarity = (tensor1, tensor2) => {
    const dotProduct = tensor1.dot(tensor2);
    const norm1 = tensor1.norm();
    const norm2 = tensor2.norm();
    const cosineSimilarity = dotProduct.div(norm1.mul(norm2));
    return cosineSimilarity.dataSync()[0];
};

console.log("dog1 vs dog2", compareCosineSimilarity(dog1Tensor, dog2Tensor));
console.log("dog1 vs cat1", compareCosineSimilarity(dog1Tensor, cat1Tensor));

example/js/llama-rs/semantic-compare/dog1.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.

example/js/llama-rs/semantic-compare/dog2.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.

0 commit comments