This repository was archived by the owner on Sep 12, 2024. It is now read-only.

Commit 2419b80

chore: update example

1 parent: a10a0b5

14 files changed: +554, −19 lines

example/package-lock.json

Lines changed: 404 additions & 0 deletions
Generated file; diff not rendered by default.

example/package.json

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+{
+  "name": "@llama-node/examples",
+  "version": "1.0.0",
+  "description": "",
+  "main": "index.js",
+  "type": "module",
+  "scripts": {
+    "build": "tsc -p .",
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "author": "",
+  "license": "MIT",
+  "devDependencies": {
+    "@types/node": "^18.15.11",
+    "typescript": "^5.0.4"
+  },
+  "dependencies": {
+    "@llama-node/core": "^0.0.22",
+    "@llama-node/llama-cpp": "^0.0.22",
+    "llama-node": "^0.0.22"
+  }
+}
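
The new example/package.json makes the examples a standalone consumer of the published packages instead of reaching into the repository's src tree. Assembled from the hunks below, the start of a llama-cpp example now looks roughly like this (a sketch only; the remaining LoadConfig fields are not shown in this diff):

import { LLama } from "llama-node";
import { LLamaCpp, LoadConfig } from "llama-node/dist/llm/llama-cpp.js";
import path from "path";

// The model file is now expected one directory above the example's working directory.
const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-4bit-rev1.bin");

// Construct the wrapper around the llama.cpp backend; llama.load(config) then takes
// a LoadConfig whose full field list falls outside the hunks shown here.
const llama = new LLama(LLamaCpp);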
Lines changed: 4 additions & 4 deletions
@@ -1,8 +1,8 @@
-import { LLama } from "../../src";
-import { LLamaCpp, LoadConfig } from "../../src/llm/llama-cpp";
+import { LLama } from "llama-node";
+import { LLamaCpp, LoadConfig } from "llama-node/dist/llm/llama-cpp.js";
 import path from "path";

-const model = path.resolve(process.cwd(), "./ggml-vicuna-7b-4bit-rev1.bin");
+const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-4bit-rev1.bin");

 const llama = new LLama(LLamaCpp);

@@ -16,7 +16,7 @@ const config: LoadConfig = {
     logitsAll: false,
     vocabOnly: false,
     useMlock: false,
-    embedding: false,
+    embedding: true,
 };

 llama.load(config);
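
This hunk also flips embedding to true in the llama-cpp config, which an embedding example needs before it can ask the backend for vectors. A rough follow-up sketch; the getEmbedding call and its parameter names are assumptions modeled on the llama-node 0.0.x API and are not part of this commit:

// Assumed usage, not part of this diff: with embedding enabled, request an
// embedding through the wrapper (method and parameter names are assumptions).
const embeddingParams = {
    nThreads: 4,
    nTokPredict: 1024,
    topK: 40,
    topP: 0.1,
    temp: 0.2,
    repeatPenalty: 1,
    prompt: "A sentence to embed",
};

llama.getEmbedding(embeddingParams).then((embedding) => {
    // Expected to resolve to a numeric vector.
    console.log(embedding.length);
});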
Lines changed: 3 additions & 3 deletions
@@ -1,8 +1,8 @@
-import { LLama } from "../../src";
-import { LLamaCpp, LoadConfig } from "../../src/llm/llama-cpp";
+import { LLama } from "llama-node";
+import { LLamaCpp, LoadConfig } from "llama-node/dist/llm/llama-cpp.js";
 import path from "path";

-const model = path.resolve(process.cwd(), "./ggml-vicuna-7b-4bit-rev1.bin");
+const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-4bit-rev1.bin");

 const llama = new LLama(LLamaCpp);

Lines changed: 3 additions & 3 deletions
@@ -1,8 +1,8 @@
-import { LLama } from "../../src";
-import { LLamaCpp, LoadConfig } from "../../src/llm/llama-cpp";
+import { LLama } from "llama-node";
+import { LLamaCpp, LoadConfig } from "llama-node/dist/llm/llama-cpp.js";
 import path from "path";

-const model = path.resolve(process.cwd(), "./ggml-vicuna-7b-4bit-rev1.bin");
+const model = path.resolve(process.cwd(), "../ggml-vicuna-7b-4bit-rev1.bin");

 const llama = new LLama(LLamaCpp);

Lines changed: 3 additions & 3 deletions
@@ -1,9 +1,9 @@
-import { LLama } from "../../src";
-import { LLamaRS } from "../../src/llm/llama-rs";
+import { LLama } from "llama-node";
+import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
 import path from "path";
 import fs from "fs";

-const model = path.resolve(process.cwd(), "./ggml-alpaca-7b-q4.bin");
+const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");

 const llama = new LLama(LLamaRS);

Lines changed: 3 additions & 3 deletions
@@ -1,8 +1,8 @@
-import { LLama } from "../../src";
-import { LLamaRS } from "../../src/llm/llama-rs";
+import { LLama } from "llama-node";
+import { LLamaRS } from "llama-node/dist/llm/llama-rs.js";
 import path from "path";

-const model = path.resolve(process.cwd(), "./ggml-alpaca-7b-q4.bin");
+const model = path.resolve(process.cwd(), "../ggml-alpaca-7b-q4.bin");

 const llama = new LLama(LLamaRS);

File renamed without changes.
File renamed without changes.
File renamed without changes.
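
For context on how these examples drive generation once the model is loaded: a rough sketch below; the createCompletion name, its parameters, and the response shape are assumptions based on the llama-node 0.0.x README and are not shown anywhere in this diff:

// Assumed usage, not part of this diff: parameter names may differ from the
// actual example sources.
llama.createCompletion(
    {
        prompt: "Tell me a short story.",
        numPredict: 128,
        temp: 0.2,
        topP: 1,
        topK: 40,
        repeatPenalty: 1,
        seed: 0,
    },
    (response) => {
        // Tokens are streamed through the callback as they are generated.
        process.stdout.write(response.token);
    }
);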
