-
Notifications
You must be signed in to change notification settings - Fork 20
Expand file tree
/
Copy pathdocker-compose-vllm.yml
More file actions
104 lines (96 loc) · 2.56 KB
/
docker-compose-vllm.yml
File metadata and controls
104 lines (96 loc) · 2.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# Compose stack for the tilellm knowledge-base service:
#   - backend-llm-train-kb / backend-llm-query-kb: app containers (same image,
#     different worker counts and host port mappings)
#   - redis-kb: shared Redis used by both backends (REDIS_URL)
#   - qdrant-kb: vector store with host-mounted storage
#   - vllm-generate / vllm-pool: two vLLM OpenAI-compatible servers sharing one
#     GPU (each reserves 40% of GPU memory via --gpu-memory-utilization 0.4)
#
# NOTE(review): the `version` key is obsolete under the Compose Specification —
# Compose v2 ignores it with a warning. Kept only for older docker-compose CLIs.
version: "3.8"
services:
  backend-llm-train-kb:
    image: tilellm:latest
    container_name: backend-llm-train-kb
    environment:
      # SECURITY: hard-coded JWT secret in a file likely committed to VCS.
      # Move it to an env file / secret store, e.g. JWT_SECRET_KEY=${JWT_SECRET_KEY}
      - JWT_SECRET_KEY=la-tua-nuova-chiave-segreta-256-bit
      - TIMEOUT=240
      - REDIS_URL=redis://redis-kb:6379/0
      - TOKENIZERS_PARALLELISM=false
      - WORKERS=2
      # - TILELLM_ROLE=train
    ports:
      - "8000:8000"
      - "3009:3009"
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: '4096M'
        reservations:
          cpus: '1'
          memory: '2048M'
          devices:
            # Requires the NVIDIA Container Toolkit on the host.
            - driver: nvidia
              count: all
              capabilities: [gpu, utility, compute]
  backend-llm-query-kb:
    image: tilellm:latest
    container_name: backend-llm-query-kb
    environment:
      # SECURITY: same hard-coded secret as the train service — externalize it.
      - JWT_SECRET_KEY=la-tua-nuova-chiave-segreta-256-bit
      - TIMEOUT=240
      - REDIS_URL=redis://redis-kb:6379/0
      - TOKENIZERS_PARALLELISM=false
      - WORKERS=3
    ports:
      # Host ports offset by one so both backends can run side by side.
      - "8001:8000"
      - "3010:3009"
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: '4096M'
        reservations:
          cpus: '1'
          memory: '2048M'
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu, utility, compute]
  redis-kb:
    container_name: redis-kb
    image: redis:latest
    ports:
      - "6379:6379"
  qdrant-kb:
    container_name: qdrant-kb
    image: qdrant/qdrant
    ports:
      - "6333:6333"   # HTTP API
      - "6334:6334"   # gRPC API
    volumes:
      - "~/sviluppo/qdrant_storage:/qdrant/storage"
  vllm-generate:
    image: vllm/vllm-openai:latest
    container_name: vllm-generate
    ports:
      - "8002:8000"
    volumes:
      # Shared HF cache so both vLLM containers download model weights once.
      - "~/.cache/huggingface:/root/.cache/huggingface"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu, utility, compute]
    # Text-generation runner; 0.4 GPU-memory fraction leaves room for vllm-pool.
    command: ["--max-model-len", "2048", "--runner", "generate", "--gpu-memory-utilization", "0.4", "--model", "KingNish/Qwen2.5-0.5b-Test-ft"]
    # vLLM needs a large shared-memory segment for tensor transfer.
    ipc: host
  vllm-pool:
    image: vllm/vllm-openai:latest
    container_name: vllm-pool
    ports:
      - "8003:8000"
    volumes:
      - "~/.cache/huggingface:/root/.cache/huggingface"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu, utility, compute]
    # Pooling/embedding runner for the same model, second 0.4 GPU-memory slice.
    command: ["--max-model-len", "2048", "--runner", "pooling", "--gpu-memory-utilization", "0.4", "--model", "KingNish/Qwen2.5-0.5b-Test-ft"]
    ipc: host