-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy pathdocker-compose.multi-gpu.yml
More file actions
executable file
·195 lines (182 loc) · 6.37 KB
/
docker-compose.multi-gpu.yml
File metadata and controls
executable file
·195 lines (182 loc) · 6.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
# Docker Compose for Multi-GPU Setups (Mixed Vendors)
#
# This configuration supports multiple GPUs from different vendors.
# SmarterRouter will automatically detect all GPUs and combine their VRAM.
#
# Supported combinations:
# - NVIDIA + AMD
# - NVIDIA + Intel
# - AMD + Intel
# - NVIDIA + AMD + Intel
#
# Note: Apple Silicon cannot be combined with other GPUs (runs on host only)
#
# Requirements:
# - All relevant drivers installed for each GPU vendor
# - NVIDIA Container Toolkit (for NVIDIA GPUs)
# - ROCm runtime (for AMD GPUs)
# - Intel i915 driver (for Intel GPUs)
#
# Quick Start:
# 1. Copy this file to your project root: cp docs/docker-compose.multi-gpu.yml docker-compose.yml
# 2. Copy environment template: cp ENV_DEFAULT .env
# 3. If using AMD GPUs, get group IDs and add to .env:
# echo "RENDER_GID=$(getent group render | cut -d: -f3)" >> .env
# echo "VIDEO_GID=$(getent group video | cut -d: -f3)" >> .env
# 4. Edit to uncomment only the GPU types you have
# 5. Run: docker-compose --compatibility up -d (for NVIDIA)
# OR: docker-compose up -d (without NVIDIA)
#
# Verify GPU detection:
# docker logs smarterrouter | grep -i "gpu\|vram"
services:
  smarterrouter:
    image: ghcr.io/peva3/smarterrouter:latest
    container_name: smarterrouter
    ports:
      - "11436:11436"
    env_file:
      - .env
    volumes:
      # Persistent application data (routing DB, model metadata)
      - ./data:/app/data:rw
      # Long-syntax tmpfs mount: scratch space wiped on container restart
      - type: tmpfs
        target: /tmp
    restart: unless-stopped
    networks:
      - smarterrouter-network
    healthcheck:
      # Uses the Python stdlib so no curl/wget is needed inside the image
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:11436/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

    # =========================================================================
    # MULTI-GPU CONFIGURATION
    # Uncomment the sections for the GPUs you have
    # =========================================================================

    # -------------------------------------------------------------------------
    # NVIDIA GPUs (use with deploy.resources)
    # -------------------------------------------------------------------------
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all  # Use all NVIDIA GPUs
              capabilities: [gpu]

    # -------------------------------------------------------------------------
    # AMD GPUs (use with devices + group_add)
    # -------------------------------------------------------------------------
    # Uncomment if you have AMD GPUs:
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    # group_add:
    #   - ${RENDER_GID:-109}  # Get with: getent group render | cut -d: -f3
    #   - ${VIDEO_GID:-44}    # Get with: getent group video | cut -d: -f3

    # -------------------------------------------------------------------------
    # Intel GPUs (use with devices)
    # -------------------------------------------------------------------------
    # Uncomment if you have Intel Arc GPUs (add to devices list above):
    # devices:
    #   - /dev/dri

    # -------------------------------------------------------------------------
    # NVIDIA + AMD Combined
    # -------------------------------------------------------------------------
    # For systems with both NVIDIA and AMD GPUs, combine both configurations:
    #
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    # group_add:  # Required for AMD
    #   - ${RENDER_GID:-109}
    #   - ${VIDEO_GID:-44}

    # -------------------------------------------------------------------------
    # NVIDIA + Intel Combined
    # -------------------------------------------------------------------------
    # For systems with both NVIDIA and Intel GPUs:
    #
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    # devices:
    #   - /dev/dri

    # -------------------------------------------------------------------------
    # AMD + Intel Combined
    # -------------------------------------------------------------------------
    # For systems with both AMD and Intel GPUs (no NVIDIA):
    #
    # devices:
    #   - /dev/kfd  # AMD
    #   - /dev/dri  # Both AMD and Intel
    # group_add:  # Required for AMD
    #   - ${RENDER_GID:-109}
    #   - ${VIDEO_GID:-44}

    # -------------------------------------------------------------------------
    # NVIDIA + AMD + Intel Combined
    # -------------------------------------------------------------------------
    # For systems with all three GPU types:
    #
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    # devices:
    #   - /dev/kfd
    #   - /dev/dri
    # group_add:  # Required for AMD
    #   - ${RENDER_GID:-109}
    #   - ${VIDEO_GID:-44}

    # =========================================================================
    # ENVIRONMENT CONFIGURATION
    # =========================================================================
    # environment:
    #   # Total VRAM limit across all GPUs (leave some buffer)
    #   - ROUTER_VRAM_MAX_TOTAL_GB=48
    #
    #   # Optional: Pin a model to keep in VRAM
    #   - ROUTER_PINNED_MODEL=phi3:mini
    #
    #   # Auto-unload when VRAM is pressured
    #   - ROUTER_VRAM_AUTO_UNLOAD_ENABLED=true
    #   - ROUTER_VRAM_UNLOAD_THRESHOLD_PCT=85
# Dedicated bridge network so the service is isolated from the default network
networks:
  smarterrouter-network:
    driver: bridge
# =========================================================================
# EXAMPLE CONFIGURATIONS
# =========================================================================
# Example: 2x NVIDIA RTX 3090 + 1x AMD Radeon Pro
# ---
# Total VRAM: 48GB (2x24) + 16GB = 64GB
# Recommended ROUTER_VRAM_MAX_TOTAL_GB: 60 (leave 4GB buffer)
#
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: all
# capabilities: [gpu]
# devices:
# - /dev/kfd
# - /dev/dri
# group_add:
# - ${RENDER_GID:-109}
# - ${VIDEO_GID:-44}
# environment:
# - ROUTER_VRAM_MAX_TOTAL_GB=60