Skip to content

Commit b3f5cce

Browse files
Merge pull request #422 from hotosm/feature/docker-compose-prod
Enhance : Docker Compose
2 parents 1002584 + bce89b0 commit b3f5cce

File tree

9 files changed

+954
-213
lines changed

9 files changed

+954
-213
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,3 +55,6 @@ ramp
5555
trainings
5656
fAIr-utilities
5757
**/.DS_Store
58+
59+
60+
.env*

backend/login/views.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ def get(self, request, format=None): # pragma: no cover
5959
# Generating token through osm_auth library method
6060
uri = request.build_absolute_uri()
6161
token = osm_auth.callback(uri)
62+
token["access_token"] = token.pop("user_data")
6263
return JsonResponse(token)
6364

6465

docker-compose.dev.yml

Lines changed: 219 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,219 @@
---
# fAIr Application Development Stack
#
# Profiles: run with `--profile cpu` or `--profile gpu` to start the
# matching training worker (and flower, which is in both profiles).
# All services read ${ENV_FILE:-.env.dev} via the &common-env anchor.

x-common-env: &common-env
  env_file:
    - ${ENV_FILE:-.env.dev}

services:
  postgres:
    <<: *common-env
    image: postgis/postgis:17-3.5-alpine
    platform: linux/amd64
    container_name: pgsql
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-ai}
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-admin}
    volumes:
      - ${POSTGRES_DATA:-./fair-app/data/postgres}:/var/lib/postgresql/data
    ports:
      # Host 5434 to avoid colliding with a locally installed postgres.
      - "5434:5432"
    networks:
      - backend-network
    healthcheck:
      # $$ defers expansion to the container shell, so the probe follows
      # whatever POSTGRES_USER the container was actually started with
      # instead of a hardcoded "postgres".
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    <<: *common-env
    image: redis:8-alpine
    platform: linux/amd64
    container_name: redis
    restart: unless-stopped
    command: redis-server
    volumes:
      - ${REDIS_DATA:-./fair-app/data/redis}:/data
    ports:
      - "6379:6379"
    networks:
      - backend-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  backend-api:
    <<: *common-env
    image: ghcr.io/hotosm/fair-api:develop
    build:
      context: ./backend
      dockerfile: Dockerfile.API
    platform: linux/amd64
    container_name: api
    restart: unless-stopped
    command: python manage.py runserver_with_q 0.0.0.0:8000
    ports:
      # Bound to loopback only — the dev API is not exposed to the LAN.
      - "127.0.0.1:8000:8000"
    volumes:
      - ${APP_LOGS:-./fair-app/data/log}:/app/log
      - ${RAMP_HOME:-./fair-app/data/ramp}:/RAMP_HOME
      - ${TRAINING_WORKSPACE:-./fair-app/data/trainings}:/TRAINING_WORKSPACE
      # Source bind-mounts for live-reload during development.
      - ${APP_HOME:-./}backend/core:/app/core
      - ${APP_HOME:-./}backend/login:/app/login
      - ${APP_HOME:-./}backend/aiproject:/app/aiproject
    depends_on:
      redis:
        condition: service_healthy
      postgres:
        condition: service_healthy
    networks:
      - backend-network
    healthcheck:
      test:
        [
          "CMD-SHELL",
          'python -c "import sys, requests; r = requests.get(''http://127.0.0.1:8000/api/''); print(r); sys.exit(0 if r.status_code==200 else 1)"',
        ]
      interval: 30s
      timeout: 10s
      retries: 2
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 1G

  backend-worker-cpu:
    <<: *common-env
    profiles: ["cpu"]
    image: ghcr.io/hotosm/fair-worker:develop-cpu
    build:
      context: ./backend
      dockerfile: Dockerfile.workers
      args:
        - BUILD_TYPE=cpu
    platform: linux/amd64
    container_name: worker-cpu
    restart: unless-stopped
    command: celery -A aiproject worker --loglevel=INFO --concurrency=1 -Q ramp_training,yolo_training
    # NOTE(review): this explicit env_file OVERRIDES the merged &common-env
    # entry (YAML merge keys are shallow and explicit keys win), so this
    # worker reads ./backend/.env while every other service reads
    # ${ENV_FILE:-.env.dev}. Confirm the divergence is intentional.
    env_file:
      - ./backend/.env
    volumes:
      - ${APP_LOGS:-./fair-app/data/log}:/app/log
      - ${RAMP_HOME:-./fair-app/data/ramp}:/RAMP_HOME
      - ${TRAINING_WORKSPACE:-./fair-app/data/trainings}:/TRAINING_WORKSPACE
      - ${APP_HOME:-./}backend/core:/app/core
      - ${APP_HOME:-./}backend/login:/app/login
      - ${APP_HOME:-./}backend/aiproject:/app/aiproject
    depends_on:
      backend-api:
        condition: service_healthy
      redis:
        condition: service_healthy
      postgres:
        condition: service_healthy
    networks:
      - backend-network
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 2G

  backend-worker-gpu:
    <<: *common-env
    profiles: ["gpu"]
    image: ghcr.io/hotosm/fair-worker:develop-gpu
    build:
      context: ./backend
      dockerfile: Dockerfile.workers
      args:
        - BUILD_TYPE=gpu
    platform: linux/amd64
    container_name: worker-gpu
    restart: unless-stopped
    command: celery -A aiproject worker --loglevel=INFO --concurrency=1 -Q ramp_training,yolo_training --pool=solo
    volumes:
      - ${APP_LOGS:-./fair-app/data/log}:/app/log
      - ${RAMP_HOME:-./fair-app/data/ramp}:/RAMP_HOME
      - ${TRAINING_WORKSPACE:-./fair-app/data/trainings}:/TRAINING_WORKSPACE
      - ${APP_HOME:-./}backend/core:/app/core
      - ${APP_HOME:-./}backend/login:/app/login
      - ${APP_HOME:-./}backend/aiproject:/app/aiproject
    depends_on:
      backend-api:
        condition: service_healthy
      redis:
        condition: service_healthy
      postgres:
        condition: service_healthy
    networks:
      - backend-network
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
        limits:
          memory: 4G

  prediction-worker:
    <<: *common-env
    image: ghcr.io/hotosm/fair-offline-predictor:develop
    build:
      context: ./backend
      dockerfile: Dockerfile.API
      args:
        - BUILD_TARGET=predictor
    platform: linux/amd64
    container_name: prediction-worker
    restart: unless-stopped
    command: celery -A aiproject worker --loglevel=INFO --concurrency=1 -Q predictions --pool=solo
    volumes:
      - ${APP_LOGS:-./fair-app/data/log}:/app/log
      - ${RAMP_HOME:-./fair-app/data/ramp}:/RAMP_HOME
      - ${TRAINING_WORKSPACE:-./fair-app/data/trainings}:/TRAINING_WORKSPACE
      - ${APP_HOME:-./}backend/core:/app/core
      - ${APP_HOME:-./}backend/login:/app/login
      - ${APP_HOME:-./}backend/aiproject:/app/aiproject
    depends_on:
      backend-api:
        condition: service_healthy
      redis:
        condition: service_healthy
      postgres:
        condition: service_healthy
    networks:
      - backend-network
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: 2G

  flower:
    <<: *common-env
    image: mher/flower:2.0.1
    platform: linux/amd64
    container_name: flower
    restart: unless-stopped
    # NOTE(review): redis-server above is started with no auth, so the
    # user:password part of this broker URL is only meaningful if auth is
    # added later — confirm the intended credentials.
    command: celery --broker=redis://${REDIS_USER:-redis}:${REDIS_PASSWORD:-}@redis:6379/0 flower --address=0.0.0.0 --port=5000
    ports:
      - "5500:5000"
    depends_on:
      redis:
        condition: service_healthy
    networks:
      - backend-network
    profiles:
      - cpu
      - gpu

networks:
  backend-network:
    driver: bridge

0 commit comments

Comments (0)