-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathdocker-compose.tailscale-dev.yml
More file actions
173 lines (164 loc) · 5.16 KB
/
docker-compose.tailscale-dev.yml
File metadata and controls
173 lines (164 loc) · 5.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
# NOTE(review): the top-level `version` key is obsolete in the current Compose
# specification (Compose v2 ignores it with a warning). Kept here only for
# compatibility with older docker-compose v1 binaries — safe to drop once the
# team is fully on Compose v2.
version: "3.8"

services:
# Traefik reverse proxy (matching k3s environment)
traefik:
container_name: atria-traefik-tailscale
image: traefik:v3.0
command:
- --api.insecure=true
- --providers.docker=true
- --providers.docker.exposedbydefault=false
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --log.level=INFO
- --accesslog=true
ports:
- "80:80"
- "443:443"
- "8080:8080" # Traefik dashboard
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
networks:
- default
# Redis for Socket.IO clustering and future caching
redis:
container_name: atria-redis-tailscale
image: redis:7-alpine
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru --save 900 1 --save 300 10
volumes:
- redis_data_tailscale:/data
ports:
- "6379:6379" # Expose for debugging/monitoring
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 3s
retries: 5
networks:
- default
# Backend instance 1
backend-1:
container_name: atria-backend-tailscale-1
build:
context: ./backend/atria
dockerfile: Dockerfile
volumes:
- ./backend/atria:/app
env_file:
- ./backend/atria/.flaskenv
- .env.development
environment:
- SEED_DB=${SEED_DB:-true} # Seed on first instance (can override with SEED_DB=false)
- REDIS_URL=redis://redis:6379/0
- SOCKETIO_REDIS_URL=redis://redis:6379/1
- GUNICORN_WORKERS=2
- INSTANCE_ID=backend-1 # For debugging
labels:
- traefik.enable=true
# Router for API and Socket.IO - accepts both localhost and Tailscale IP
- traefik.http.routers.backend-1.rule=(Host(`localhost`) || Host(`100.67.207.5`)) && (PathPrefix(`/api`) || PathPrefix(`/socket.io`))
- traefik.http.routers.backend-1.entrypoints=web
- traefik.http.routers.backend-1.service=backend-lb
# Service configuration
- traefik.http.services.backend-lb.loadbalancer.server.port=5000
- traefik.http.services.backend-lb.loadbalancer.sticky.cookie=true
- traefik.http.services.backend-lb.loadbalancer.sticky.cookie.name=atria-affinity
- traefik.http.services.backend-lb.loadbalancer.sticky.cookie.httpOnly=true
- traefik.http.services.backend-lb.loadbalancer.sticky.cookie.sameSite=lax
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
networks:
- default
# Backend instance 2 (for testing clustering)
backend-2:
container_name: atria-backend-tailscale-2
build:
context: ./backend/atria
dockerfile: Dockerfile
volumes:
- ./backend/atria:/app
env_file:
- ./backend/atria/.flaskenv
- .env.development
environment:
- SEED_DB=false # Don't seed from second instance
- REDIS_URL=redis://redis:6379/0
- SOCKETIO_REDIS_URL=redis://redis:6379/1
- GUNICORN_WORKERS=2
- INSTANCE_ID=backend-2 # For debugging
labels:
- traefik.enable=true
# Router for API and Socket.IO - accepts both localhost and Tailscale IP
- traefik.http.routers.backend-2.rule=(Host(`localhost`) || Host(`100.67.207.5`)) && (PathPrefix(`/api`) || PathPrefix(`/socket.io`))
- traefik.http.routers.backend-2.entrypoints=web
- traefik.http.routers.backend-2.service=backend-lb
# Share the same service as backend-1 for load balancing
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
networks:
- default
# Frontend (Vite) - configured for Tailscale access
frontend-vite:
container_name: atria-client-vite-tailscale
image: node:20-alpine
working_dir: /app
command: sh -c "npm install && npm run dev -- --host"
ports:
- "5173:5173"
volumes:
- ./frontend:/app
- /app/node_modules
env_file:
- .env.development # Load variables from .env.development file
environment:
- VITE_API_URL=http://100.67.207.5/api # Tailscale IP through Traefik on port 80
- VITE_FORCE_HTTP_FALLBACK=${VITE_FORCE_HTTP_FALLBACK:-false} # Pass through from .env.development
networks:
- default
depends_on:
- backend-1
- backend-2
- traefik
# Database (unchanged)
db:
container_name: atria-db-tailscale
image: postgres:15-alpine
volumes:
- postgres_data_tailscale:/var/lib/postgresql/data
environment:
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
interval: 5s
timeout: 5s
retries: 5
networks:
- default
# DiceBear Avatar API for local avatar generation
dicebear-api:
container_name: atria-avatars-tailscale
image: dicebear/api:3
ports:
- "5001:3000"
tmpfs:
- /run
- /tmp
networks:
- default
restart: unless-stopped
# Named volumes for persistent data; the bare keys use Docker-managed
# defaults (local driver).
volumes:
  postgres_data_tailscale:
  redis_data_tailscale:

# Explicit network name so containers from other compose files can attach
# to atria-tailscale-network by name.
networks:
  default:
    name: atria-tailscale-network