docker-compose.yml

services:
  resume-matcher:
    image: ghcr.io/srbhr/resume-matcher
    build: .
    container_name: resume-matcher
    ports:
      - "${PORT:-3000}:${FRONTEND_PORT:-3000}"
    volumes:
      - resume-data:/app/backend/data
    #secrets:
    #  - llm_api_key
    environment:
      # Internal container ports (defaults: 3000 for the frontend, 8000 for the backend).
      # Note: changing BACKEND_PORT requires rebuilding the image with a
      # matching BACKEND_ORIGIN for the Next.js proxy rewrites.
      - FRONTEND_PORT=${FRONTEND_PORT:-3000}
      - BACKEND_PORT=${BACKEND_PORT:-8000}
      # Public base URL when serving behind a reverse proxy
      - FRONTEND_BASE_URL=${FRONTEND_BASE_URL:-http://localhost:3000}
      # Log level: DEBUG, INFO, WARNING, ERROR
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      #- LOG_LEVEL_FILE=${LOG_LEVEL_FILE:-}
      # Log level for LiteLLM: DEBUG, INFO, WARNING, ERROR
      - LOG_LLM=${LOG_LLM:-WARNING}
      #- LOG_LLM_FILE=${LOG_LLM_FILE:-}
      # LLM configuration: configure via the Settings UI, or set env vars for explicit overrides.
      # Supported providers: openai, anthropic, openrouter, gemini, deepseek, ollama
      # Defaults are defined in apps/backend/app/config.py
      - LLM_PROVIDER=${LLM_PROVIDER:-openai}
      - LLM_MODEL=${LLM_MODEL:-}
      # Optional Docker secret file path (same behavior as Postgres-style *_FILE variables)
      #- LLM_API_KEY_FILE=${LLM_API_KEY_FILE:-/run/secrets/llm_api_key}
      - LLM_API_KEY=${LLM_API_KEY:-}
      # For Ollama running on the host machine, use host.docker.internal:
      # - LLM_API_BASE=http://host.docker.internal:11434
      - LLM_API_BASE=${LLM_API_BASE:-}
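      # Example overrides (a sketch; these values are illustrative, not shipped
      # with the repo): Compose automatically reads a `.env` file placed next
      # to this compose file, so pointing the stack at a host-local Ollama
      # could look like:
      #   PORT=8080
      #   LLM_PROVIDER=ollama
      #   LLM_API_BASE=http://host.docker.internal:11434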
    restart: unless-stopped

#secrets:
#  llm_api_key:
#    file: ./secrets/llm_api_key
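# To supply the key as a Docker secret instead of the LLM_API_KEY env var:
# uncomment the service-level secrets entries, LLM_API_KEY_FILE, and the
# secrets block above, then put the key in ./secrets/llm_api_key.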

volumes:
  resume-data:
    driver: local
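
# Quick start (a minimal sketch; assumes the Docker Compose v2 CLI):
#   docker compose up -d --build
#   # UI at http://localhost:3000, or http://localhost:${PORT} if overridden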