-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcompose.yaml
More file actions
100 lines (94 loc) · 3.38 KB
/
compose.yaml
File metadata and controls
100 lines (94 loc) · 3.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification — Compose v2 ignores it (with a warning). Safe to remove.
version: '3.8'
services:
  # Spring Boot ingestion service, built from the Dockerfile in this directory.
  app:
    build: . # build Dockerfile in cwd
    container_name: "HighVolumeIngestionService"
    # Gate startup on healthchecks (not just container start) so Spring Boot
    # doesn't race the broker/DB during boot.
    depends_on:
      kafka:
        condition: service_healthy # waits for the kafka healthcheck to pass
      postgres:
        condition: service_healthy
    ports:
      - "8080:8080"
    environment:
      - SERVER_PORT=8080
      # Broker address on the compose network (internal PLAINTEXT listener).
      - SPRING_KAFKA_BOOTSTRAP_SERVERS=kafka:9092
      # Database connection; reWriteBatchedInserts lets the PG JDBC driver
      # rewrite batched INSERTs into multi-row statements for throughput.
      - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB}?reWriteBatchedInserts=true
      # Credentials come from the same .env variables the postgres service uses,
      # so both sides always agree.
      - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER}
      - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD}
    volumes:
      # NOTE(review): purpose of this mount is not visible here — presumably
      # build artifacts/output; confirm the container actually reads or writes
      # /var/lib/build/data.
      - "${BASE_DIR}/build:/var/lib/build/data"
    networks:
      - HighVolumeIngestionServiceNETWORK
zookeeper:
image: "confluentinc/cp-zookeeper:7.5.0"
container_name: "zookeeper"
environment:
- ZOOKEEPER_CLIENT_PORT=2181
# Purge old snapshots automatically to keep the volume clean
- ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT=3
- ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL=1
ports:
- "2181:2181"
volumes:
- "${BASE_DIR}/zookeeper:/var/lib/zookeeper/data"
networks:
- HighVolumeIngestionServiceNETWORK
  # Single-broker Kafka, reachable as kafka:9092 inside the compose network
  # and localhost:29092 from the host.
  kafka:
    image: "confluentinc/cp-kafka:7.5.0"
    container_name: "kafka"
    depends_on:
      - zookeeper # waits for zookeeper to start before starting kafka
    # Give Kafka 30 seconds to flush data to disk before the container is killed.
    stop_grace_period: 30s
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      # Two listeners: PLAINTEXT for in-network clients, PLAINTEXT_HOST for the host.
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      # Single broker, so the internal offsets topic cannot be replicated.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      # Aggressive retention (1 GiB / 1 hour) keeps the data volume bounded.
      KAFKA_LOG_RETENTION_BYTES: 1073741824
      KAFKA_LOG_RETENTION_HOURS: 1
      # Wait longer for ZooKeeper to clear the "stale" session of a previous
      # container instance before giving up.
      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 18000
      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 18000
    ports:
      - "9092:9092"
      - "29092:29092"
    volumes:
      # Persist the broker's log segments on the host.
      - "${BASE_DIR}/kafka:/var/lib/kafka/data"
    networks:
      - HighVolumeIngestionServiceNETWORK
    healthcheck:
      # Passes once the broker port is open and accepting connections.
      # NOTE(review): relies on `nc` being present in the cp-kafka image —
      # confirm, or switch to a kafka CLI probe if it is missing.
      test: [ "CMD-SHELL", "nc -z localhost 9092 || exit 1" ]
      interval: 10s
      timeout: 5s
      retries: 5
postgres:
image: 'postgres:15-alpine'
container_name: postgres_db
environment:
- POSTGRES_DB=${POSTGRES_DB}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_USER=${POSTGRES_USER}
ports:
- '5432:5432'
healthcheck:
# CRITICAL FIX: Corrected syntax for healthcheck command
test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}" ]
interval: 10s
timeout: 5s
retries: 5
volumes:
- "${BASE_DIR}/postgres:/var/lib/postgres/data"
networks:
- HighVolumeIngestionServiceNETWORK
# Dedicated bridge network so services resolve each other by name
# (kafka, postgres, zookeeper) via Docker's embedded DNS.
networks:
  HighVolumeIngestionServiceNETWORK:
    driver: bridge