-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathdocker-compose.yaml
More file actions
150 lines (141 loc) · 5.01 KB
/
docker-compose.yaml
File metadata and controls
150 lines (141 loc) · 5.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# Local development stack: NBS SQL Server + Liquibase migrations + WildFly app
# server, a single-node KRaft Kafka broker, two Kafka Connect workers
# (Confluent + Debezium), and the reporting pipeline service.
services:
  # NBS SQL Server database. NOTE(review): depended on below with
  # condition: service_healthy, so the image presumably defines its own
  # HEALTHCHECK — confirm, since none is declared here.
  nbs-mssql:
    image: ghcr.io/cdcent/nedssdb:latest
    platform: linux/amd64
    environment:
      - DATABASE_VERSION=6.0.18.1
      # SECURITY: hard-coded fallback passwords are fine for throwaway local
      # dev only — never reuse these values outside this compose stack.
      - DEPLOY_ADMIN_PASSWORD=${DATABASE_PASSWORD:-PizzaIsGood33!}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD:-PizzaIsGood33!}
      - LIQUIBASE_USER_PASSWORD=${LIQUIBASE_USER_PASSWORD:-db_deploy_admin}
    volumes:
      - ./containers/db/initialize:/docker-entrypoint-initdb.d/
    ports:
      # Quoted to avoid YAML's digit:digit (sexagesimal) implicit-typing trap
      # and for consistency with the other services' port mappings.
      - "3433:1433"
    mem_limit: 2.5G
  # One-shot schema migration job; downstream services wait for it to exit
  # successfully (service_completed_successfully).
  liquibase:
    build:
      dockerfile: ./liquibase-service/Dockerfile.local
    environment:
      - DB_USERNAME=db_deploy_admin
      - DB_PASSWORD=${LIQUIBASE_USER_PASSWORD:-db_deploy_admin}
      - DB_HOST=nbs-mssql
    depends_on:
      nbs-mssql:
        condition: service_healthy
    mem_limit: 200M
  # NBS application server (management 9990, app 7001, debug 8787 inside the
  # container, remapped to avoid host collisions).
  wildfly:
    image: ghcr.io/cdcent/nedssdev:6.0.18.1
    depends_on:
      nbs-mssql:
        condition: service_healthy
    ports:
      - "9991:9990"
      - "7003:7001"
      - "8788:8787"
  # Single-node Kafka broker in KRaft mode (no ZooKeeper). Internal clients
  # use kafka:29092; host clients use localhost:9092.
  kafka:
    image: confluentinc/cp-kafka:7.8.7
    ports:
      - "9092:9092"
    environment:
      # KRaft Configuration — env values quoted so YAML keeps them as strings.
      KAFKA_NODE_ID: "1"
      KAFKA_PROCESS_ROLES: broker,controller
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka:29093
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      CLUSTER_ID: RwB4L8JMReqMdSiPoP-9Og # generated with docker run --rm confluentinc/cp-kafka:7.3.0 kafka-storage random-uuid
      # Listener Configuration
      KAFKA_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://kafka:9092,CONTROLLER://kafka:29093
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      # Topic Configuration — replication factor 1 because this is a
      # single-broker dev cluster.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
    healthcheck:
      test: kafka-topics --bootstrap-server kafka:29092 --list
      interval: 15s
      timeout: 10s
      retries: 5
  # Confluent Kafka Connect worker (sink connectors); REST API on host 8083.
  kafka-connect:
    build:
      dockerfile: ./containers/kafka-connect/Dockerfile
    depends_on:
      kafka:
        condition: service_healthy
      nbs-mssql:
        condition: service_healthy
      liquibase:
        condition: service_completed_successfully
    ports:
      # Quoted for the same YAML digit:digit reason as nbs-mssql above.
      - "8083:8083"
    volumes:
      - ./containers/kafka-connect/initialize/:/kafka/healthcheck/
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "kafka:29092"
      CONNECT_GROUP_ID: "cp-kafka-connect.groupId"
      CONNECT_CONFIG_STORAGE_TOPIC: "kafka-sink-connector-config"
      CONNECT_OFFSET_STORAGE_TOPIC: "kafka-sink-connector-offset"
      CONNECT_STATUS_STORAGE_TOPIC: "kafka-sink-connector-status"
      # Replication factor 1 matches the single-broker kafka service.
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect"
      CONNECT_CONSUMER_METADATA_MAX_AGE_MS: "1000"
    healthcheck:
      test: ["CMD", "/kafka/healthcheck/healthcheck.sh"]
      interval: 20s
      timeout: 15s
      retries: 5
  # Debezium Connect worker (CDC from SQL Server); REST API remapped to host
  # 8085 to avoid colliding with kafka-connect's 8083.
  debezium:
    image: debezium/connect:2.4
    depends_on:
      kafka:
        condition: service_healthy
      nbs-mssql:
        condition: service_healthy
      liquibase:
        condition: service_completed_successfully
    ports:
      - "8085:8083"
    volumes:
      - ./containers/debezium/initialize/:/kafka/healthcheck/
    environment:
      BOOTSTRAP_SERVERS: kafka:29092
      GROUP_ID: connect-cluster
      KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      OFFSET_STORAGE_FILE_FILENAME: /tmp/connect.offsets
      OFFSET_FLUSH_INTERVAL_MS: "10000"
      PLUGIN_PATH: /kafka/connect
      CONFIG_STORAGE_TOPIC: connect-configs
      OFFSET_STORAGE_TOPIC: connect-offsets
      STATUS_STORAGE_TOPIC: connect-status
      # Replication factor 1 matches the single-broker kafka service.
      CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      STATUS_STORAGE_REPLICATION_FACTOR: "1"
    healthcheck:
      test: ["CMD", "/kafka/healthcheck/healthcheck.sh"]
      interval: 10s
      timeout: 5s
      retries: 5
  # Consumes CDC events from Kafka and writes to the RDB_MODERN database.
  reporting-pipeline-service:
    build:
      dockerfile: ./reporting-pipeline-service/Dockerfile
    environment:
      - DB_USERNAME=reporting_pipeline_service_rdb
      - DB_PASSWORD=reporting_pipeline_service
      # NOTE(review): DB_HOST carries a full JDBC URL, not just a hostname —
      # confirm the service expects the complete connection string here.
      - DB_HOST=jdbc:sqlserver://nbs-mssql:1433;databaseName=RDB_MODERN;encrypt=true;trustServerCertificate=true;
      - KAFKA_BOOTSTRAP_SERVER=kafka:29092
    depends_on:
      kafka:
        condition: service_healthy
      nbs-mssql:
        condition: service_healthy
      liquibase:
        condition: service_completed_successfully