Commit aff846d

add internal network for deps
1 parent: c7f0b6e

8 files changed: 57 additions, 8 deletions


infrastructure/backups/backup.sh

Lines changed: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ elif [ "$REPLICAS" = "0" ]; then
   NETWORK=opencrvs_default
   echo "Working with no replicas"
 else
-  NETWORK=dependencies_overlay_net
+  NETWORK=dependencies_internal_net
   # Construct the HOST string rs0/mongo1,mongo2... based on the number of replicas
   HOST="rs0/"
   for (( i=1; i<=REPLICAS; i++ )); do
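This hunk only switches the network name; the diff does not show how $NETWORK is consumed further down backup.sh (the same pattern applies to restore.sh and clear-all-data.sh below). A hedged sketch of the typical downstream use, where the mongodump flags and credential variables are illustrative and not taken from this commit:

# Illustrative only: a one-off container joins $NETWORK so mongo1..mongoN resolve by service name
docker run --rm --network="$NETWORK" mongo:4.4 \
  mongodump --host="$HOST" --username="$MONGODB_ADMIN_USER" --password="$MONGODB_ADMIN_PASSWORD" \
  --gzip --archive > mongo-backup.gz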

infrastructure/backups/restore.sh

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ elif [ "$REPLICAS" = "0" ]; then
   NETWORK=opencrvs_default
   echo "Working with no replicas"
 else
-  NETWORK=dependencies_overlay_net
+  NETWORK=dependencies_internal_net
   # Construct the HOST string rs0/mongo1,mongo2... based on the number of replicas
   HOST="rs0/"
   for (( i=1; i<=REPLICAS; i++ )); do

infrastructure/clear-all-data.sh

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ if [ "$REPLICAS" = "0" ]; then
   NETWORK=opencrvs_default
   echo "Working with no replicas"
 else
-  NETWORK=dependencies_overlay_net
+  NETWORK=dependencies_internal_net
   # Construct the HOST string rs0/mongo1,mongo2... based on the number of replicas
   HOST="rs0/"
   for (( i=1; i<=REPLICAS; i++ )); do

infrastructure/deployment/add-networks.ts

Lines changed: 0 additions & 1 deletion
@@ -32,7 +32,6 @@ function addNetworksToCompose(composeFile: string, networksList: string) {
     .map((network) => network.trim())
     .filter((network) => network.length > 0)
     .map((stack) => `${stack}_dependencies_net`)
-    .concat('traefik_net')

   // Add networks to each service
   for (const serviceName in composeObject.services) {
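With .concat('traefik_net') removed, this helper now attaches application services only to their stack's `${stack}_dependencies_net` network. A hedged way to check the effect after a redeploy; the service name "opencrvs_gateway" is illustrative:

# The entries are network IDs; cross-reference with `docker network ls --no-trunc`
# to confirm traefik_net no longer appears among the service's networks
docker service inspect opencrvs_gateway --format '{{json .Spec.TaskTemplate.Networks}}'
docker network ls --no-trunc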

infrastructure/docker-compose.dependencies.yml

Lines changed: 50 additions & 0 deletions
@@ -12,6 +12,9 @@ services:
   # Only publish the exact ports that are required for OpenCRVS to work
   traefik:
     image: 'traefik:v2.10'
+    networks:
+      - traefik_net
+      - internal_net
     ports:
       - target: 80
         published: 80
@@ -59,6 +62,9 @@

   filebeat:
     image: docker.elastic.co/beats/filebeat:8.14.3
+    networks:
+      - traefik_net
+      - internal_net
     user: root

     configs:
@@ -83,6 +89,9 @@

   metricbeat:
     image: docker.elastic.co/beats/metricbeat:8.14.3
+    networks:
+      - traefik_net
+      - internal_net
     user: root
     cap_add:
       - SYS_PTRACE
@@ -120,6 +129,9 @@
         tag: 'metricbeat'
   setup-kibana-config:
     image: curlimages/curl:7.88.1
+    networks:
+      - traefik_net
+      - internal_net
     entrypoint:
       [
         'curl',
@@ -152,6 +164,9 @@
         tag: 'setup-kibana-config'
   kibana:
     image: docker.elastic.co/kibana/kibana:8.14.3
+    networks:
+      - traefik_net
+      - internal_net
     restart: always
     deploy:
       labels:
@@ -180,6 +195,9 @@
   # Configure mongo nodes as a replica set
   mongo1:
     image: mongo:4.4
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped
     command: mongod --auth --replSet rs0 --keyFile /etc/mongodb-keyfile
     hostname: 'mongo1'
@@ -214,6 +232,9 @@
   # Configure redis
   redis:
     image: redis:5
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped

     deploy:
@@ -227,6 +248,9 @@
   # Configure elasticsearch
   elasticsearch:
     image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped
     volumes:
       - '/data/elasticsearch:/usr/share/elasticsearch/data'
@@ -262,6 +286,9 @@
   # Configure elasticsearch
   minio:
     image: quay.io/minio/minio:RELEASE.2023-09-16T01-01-47Z.fips
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped
     environment:
       - MINIO_ROOT_USER=${MINIO_ROOT_USER}
@@ -298,6 +325,9 @@

   minio-mc:
     image: minio/mc
+    networks:
+      - traefik_net
+      - internal_net
     entrypoint: >
       /bin/sh -c "
       /usr/bin/mc admin trace --path ocrvs/* minio
@@ -320,6 +350,9 @@

   elastalert:
     image: jertel/elastalert2:2.19.0
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped
     environment:
       - ES_USERNAME=elastic
@@ -343,6 +376,9 @@

   logstash:
     image: logstash:8.14.3
+    networks:
+      - traefik_net
+      - internal_net
     command: logstash -f /etc/logstash/logstash.conf --verbose
     ports:
       - '12201:12201'
@@ -365,6 +401,9 @@
       replicas: 1
   apm-server:
     image: docker.elastic.co/apm/apm-server:7.17.22
+    networks:
+      - traefik_net
+      - internal_net
     cap_add: ['CHOWN', 'DAC_OVERRIDE', 'SETGID', 'SETUID']
     cap_drop: ['ALL']
     restart: always
@@ -399,6 +438,9 @@
   # Configure influxdb
   influxdb:
     image: influxdb:1.8.10
+    networks:
+      - traefik_net
+      - internal_net
     restart: unless-stopped
     volumes:
       - '/data/influxdb:/var/lib/influxdb'
@@ -447,3 +489,11 @@ configs:
     file: /opt/opencrvs/infrastructure/elasticsearch/jvm.options
   minio-mc-config.{{ts}}:
     file: /opt/opencrvs/infrastructure/mc-config/config.json
+
+networks:
+  traefik_net:
+    driver: overlay
+    name: traefik_net
+  internal_net:
+    driver: overlay
+    attachable: true
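The two overlay networks behave differently: traefik_net is given an explicit name, while internal_net has no name key and so is prefixed with the stack name at deploy time. Assuming the dependencies compose file is deployed as a stack called "dependencies" (which matches the names used by the helper scripts in this commit), the resulting network is dependencies_internal_net, and attachable: true is what lets the one-off docker run containers below join it. A hedged verification sketch:

# Hedged check after deploying the dependencies stack
docker network ls --filter driver=overlay
docker network inspect dependencies_internal_net --format '{{.Attachable}}'   # expected: true
# Because the network is attachable, ad-hoc containers can reach services by name:
docker run --rm --network=dependencies_internal_net curlimages/curl -s -o /dev/null -w '%{http_code}' http://elasticsearch:9200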

infrastructure/elasticsearch/setup-elastalert-indices.sh

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@

 set -e

-docker_command="docker run --rm --network=dependencies_overlay_net curlimages/curl"
+docker_command="docker run --rm --network=dependencies_internal_net curlimages/curl"

 echo 'Waiting for availability of Elasticsearch'
 ping_status_code=$($docker_command --connect-timeout 60 -u elastic:$ELASTICSEARCH_SUPERUSER_PASSWORD -o /dev/null -w '%{http_code}' "http://elasticsearch:9200")

infrastructure/monitoring/kibana/setup-config.sh

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@ response_text_from_curl_output() {
 }

 curl_raw() {
-  docker run --rm -v /opt/opencrvs/infrastructure/monitoring/kibana/config.ndjson:/config.ndjson --network=dependencies_overlay_net curlimages/curl -s -w "\n%{http_code}" "$@"
+  docker run --rm -v /opt/opencrvs/infrastructure/monitoring/kibana/config.ndjson:/config.ndjson --network=dependencies_internal_net curlimages/curl -s -w "\n%{http_code}" "$@"
 }

 parse_url_from_string() {
@@ -71,7 +71,7 @@ curl() {
 }

 jq() {
-  docker run --rm -i --network=dependencies_overlay_net ghcr.io/jqlang/jq "$@"
+  docker run --rm -i --network=dependencies_internal_net ghcr.io/jqlang/jq "$@"
 }

 # Initial API status check to ensure Kibana is ready
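Both wrappers run their tools inside throwaway containers attached to the dependencies overlay, so hostnames like "kibana" resolve even though nothing is published on the host. A hedged usage sketch; the Kibana endpoint and jq filter are illustrative, not from this commit:

output=$(curl_raw "http://kibana:5601/api/status")
http_code=$(echo "$output" | tail -n1)   # -w "\n%{http_code}" appends the status code as the last line
body=$(echo "$output" | sed '$d')        # everything before it is the response body
echo "$body" | jq '.status'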

infrastructure/port-forward.sh

Lines changed: 1 addition & 1 deletion
@@ -34,6 +34,6 @@ echo -e "Internal socat Port on Host: ${GREEN}$SOCAT_PORT${NC}"
 echo -e "Socat Container Name: ${GREEN}$CONTAINER_NAME${NC}"

 ssh -tL $LOCAL_PORT:localhost:$SOCAT_PORT $SSH_USER@$TARGET_SERVER \
-  'docker run --rm --name '$CONTAINER_NAME' --network=dependencies_overlay_net --publish '$SOCAT_PORT:$SOCAT_PORT' alpine/socat tcp-listen:'$SOCAT_PORT',fork,reuseaddr tcp-connect:'$TARGET_CONTAINER_NAME:$PORT''
+  'docker run --rm --name '$CONTAINER_NAME' --network=dependencies_internal_net --publish '$SOCAT_PORT:$SOCAT_PORT' alpine/socat tcp-listen:'$SOCAT_PORT',fork,reuseaddr tcp-connect:'$TARGET_CONTAINER_NAME:$PORT''

 echo -e "${GREEN}Port forwarding established and tunnel is online! Press Ctrl+C to close.${NC}"
