#!/bin/bash
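# Install the third-party dependencies a kind-based CI cluster needs:
# ingress-nginx, cert-manager, prometheus-operator, zookeeper, kafka,
# keycloak, and sharded MongoDB.
# Example invocation (illustrative values):
#   NODE_COUNT=3 MONGODB_SHARD_COUNT=3 ./install-kind-dependencies.sh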
set -exu
SCRIPT_FULL_PATH=$(readlink -f "$0")
DIR=$(dirname "$0")
REPOSITORY_DIR=$(dirname "$SCRIPT_FULL_PATH")/../../..
SOLUTION_BASE_DIR=$REPOSITORY_DIR/solution-base
VERSION_FILE="${REPOSITORY_DIR}/VERSION"
source "${VERSION_FILE}"
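# The VERSION file defines VERSION_FULL, used below to tag the solution registry images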
ZK_OPERATOR_VERSION=0.2.15
CERT_MANAGER_VERSION=v1.13.3
KAFKA_OPERATOR_VERSION=0.25.1
INGRESS_NGINX_VERSION=controller-v1.10.3
PROMETHEUS_VERSION=v0.52.1
KEYCLOAK_VERSION=${KEYCLOAK_VERSION:-'18.4.4'}
MONGODB_ROOT_USERNAME=root
MONGODB_ROOT_PASSWORD=rootpass
MONGODB_APP_USERNAME=data
MONGODB_APP_PASSWORD=datapass
MONGODB_APP_DATABASE=${ZENKO_MONGODB_DATABASE:-datadb}
MONGODB_RS_KEY=0123456789abcdef
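# Static CI-only credentials; the replica-set key is the shared secret that
# MongoDB replica-set members use to authenticate each other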
# Constants for valid topologies for CI tests.
# The first number is the node count, the second the shard count: we support
# one shard per host or multiple shards per host, for clusters of up to 12 nodes.
readonly MONGODB_VALID_TOPOLOGIES=(
"1:1" "1:2" "3:1" "3:3" "6:1" "6:2" "6:6" "9:1" "9:3" "9:9" "12:1" "12:4" "12:12"
)
MONGODB_SHARD_COUNT=${MONGODB_SHARD_COUNT:-1}
ENABLE_KEYCLOAK_HTTPS=${ENABLE_KEYCLOAK_HTTPS:-'false'}
KAFKA_CHART=banzaicloud-stable/kafka-operator
if [ "$ENABLE_KEYCLOAK_HTTPS" = 'true' ]; then
    KEYCLOAK_INGRESS_OPTIONS="$DIR/configs/keycloak_ingress_https.yaml"
else
    KEYCLOAK_INGRESS_OPTIONS="$DIR/configs/keycloak_ingress_http.yaml"
fi
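# Add a helm repository only if it is not already configured, so re-runs stay idempotent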
helm_repo_add() {
    helm repo list -o json 2>/dev/null | jq -e --arg n "$1" '.[] | select(.name == $n)' >/dev/null 2>&1 || helm repo add "$1" "$2"
}
helm_repo_add bitnami https://charts.bitnami.com/bitnami
helm_repo_add pravega https://charts.pravega.io
helm_repo_add codecentric https://codecentric.github.io/helm-charts/
# BanzaiCloud repo may not work, c.f. https://scality.atlassian.net/browse/AN-225
helm_repo_add banzaicloud-stable https://kubernetes-charts.banzaicloud.com || {
    echo -n "::notice file=$(basename "$0"),line=$LINENO,title=Banzaicloud Charts not available::"
    echo "Failed to add banzaicloud-stable repo, using local checkout"
    kafka_operator="${DIR}/kafka-operator"
    if [ ! -d "${kafka_operator}" ]; then
        git -c advice.detachedHead=false clone -q --depth 1 -b "v${KAFKA_OPERATOR_VERSION}" \
            https://github.com/banzaicloud/koperator "${kafka_operator}"
    fi
    KAFKA_CHART="${kafka_operator}/charts/kafka-operator"
}
helm repo update
# fluent-bit log collector — captures container logs before pod deletion
if [ "${CI:-}" = "true" ]; then
kubectl apply -f $DIR/configs/fluentbit.yaml
kubectl rollout status daemonset/fluent-bit --timeout=5m
fi
# nginx-controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/${INGRESS_NGINX_VERSION}/deploy/static/provider/kind/deploy.yaml
kubectl rollout status -n ingress-nginx deployment/ingress-nginx-controller --timeout=10m
# cert-manager
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/${CERT_MANAGER_VERSION}/cert-manager.yaml --wait
kubectl rollout status -n cert-manager deployment/cert-manager-webhook --timeout=10m
# === CERTIFICATE AUTHORITY SETUP ===
# We need a self-signed root CA certificate for signing certificates for mock services
# (Azure mock, AWS mock). This enables HTTPS testing with proper certificate validation.
echo "Waiting for cert-manager webhook to be ready..."
kubectl wait --for=condition=Available --timeout=60s deployment/cert-manager-webhook -n cert-manager
cat <<'EOF' | kubectl apply -f -
---
# Bootstrap self-signed ClusterIssuer for creating the root CA
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: bootstrap-selfsigned
spec:
  selfSigned: {}
---
# Root CA certificate that will act as our custom CA
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: root-ca
  namespace: cert-manager
spec:
  secretName: root-ca
  isCA: true
  commonName: root-ca
  issuerRef:
    name: bootstrap-selfsigned
    kind: ClusterIssuer
---
# Production ClusterIssuer that uses our root CA for signing service certificates
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: artesca-root-ca-issuer
spec:
  ca:
    secretName: root-ca
EOF
kubectl wait --for=condition=Ready --timeout=240s certificate/root-ca -n cert-manager
kubectl wait --for=condition=Ready --timeout=240s clusterissuer/artesca-root-ca-issuer
# Copy root CA secret to default namespace for applications to use
echo "Copying root CA certificate to default namespace..."
kubectl get secret root-ca -n cert-manager -o json |
    jq '.metadata = {namespace: "default", name: "zenko-root-ca"}' |
    kubectl apply -f -
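# A workload can then consume the CA from the default namespace, e.g. with a
# pod volume like (hypothetical fragment):
#   volumes:
#     - name: root-ca
#       secret:
#         secretName: zenko-root-ca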
# prometheus
prom_url=https://raw.githubusercontent.com/coreos/prometheus-operator/${PROMETHEUS_VERSION}/bundle.yaml
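# Server-side apply is needed here: the bundled CRDs are too large for
# client-side apply's last-applied-configuration annotation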
kubectl apply --server-side -f $prom_url
# wait for the resource to exist
kubectl wait --for=condition=established --timeout=10m crd/alertmanagers.monitoring.coreos.com
envsubst < "$DIR"/configs/prometheus.yaml | kubectl apply -f -
# zookeeper
helm upgrade --install --version ${ZK_OPERATOR_VERSION} -n default zk-operator pravega/zookeeper-operator --set "watchNamespace=default"
# kafka
kafka_crd_url=https://github.com/banzaicloud/koperator/releases/download/v${KAFKA_OPERATOR_VERSION}/kafka-operator.crds.yaml
kubectl apply --server-side -f $kafka_crd_url
helm upgrade --install --version ${KAFKA_OPERATOR_VERSION} -n default kafka-operator ${KAFKA_CHART} \
    --set prometheusMetrics.authProxy.image.repository=quay.io/brancz/kube-rbac-proxy \
    --set prometheusMetrics.authProxy.image.tag=v0.21.0
# keycloak
envsubst < $DIR/configs/keycloak_config.json > $DIR/configs/keycloak-realm.json
kubectl create configmap keycloak-realm --from-file=$DIR/configs/keycloak-realm.json --dry-run=client -o yaml | kubectl apply -f -
helm upgrade --install --version ${KEYCLOAK_VERSION} keycloak codecentric/keycloak -f "$DIR/configs/keycloak_options.yaml" -f "${KEYCLOAK_INGRESS_OPTIONS}"
kubectl rollout status sts/keycloak --timeout=10m
# TODO: use zenko-operator install-deps
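# The Secret uses stringData so values can be given in plaintext; the API
# server base64-encodes them into .data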
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: mongodb-db-creds
stringData:
  mongodb-root-username: $MONGODB_ROOT_USERNAME
  mongodb-root-password: $MONGODB_ROOT_PASSWORD
  mongodb-username: $MONGODB_APP_USERNAME
  mongodb-password: $MONGODB_APP_PASSWORD
  mongodb-database: $MONGODB_APP_DATABASE
  mongodb-replica-set-key: $MONGODB_RS_KEY
EOF
# Resolve the manifest file for the requested topology, validating the
# topology first
get_mongodb_topology_file() {
    local node_count=$1
    local shard_count=$2
    local base_yaml_name="mongodb-sharded-${node_count}-node"
    # Validate the topology; errors go to stderr since the caller captures stdout
    local topology_key="${node_count}:${shard_count}"
    if [[ ! " ${MONGODB_VALID_TOPOLOGIES[*]} " =~ " ${topology_key} " ]]; then
        echo "Error: Invalid topology - ${node_count} nodes, ${shard_count} shards" >&2
        exit 1
    fi
    # Adjust base YAML name if there are multiple shards
    [[ "$shard_count" -gt 1 ]] && base_yaml_name="${base_yaml_name}-${shard_count}-shards"
    base_yaml_name="${base_yaml_name}.yaml"
    # Ensure the base file exists
    local base_yaml_path="${DIR}/_build/root/deploy/${base_yaml_name}"
    if [ ! -f "$base_yaml_path" ]; then
        echo "Error: Base YAML file not found at ${base_yaml_path}" >&2
        exit 1
    fi
    echo "$base_yaml_path"
}
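# e.g. get_mongodb_topology_file 3 3 resolves to
#   ${DIR}/_build/root/deploy/mongodb-sharded-3-node-3-shards.yaml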
# MongoDB volume selectors are not supported in the CI, so we remove them and
# let the provisioner handle the volume provisioning.
patch_mongodb_selector() {
    local base_yaml_path=$1
    local shard_count=$2
    # Remove volume selectors from the mongos StatefulSet
    yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-mongos") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"
    # Remove volume selectors from the configsvr StatefulSet
    yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-configsvr") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"
    # Remove volume selectors from each shard StatefulSet
    for ((i=0; i<shard_count; i++)); do
        yq eval "select(.kind == \"StatefulSet\" and .metadata.name == \"data-db-mongodb-sharded-shard${i}-data\") |= del(.spec.volumeClaimTemplates[].spec.selector)" -i "$base_yaml_path"
    done
}
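# patch_mongodb_selector assumes the StatefulSets carry selectors of roughly
# this shape (hypothetical fragment):
#   volumeClaimTemplates:
#     - spec:
#         selector:
#           matchLabels: ...
# Dynamic provisioning cannot satisfy a PVC label selector, hence the del()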
build_solution_base_manifests() {
    echo 'build solution-base manifests'
    MANIFEST_ONLY=true "$SOLUTION_BASE_DIR/build.sh"
    sed -i 's/SOLUTION_ENV/default/g' "$DIR"/_build/root/deploy/*
    sed -i 's/MONGODB_STORAGE_CLASS/standard/g' "$DIR"/_build/root/deploy/*
    # Limits and requests for MongoDB are computed based on the current system:
    # detect total system RAM in GiB
    TOTAL_RAM_GB=$(awk '/MemTotal/ {printf "%.0f", $2/1024/1024}' /proc/meminfo)
    # Compute MongoDB settings based on the total RAM
    MONGODB_WIREDTIGER_CACHE_SIZE_GB=$((TOTAL_RAM_GB * 335 / 1000))
    MONGODB_MONGOS_RAM_LIMIT=$((TOTAL_RAM_GB * 165 / 1000))Gi
    MONGODB_SHARDSERVER_RAM_LIMIT=$((2 * MONGODB_WIREDTIGER_CACHE_SIZE_GB))Gi
    MONGODB_SHARDSERVER_RAM_REQUEST=${MONGODB_WIREDTIGER_CACHE_SIZE_GB}Gi
    MONGODB_MONGOS_RAM_REQUEST=$((TOTAL_RAM_GB * 33 / 1000))Gi
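    # Worked example on a 32 GiB host (integer arithmetic truncates):
    # cache = 32*335/1000 = 10 GiB, so the shardserver gets a 20Gi limit and a
    # 10Gi request, and mongos gets a 5Gi limit and a 1Gi request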
    # Replace placeholder values before deploying
    sed -i "s/MONGODB_SHARDSERVER_EXTRA_FLAGS/--wiredTigerCacheSizeGB=${MONGODB_WIREDTIGER_CACHE_SIZE_GB}/g" "$DIR"/_build/root/deploy/*
    sed -i "s/MONGODB_MONGOS_RAM_LIMIT/${MONGODB_MONGOS_RAM_LIMIT}/g" "$DIR"/_build/root/deploy/*
    sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" "$DIR"/_build/root/deploy/*
    sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" "$DIR"/_build/root/deploy/*
    sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" "$DIR"/_build/root/deploy/*
}
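# Build the full image reference (registry/image:tag) for a dependency listed
# in deps.yaml; Zenko mongo images hosted on ghcr.io additionally get the
# build tree hash appended to their tag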
get_image_from_deps() {
    local dep_name=$1
    # mongodb_build_vars.sh is expected to print shell assignments
    # (e.g. MONGODB_BUILD_TREE_HASH), which we source from its output
    source <("$SOLUTION_BASE_DIR/mongodb_build_vars.sh")
    yq eval ".$dep_name | (.sourceRegistry // \"docker.io\") + \"/\" + .image + \":\" + .tag" "$SOLUTION_BASE_DIR/deps.yaml" |
        sed '/ghcr.io\/scality\/zenko\/mongo/ s/$/-'"${MONGODB_BUILD_TREE_HASH}"'/'
}
retry() {
    local cmd=$1
    local errMsg=${2:-'reached max retry attempts'}
    local count=0
    # Retry the command up to 10 times, 5 seconds apart
    while ! "$cmd" && [ "$count" -lt 10 ]; do
        count=$((count + 1))
        sleep 5
    done
    if [ "$count" -ge 10 ]; then
        echo "$errMsg" >&2
        exit 1
    fi
}
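# Usage, matching the call below: retry <command> [error message]
#   retry mongodb_wait_for_shards "no shards found"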
mongodb_wait_for_shards() {
    local count
    count=$(kubectl exec -t data-db-mongodb-sharded-mongos-0 -- \
        mongosh admin \
        -u "$MONGODB_ROOT_USERNAME" \
        -p "$MONGODB_ROOT_PASSWORD" \
        --quiet \
        --eval "db.runCommand({ listshards: 1 }).shards.length"
    )
    [ "$count" = "$MONGODB_SHARD_COUNT" ]
}
mongodb_sharded() {
    local SOLUTION_REGISTRY=metalk8s-registry-from-config.invalid/zenko-base-${VERSION_FULL}
    local node_count=${NODE_COUNT:-1}
    local shard_count=${MONGODB_SHARD_COUNT}
    # Assign separately from the declaration so a validation failure inside the
    # command substitution is not masked by local's exit status
    local base_yaml_path
    base_yaml_path=$(get_mongodb_topology_file "$node_count" "$shard_count")
    # Point the manifests at the real dependency images
    sed -i "s|${SOLUTION_REGISTRY}/mongodb-sharded:.*|$(get_image_from_deps mongodb-sharded)|g" "$base_yaml_path"
    sed -i "s|${SOLUTION_REGISTRY}/os-shell:.*|$(get_image_from_deps mongodb-shell)|g" "$base_yaml_path"
    sed -i "s|${SOLUTION_REGISTRY}/mongodb-exporter:.*|$(get_image_from_deps mongodb-sharded-exporter)|g" "$base_yaml_path"
    # Drop volume selectors, as the provisioner cannot handle them
    patch_mongodb_selector "$base_yaml_path" "$shard_count"
    kubectl apply -f "$base_yaml_path"
    # Hold mongos back until configsvr is ready to avoid a race where mongos
    # tries to auth against configsvr before its replica set is initialized,
    # causing mongosh to hang indefinitely (no timeout in the entrypoint).
    local mongos_replicas
    mongos_replicas=$(kubectl get statefulset \
        -l app.kubernetes.io/name=mongodb-sharded,app.kubernetes.io/component=mongos \
        -o jsonpath='{.items[0].spec.replicas}')
    kubectl scale statefulset \
        -l app.kubernetes.io/name=mongodb-sharded,app.kubernetes.io/component=mongos \
        --replicas=0
    kubectl rollout status statefulset data-db-mongodb-sharded-configsvr --timeout=5m
    kubectl scale statefulset \
        -l app.kubernetes.io/name=mongodb-sharded,app.kubernetes.io/component=mongos \
        --replicas="$mongos_replicas"
    kubectl rollout status statefulset data-db-mongodb-sharded-mongos --timeout=5m
    for ((i=0; i<MONGODB_SHARD_COUNT; i++)); do
        kubectl rollout status statefulset "data-db-mongodb-sharded-shard${i}-data" --timeout=5m
    done
    retry mongodb_wait_for_shards "no shards found"
    kubectl exec -t data-db-mongodb-sharded-mongos-0 -- \
        mongosh admin \
        -u "$MONGODB_ROOT_USERNAME" \
        -p "$MONGODB_ROOT_PASSWORD" \
        --eval "sh.enableSharding('$MONGODB_APP_DATABASE')"
}
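# Render the solution-base manifests, then deploy the sharded MongoDB topology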
build_solution_base_manifests
mongodb_sharded