Skip to content

Commit d3f8a9d

Browse files
committed
Code improvements
Issue: ZENKO-4641
1 parent 6ef2495 commit d3f8a9d

File tree

9 files changed

+76
-139
lines changed

9 files changed

+76
-139
lines changed

.github/scripts/end2end/configs/zenko.yaml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,6 @@ kind: Zenko
44
metadata:
55
name: ${ZENKO_NAME}
66
${ZENKO_ANNOTATIONS}
7-
zenko.io/x-backbeat-oneshard-replicaset: data-db-mongodb-sharded-shard-0
8-
zenko.io/x-backbeat-oneshard-replicaset-hosts: ${ZENKO_BACKBEAT_SHARD_HOSTS}
97
spec:
108
version: ${ZENKO_VERSION_NAME}
119
replicas: 1

.github/scripts/end2end/deploy-zenko.sh

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -131,17 +131,7 @@ create_encryption_secret()
131131
export AZURE_SECRET_KEY_ENCRYPTED
132132
}
133133

134-
generate_shard_hosts() {
135-
local hosts=""
136-
for ((i=0; i<MONGODB_SHARD_COUNT; i++)); do
137-
if [ $i -gt 0 ]; then hosts+=","; fi
138-
hosts+="data-db-mongodb-sharded-shard${i}-data-0.data-db-mongodb-sharded-headless.default.svc.cluster.local:27017"
139-
done
140-
export ZENKO_BACKBEAT_SHARD_HOSTS="$hosts"
141-
}
142-
143134
create_encryption_secret
144-
generate_shard_hosts
145135

146136
env $(dependencies_env) envsubst < ${ZENKOVERSION_PATH} | kubectl -n ${NAMESPACE} apply -f -
147137
env $(dependencies_env) envsubst < ${ZENKO_CR_PATH} | kubectl -n ${NAMESPACE} apply -f -

.github/scripts/end2end/generate-kustomization.sh

Lines changed: 0 additions & 80 deletions
This file was deleted.

.github/scripts/end2end/install-kind-dependencies.sh

Lines changed: 65 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,14 @@ MONGODB_APP_PASSWORD=datapass
2424
MONGODB_APP_DATABASE=${ZENKO_MONGODB_DATABASE:-datadb}
2525
MONGODB_RS_KEY=0123456789abcdef
2626

27-
MONGODB_SHARD_COUNT=${MONGODB_SHARD_COUNT:-1}
27+
# Constants for valid topologies for CI tests
28+
# We support multiple shards per node, or one shard per node, up to 12 nodes
29+
# The first number is the number of nodes, the second is the number of shards
30+
readonly MONGODB_VALID_TOPOLOGIES=(
31+
"1:1" "1:2" "3:1" "3:3" "6:1" "6:2" "6:6" "9:1" "9:3" "9:9" "12:1" "12:4" "12:12"
32+
)
2833

29-
source "${DIR}/generate-kustomization.sh" && generate_kustomization "${NODE_COUNT:-1}" "${MONGODB_SHARD_COUNT}"
34+
MONGODB_SHARD_COUNT=${MONGODB_SHARD_COUNT:-1}
3035

3136
ENABLE_KEYCLOAK_HTTPS=${ENABLE_KEYCLOAK_HTTPS:-'false'}
3237

@@ -116,6 +121,53 @@ stringData:
116121
mongodb-replica-set-key: $MONGODB_RS_KEY
117122
EOF
118123

124+
# Validate the requested topology and print the path to its base YAML file
125+
get_mongodb_topology_file() {
126+
local node_count=$1
127+
local shard_count=$2
128+
129+
local base_yaml_name="mongodb-sharded-${node_count}-node"
130+
131+
# Validate topology
132+
local topology_key="${node_count}:${shard_count}"
133+
if [[ ! " ${MONGODB_VALID_TOPOLOGIES[*]} " =~ " ${topology_key} " ]]; then
134+
echo "Error: Invalid topology - ${node_count} nodes, ${shard_count} shards"
135+
exit 1
136+
fi
137+
138+
# Adjust base YAML name if there are multiple shards
139+
[[ "$shard_count" -gt 1 ]] && base_yaml_name="${base_yaml_name}-${shard_count}-shards"
140+
base_yaml_name="${base_yaml_name}.yaml"
141+
142+
# ensure base file exists
143+
local base_yaml_path="${DIR}/_build/root/deploy/${base_yaml_name}"
144+
if [ ! -f "$base_yaml_path" ]; then
145+
echo "Error: Base YAML file not found at ${base_yaml_path}"
146+
exit 1
147+
fi
148+
149+
echo "$base_yaml_path"
150+
}
151+
152+
# MongoDB selectors are not supported in the CI.
153+
# So we remove them and let the provisioner handle the
154+
# volume provisioning.
155+
patch_mongodb_selector() {
156+
local base_yaml_path=$1
157+
local shard_count=$2
158+
159+
# Remove volume selectors from mongos StatefulSet
160+
yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-mongos") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"
161+
162+
# Remove volume selectors from configsvr StatefulSet
163+
yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-configsvr") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"
164+
165+
# Remove volume selectors from shard StatefulSets
166+
for ((i=0; i<shard_count; i++)); do
167+
yq eval "select(.kind == \"StatefulSet\" and .metadata.name == \"data-db-mongodb-sharded-shard${i}-data\") |= del(.spec.volumeClaimTemplates[].spec.selector)" -i "$base_yaml_path"
168+
done
169+
}
170+
119171
build_solution_base_manifests() {
120172
echo 'build solution-base manifests'
121173
MANIFEST_ONLY=true $SOLUTION_BASE_DIR/build.sh
@@ -139,24 +191,6 @@ build_solution_base_manifests() {
139191
sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
140192
sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
141193
sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
142-
143-
# Limits and requests for MongoDB are computed based on the current system
144-
# Detect total system RAM in GiB
145-
TOTAL_RAM_GB=$(awk '/MemTotal/ {printf "%.0f", $2/1024/1024}' /proc/meminfo)
146-
147-
# Compute MongoDB settings based on the total RAM
148-
MONGODB_WIRETIGER_CACHE_SIZE_GB=$((TOTAL_RAM_GB * 335 / 1000))
149-
MONGODB_MONGOS_RAM_LIMIT=$((TOTAL_RAM_GB * 165 / 1000))Gi
150-
MONGODB_SHARDSERVER_RAM_LIMIT=$((2 * MONGODB_WIRETIGER_CACHE_SIZE_GB))Gi
151-
MONGODB_SHARDSERVER_RAM_REQUEST=${MONGODB_WIRETIGER_CACHE_SIZE_GB}Gi
152-
MONGODB_MONGOS_RAM_REQUEST=$((TOTAL_RAM_GB * 33 / 1000))Gi
153-
154-
# Replace values before deploying
155-
sed -i "s/MONGODB_SHARDSERVER_EXTRA_FLAGS/--wiredTigerCacheSizeGB=${MONGODB_WIRETIGER_CACHE_SIZE_GB}/g" $DIR/_build/root/deploy/*
156-
sed -i "s/MONGODB_MONGOS_RAM_LIMIT/${MONGODB_MONGOS_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
157-
sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
158-
sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
159-
sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
160194
}
161195

162196
get_image_from_deps() {
@@ -193,13 +227,19 @@ mongodb_wait_for_shards() {
193227

194228
mongodb_sharded() {
195229
local SOLUTION_REGISTRY=metalk8s-registry-from-config.invalid/zenko-base-${VERSION_FULL}
230+
local node_count=${NODE_COUNT:-1}
231+
local shard_count=${MONGODB_SHARD_COUNT}
232+
233+
local base_yaml_path=$(get_mongodb_topology_file $node_count $shard_count)
234+
235+
sed -i "s|${SOLUTION_REGISTRY}/mongodb-sharded:.*|$(get_image_from_deps mongodb-sharded)|g" "$base_yaml_path"
236+
sed -i "s|${SOLUTION_REGISTRY}/os-shell:.*|$(get_image_from_deps mongodb-shell)|g" "$base_yaml_path"
237+
sed -i "s|${SOLUTION_REGISTRY}/mongodb-exporter:.*|$(get_image_from_deps mongodb-sharded-exporter)|g" "$base_yaml_path"
196238

197-
kustomize edit set image \
198-
$SOLUTION_REGISTRY/mongodb-sharded=$(get_image_from_deps mongodb-sharded) \
199-
$SOLUTION_REGISTRY/os-shell=$(get_image_from_deps mongodb-shell) \
200-
$SOLUTION_REGISTRY/mongodb-exporter=$(get_image_from_deps mongodb-sharded-exporter)
239+
# Ensure no volume selectors remain, as the CI provisioner cannot handle them
240+
patch_mongodb_selector "$base_yaml_path" "$shard_count"
201241

202-
kubectl apply -k "${DIR}"
242+
kubectl apply -f "$base_yaml_path"
203243

204244
kubectl rollout status statefulset data-db-mongodb-sharded-mongos --timeout=5m
205245
kubectl rollout status statefulset data-db-mongodb-sharded-configsvr --timeout=5m

.github/workflows/end2end.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,7 @@ jobs:
518518
with:
519519
username: "${{ github.repository_owner }}"
520520
password: "${{ github.token }}"
521-
registry: ghcr.io/zenko-dev/zenko-operator
521+
registry: ghcr.io
522522
- name: Get token to access private repositories
523523
uses: actions/create-github-app-token@v1
524524
id: app-token

monitoring/mongodb/alerts.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,7 @@ groups:
241241
) != (${replicas} - 1)
242242
for: 10m
243243
labels:
244+
rs_nm: "{{ $labels.rs_nm }}"
244245
severity: warning
245246
annotations:
246247
description: "MongoDB replica set `{{ $labels.rs_nm }}` is not in the expected state. It does not have the expected number of SECONDARY members. Please ensure that all instances are running properly."

solution-base/build.sh

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -32,19 +32,15 @@ SOLUTION_REGISTRY=metalk8s-registry-from-config.invalid/${PRODUCT_LOWERNAME}-${V
3232
MONGODB_SHARDED_SINGLE_NODE_PATH=${ISO_ROOT}/deploy/mongodb-sharded-1-node.yaml
3333
MONGODB_SHARDED_SINGLE_NODE_TWO_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-1-node-2-shards.yaml
3434
MONGODB_SHARDED_THREE_NODE_PATH=${ISO_ROOT}/deploy/mongodb-sharded-3-nodes.yaml
35-
MONGODB_SHARDED_THREE_NODE_TWO_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-3-nodes-2-shards.yaml
3635
MONGODB_SHARDED_THREE_NODE_THREE_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-3-nodes-3-shards.yaml
3736
MONGODB_SHARDED_SIX_NODE_PATH=${ISO_ROOT}/deploy/mongodb-sharded-6-nodes.yaml
3837
MONGODB_SHARDED_SIX_NODE_TWO_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-6-nodes-2-shards.yaml
39-
MONGODB_SHARDED_SIX_NODE_THREE_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-6-nodes-3-shards.yaml
4038
MONGODB_SHARDED_SIX_NODE_SIX_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-6-nodes-6-shards.yaml
4139
MONGODB_SHARDED_NINE_NODE_PATH=${ISO_ROOT}/deploy/mongodb-sharded-9-nodes.yaml
4240
MONGODB_SHARDED_NINE_NODE_THREE_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-9-nodes-3-shards.yaml
43-
MONGODB_SHARDED_NINE_NODE_SIX_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-9-nodes-6-shards.yaml
4441
MONGODB_SHARDED_NINE_NODE_NINE_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-9-nodes-9-shards.yaml
4542
MONGODB_SHARDED_TWELVE_NODE_PATH=${ISO_ROOT}/deploy/mongodb-sharded-12-nodes.yaml
4643
MONGODB_SHARDED_TWELVE_NODE_FOUR_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-12-nodes-4-shards.yaml
47-
MONGODB_SHARDED_TWELVE_NODE_EIGHT_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-12-nodes-8-shards.yaml
4844
MONGODB_SHARDED_TWELVE_NODE_TWELVE_SHARDS_PATH=${ISO_ROOT}/deploy/mongodb-sharded-12-nodes-12-shards.yaml
4945

5046
SOLUTION_ENV='SOLUTION_ENV'
@@ -89,8 +85,7 @@ function render_mongodb_sharded_yamls()
8985
local OUTPUT_PATH=${1:-${OPERATOR_PATH}}
9086
local SHARD_COUNT=${2:-1}
9187
local NODE_COUNT=${3:-1}
92-
local MAX_REPLICA_PER_SHARD=3
93-
local REPLICA_COUNT=$(( $3 > $MAX_REPLICA_PER_SHARD ? $MAX_REPLICA_PER_SHARD : $3 ))
88+
local DATA_REPLICA_COUNT=${4:-1}
9489

9590
echo creating mongodb-sharded ${NODE_COUNT}-node yamls
9691
CHART_PATH="$SOLUTION_BASE_DIR/mongodb/charts/mongodb-sharded"
@@ -103,7 +98,7 @@ function render_mongodb_sharded_yamls()
10398
--set shards=${SHARD_COUNT} \
10499
--set mongos.replicaCount=${NODE_COUNT} \
105100
--set mongos.useStatefulSet=true \
106-
--set shardsvr.dataNode.replicaCount=${REPLICA_COUNT} \
101+
--set shardsvr.dataNode.replicaCount=${DATA_REPLICA_COUNT} \
107102
--set shardsvr.persistence.enabled=true \
108103
--set shardsvr.persistence.storageClass=${MONGODB_STORAGE_CLASS} \
109104
--set configsvr.replicaCount=$(( $NODE_COUNT > 2 ? 3 : 1 )) \
@@ -161,23 +156,19 @@ function render_mongodb_sharded_yamls()
161156
function mongodb_sharded_yamls()
162157
{
163158
# For now we cap the number of replicas at 3, so each shard is a P-S-S
164-
# Parameters are: shard count - node count - replica count
159+
# Parameters are: shard count - node count - data replica count
165160
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SINGLE_NODE_PATH}" 1 1 1
166161
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SINGLE_NODE_TWO_SHARDS_PATH}" 2 1 1
167162
render_mongodb_sharded_yamls "${MONGODB_SHARDED_THREE_NODE_PATH}" 1 3 3
168-
render_mongodb_sharded_yamls "${MONGODB_SHARDED_THREE_NODE_TWO_SHARDS_PATH}" 2 3 3
169163
render_mongodb_sharded_yamls "${MONGODB_SHARDED_THREE_NODE_THREE_SHARDS_PATH}" 3 3 3
170164
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SIX_NODE_PATH}" 1 6 3
171165
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SIX_NODE_TWO_SHARDS_PATH}" 2 6 3
172-
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SIX_NODE_THREE_SHARDS_PATH}" 3 6 3
173166
render_mongodb_sharded_yamls "${MONGODB_SHARDED_SIX_NODE_SIX_SHARDS_PATH}" 6 6 3
174167
render_mongodb_sharded_yamls "${MONGODB_SHARDED_NINE_NODE_PATH}" 1 9 3
175168
render_mongodb_sharded_yamls "${MONGODB_SHARDED_NINE_NODE_THREE_SHARDS_PATH}" 3 9 3
176-
render_mongodb_sharded_yamls "${MONGODB_SHARDED_NINE_NODE_SIX_SHARDS_PATH}" 6 9 3
177169
render_mongodb_sharded_yamls "${MONGODB_SHARDED_NINE_NODE_NINE_SHARDS_PATH}" 9 9 3
178170
render_mongodb_sharded_yamls "${MONGODB_SHARDED_TWELVE_NODE_PATH}" 1 12 3
179171
render_mongodb_sharded_yamls "${MONGODB_SHARDED_TWELVE_NODE_FOUR_SHARDS_PATH}" 4 12 3
180-
render_mongodb_sharded_yamls "${MONGODB_SHARDED_TWELVE_NODE_EIGHT_SHARDS_PATH}" 8 12 3
181172
render_mongodb_sharded_yamls "${MONGODB_SHARDED_TWELVE_NODE_TWELVE_SHARDS_PATH}" 12 12 3
182173
}
183174

solution-base/mongodb/charts/mongodb-sharded/templates/shard/shard-data-podmonitor.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,11 @@ SPDX-License-Identifier: APACHE-2.0
44
*/}}
55

66
{{- if and .Values.shards .Values.metrics.enabled .Values.metrics.podMonitor.enabled }}
7+
{{- $i := 0 }}
78
apiVersion: monitoring.coreos.com/v1
89
kind: PodMonitor
910
metadata:
10-
name: {{ printf "%s-shards-data" (include "common.names.fullname" $ ) }}
11+
name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }}
1112
namespace: {{ default (include "common.names.namespace" $) $.Values.metrics.podMonitor.namespace | quote }}
1213
labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }}
1314
app.kubernetes.io/component: shardsvr

solution-base/mongodb/patches/mongodb-sharded-fix-podmonitor.patch

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,18 @@
11
diff --git a/solution-base/mongodb/charts/mongodb-sharded/templates/shard/shard-data-podmonitor.yaml b/solution-base/mongodb/charts/mongodb-sharded/templates/shard/shard-data-podmonitor.yaml
2-
index d2c9c0cb..a01a0d68 100644
2+
index d2c9c0cb..657764ac 100644
33
--- a/solution-base/mongodb/charts/mongodb-sharded/templates/shard/shard-data-podmonitor.yaml
44
+++ b/solution-base/mongodb/charts/mongodb-sharded/templates/shard/shard-data-podmonitor.yaml
5-
@@ -4,12 +4,10 @@ SPDX-License-Identifier: APACHE-2.0
5+
@@ -4,8 +4,7 @@ SPDX-License-Identifier: APACHE-2.0
66
*/}}
77

88
{{- if and .Values.shards .Values.metrics.enabled .Values.metrics.podMonitor.enabled }}
99
-{{- $replicas := .Values.shards | int }}
1010
-{{- range $i, $e := until $replicas }}
11+
+{{- $i := 0 }}
1112
apiVersion: monitoring.coreos.com/v1
1213
kind: PodMonitor
1314
metadata:
14-
- name: {{ printf "%s-shard%d-data" (include "common.names.fullname" $ ) $i }}
15-
+ name: {{ printf "%s-shards-data" (include "common.names.fullname" $ ) }}
16-
namespace: {{ default (include "common.names.namespace" $) $.Values.metrics.podMonitor.namespace | quote }}
17-
labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }}
18-
app.kubernetes.io/component: shardsvr
19-
@@ -36,7 +34,4 @@ spec:
15+
@@ -36,7 +35,4 @@ spec:
2016
selector:
2117
matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }}
2218
app.kubernetes.io/component: shardsvr

0 commit comments

Comments
 (0)