Merged
2 changes: 1 addition & 1 deletion .github/scripts/end2end/common.sh
@@ -4,7 +4,7 @@ get_token() {
-d "username=${OIDC_USERNAME}" \
-d "password=${OIDC_PASSWORD}" \
-d "grant_type=password" \
-d 'scope=openid' \
-d "scope=openid" \
https://localhost/auth/realms/${OIDC_REALM}/protocol/openid-connect/token | \
jq -cr '.id_token'
}
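
The quoting change itself is cosmetic: scope=openid contains nothing for the shell to expand, so single and double quotes behave identically here; the new form simply matches the neighbouring -d flags. A minimal usage sketch of the helper for context (the endpoint and header are illustrative, not part of this PR):

    # Fetch an OIDC id_token and present it as a bearer token
    TOKEN=$(get_token)
    curl -sk -H "Authorization: Bearer ${TOKEN}" "https://management.zenko.local/"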
2 changes: 0 additions & 2 deletions .github/scripts/end2end/configs/zenko.yaml
@@ -4,8 +4,6 @@ kind: Zenko
metadata:
name: ${ZENKO_NAME}
${ZENKO_ANNOTATIONS}
zenko.io/x-backbeat-oneshard-replicaset: data-db-mongodb-sharded-shard-0
zenko.io/x-backbeat-oneshard-replicaset-hosts: data-db-mongodb-sharded-shard0-data-0.data-db-mongodb-sharded-headless.default.svc.cluster.local:27017
spec:
version: ${ZENKO_VERSION_NAME}
replicas: 1
87 changes: 87 additions & 0 deletions .github/scripts/end2end/enable-https.sh
@@ -0,0 +1,87 @@
#!/bin/sh

set -exu

# This script enables HTTPS for an existing HTTP deployment of Zenko
DIR=$(dirname "$0")
KEYCLOAK_VERSION=${KEYCLOAK_VERSION:-'18.4.4'}

# Create a certificate for the Zenko ingresses, issued by the (self-signed) Artesca root CA
kubectl apply -f - << EOF
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: zenko-tls
namespace: default
spec:
secretName: zenko-tls
issuerRef:
name: artesca-root-ca-issuer
kind: ClusterIssuer
dnsNames:
- ui.zenko.local
- management.zenko.local
- s3.zenko.local
- iam.zenko.local
- sts.zenko.local
- keycloak.zenko.local
- shell-ui.zenko.local
EOF

# Wait for certificate to be ready
kubectl wait --for=condition=Ready --timeout=2m certificate/zenko-tls
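# If this wait times out, "kubectl describe certificate zenko-tls" is the
# usual place to see why issuance is stuck (illustrative troubleshooting step).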

# Update Shell-UI ingress to use HTTPS
kubectl patch ingress shell-ui --type=json -p '[
{
"op": "replace",
"path": "/spec/tls",
"value": [{"hosts": ["shell-ui.zenko.local"], "secretName": "zenko-tls"}]
}
]'
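# Note: the JSON patch above replaces the whole .spec.tls array, so any
# TLS entries previously configured on the shell-ui ingress are dropped.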

# Get current Zenko instance name
ZENKO_NAME=$(kubectl get zenko -o jsonpath='{.items[0].metadata.name}')
NAMESPACE="default"

# Update Zenko CR to include TLS certificates
kubectl patch zenko/${ZENKO_NAME} --type=merge -p '{
"spec": {
"ingress": {
"certificates": [
{
"hosts": [
"ui.zenko.local",
"management.zenko.local",
"iam.zenko.local",
"sts.zenko.local",
"s3.zenko.local"
],
"secretName": "zenko-tls"
}
],
"annotations": {
"nginx.ingress.kubernetes.io/proxy-body-size": "0m",
"nginx.ingress.kubernetes.io/ssl-redirect": "false"
}
}
}
}'
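# Merge-patch semantics: maps such as "annotations" are merged key by key,
# while lists such as "certificates" are replaced wholesale.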

# Wait for Zenko to be updated
kubectl wait --for condition=Available --timeout 5m zenko/${ZENKO_NAME}

# Update environment variables to use HTTPS URLs
echo "UI_ENDPOINT=https://ui.zenko.local" >> $GITHUB_ENV
echo "OIDC_ENDPOINT=https://keycloak.zenko.local" >> $GITHUB_ENV
echo "NAVBAR_ENDPOINT=https://shell-ui.zenko.local" >> $GITHUB_ENV
echo "OIDC_HOST=keycloak.zenko.local" >> $GITHUB_ENV
echo "ENABLE_KEYCLOAK_HTTPS=true" >> $GITHUB_ENV

# Set the HTTPS ingress options for Keycloak
KEYCLOAK_INGRESS_OPTIONS="$DIR/configs/keycloak_ingress_https.yaml"
KEYCLOAK_OPTIONS="$DIR/configs/keycloak_options.yaml"
helm upgrade --install keycloak codecentric/keycloak -f "${KEYCLOAK_OPTIONS}" -f "${KEYCLOAK_INGRESS_OPTIONS}" --version ${KEYCLOAK_VERSION}
kubectl rollout status sts/keycloak --timeout=5m

echo "HTTPS successfully enabled for Zenko deployment"
107 changes: 76 additions & 31 deletions .github/scripts/end2end/install-kind-dependencies.sh
@@ -15,7 +15,7 @@ CERT_MANAGER_VERSION=v1.13.3
KAFKA_OPERATOR_VERSION=0.25.1
INGRESS_NGINX_VERSION=controller-v1.10.3
PROMETHEUS_VERSION=v0.52.1
KEYCLOAK_VERSION=18.4.4
KEYCLOAK_VERSION=${KEYCLOAK_VERSION:-'18.4.4'}

MONGODB_ROOT_USERNAME=root
MONGODB_ROOT_PASSWORD=rootpass
@@ -24,6 +24,15 @@ MONGODB_APP_PASSWORD=datapass
MONGODB_APP_DATABASE=${ZENKO_MONGODB_DATABASE:-datadb}
MONGODB_RS_KEY=0123456789abcdef

# Valid MongoDB topologies for the CI tests, expressed as "nodes:shards" pairs.
# We support one shard per node, multiple shards per node, or shards spread
# across several nodes, for clusters of up to 12 nodes.
readonly MONGODB_VALID_TOPOLOGIES=(
"1:1" "1:2" "3:1" "3:3" "6:1" "6:2" "6:6" "9:1" "9:3" "9:9" "12:1" "12:4" "12:12"
)
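# e.g. "3:3" runs one shard per node on 3 nodes, while "12:4" spreads
# 4 shards across 12 nodes.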

MONGODB_SHARD_COUNT=${MONGODB_SHARD_COUNT:-1}

ENABLE_KEYCLOAK_HTTPS=${ENABLE_KEYCLOAK_HTTPS:-'false'}

KAFKA_CHART=banzaicloud-stable/kafka-operator
@@ -34,9 +43,7 @@ else
KEYCLOAK_INGRESS_OPTIONS="$DIR/configs/keycloak_ingress_http.yaml"
fi

# Older charts (bitnami/mongodb:7.8) have been removed from bitnami's helm repo: stick to the
# commit before removal for now
helm repo add --force-update bitnami https://raw.githubusercontent.com/bitnami/charts/defb094c658024e4aa8245622dab202874880cbc/bitnami
helm repo add --force-update bitnami https://charts.bitnami.com/bitnami
helm repo add --force-update pravega https://charts.pravega.io
helm repo add --force-update codecentric https://codecentric.github.io/helm-charts/
# BanzaiCloud repo may not work, c.f. https://scality.atlassian.net/browse/AN-225
@@ -114,6 +121,53 @@ stringData:
mongodb-replica-set-key: $MONGODB_RS_KEY
EOF

# Resolve (and validate) the base manifest file for the requested topology
get_mongodb_topology_file() {
local node_count=$1
local shard_count=$2

local base_yaml_name="mongodb-sharded-${node_count}-node"

# Validate topology
local topology_key="${node_count}:${shard_count}"
if [[ ! " ${MONGODB_VALID_TOPOLOGIES[*]} " =~ " ${topology_key} " ]]; then
echo "Error: Invalid topology - ${node_count} nodes, ${shard_count} shards"
exit 1
fi

# Adjust base YAML name if there are multiple shards
[[ "$shard_count" -gt 1 ]] && base_yaml_name="${base_yaml_name}-${shard_count}-shards"
base_yaml_name="${base_yaml_name}.yaml"

# ensure base file exists
local base_yaml_path="${DIR}/_build/root/deploy/${base_yaml_name}"
if [ ! -f "$base_yaml_path" ]; then
echo "Error: Base YAML file not found at ${base_yaml_path}"
exit 1
fi

echo "$base_yaml_path"
}
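
# Illustrative call: a 3-node/3-shard topology resolves to
#   ${DIR}/_build/root/deploy/mongodb-sharded-3-node-3-shards.yaml
# base_yaml_path=$(get_mongodb_topology_file 3 3)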

# MongoDB volume selectors are not supported in the CI, so we remove them
# and let the provisioner handle the volume provisioning.
patch_mongodb_selector() {
local base_yaml_path=$1
local shard_count=$2

# Remove volume selectors from mongos StatefulSet
yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-mongos") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"

# Remove volume selectors from configsvr StatefulSet
yq eval 'select(.kind == "StatefulSet" and .metadata.name == "data-db-mongodb-sharded-configsvr") |= del(.spec.volumeClaimTemplates[].spec.selector)' -i "$base_yaml_path"

# Remove volume selectors from shard StatefulSets
for ((i=0; i<shard_count; i++)); do
yq eval "select(.kind == \"StatefulSet\" and .metadata.name == \"data-db-mongodb-sharded-shard${i}-data\") |= del(.spec.volumeClaimTemplates[].spec.selector)" -i "$base_yaml_path"
done
}
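
# Effect (sketch): every volumeClaimTemplate keeps its accessModes and
# resources but loses spec.selector, so dynamically provisioned volumes
# can bind without pre-labelled PVs.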

build_solution_base_manifests() {
echo 'build solution-base manifests'
MANIFEST_ONLY=true $SOLUTION_BASE_DIR/build.sh
@@ -137,24 +191,6 @@ build_solution_base_manifests() {
sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/*

Author comment:
Note for reviewers: this was removed because we had this logic twice in the function, maybe a rebase issue at some point. See above.

# Limits and requests for MongoDB are computed based on the current system
# Detect total system RAM in GiB
TOTAL_RAM_GB=$(awk '/MemTotal/ {printf "%.0f", $2/1024/1024}' /proc/meminfo)

# Compute MongoDB settings based on the total RAM
MONGODB_WIRETIGER_CACHE_SIZE_GB=$((TOTAL_RAM_GB * 335 / 1000))
MONGODB_MONGOS_RAM_LIMIT=$((TOTAL_RAM_GB * 165 / 1000))Gi
MONGODB_SHARDSERVER_RAM_LIMIT=$((2 * MONGODB_WIRETIGER_CACHE_SIZE_GB))Gi
MONGODB_SHARDSERVER_RAM_REQUEST=${MONGODB_WIRETIGER_CACHE_SIZE_GB}Gi
MONGODB_MONGOS_RAM_REQUEST=$((TOTAL_RAM_GB * 33 / 1000))Gi

# Replace values before deploying
sed -i "s/MONGODB_SHARDSERVER_EXTRA_FLAGS/--wiredTigerCacheSizeGB=${MONGODB_WIRETIGER_CACHE_SIZE_GB}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_MONGOS_RAM_LIMIT/${MONGODB_MONGOS_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/*
}

get_image_from_deps() {
Expand Down Expand Up @@ -186,22 +222,31 @@ mongodb_wait_for_shards() {
--eval "db.runCommand({ listshards: 1 }).shards.length"
)

[ $count == "1" ]
[ $count == "$MONGODB_SHARD_COUNT" ]
}

mongodb_sharded() {
local SOLUTION_REGISTRY=metalk8s-registry-from-config.invalid/zenko-base-${VERSION_FULL}
local node_count=${NODE_COUNT:-1}
local shard_count=${MONGODB_SHARD_COUNT}

kustomize edit set image \
$SOLUTION_REGISTRY/mongodb-sharded=$(get_image_from_deps mongodb-sharded) \
$SOLUTION_REGISTRY/os-shell=$(get_image_from_deps mongodb-shell) \
$SOLUTION_REGISTRY/mongodb-exporter=$(get_image_from_deps mongodb-sharded-exporter)
local base_yaml_path=$(get_mongodb_topology_file $node_count $shard_count)

kubectl apply -k .
sed -i "s|${SOLUTION_REGISTRY}/mongodb-sharded:.*|$(get_image_from_deps mongodb-sharded)|g" "$base_yaml_path"
sed -i "s|${SOLUTION_REGISTRY}/os-shell:.*|$(get_image_from_deps mongodb-shell)|g" "$base_yaml_path"
sed -i "s|${SOLUTION_REGISTRY}/mongodb-exporter:.*|$(get_image_from_deps mongodb-sharded-exporter)|g" "$base_yaml_path"

kubectl rollout status statefulset data-db-mongodb-sharded-mongos
kubectl rollout status statefulset data-db-mongodb-sharded-configsvr
kubectl rollout status statefulset data-db-mongodb-sharded-shard0-data
# Ensure we use no selector as the provisioner cannot handle them
patch_mongodb_selector "$base_yaml_path" "$shard_count"

kubectl apply -f "$base_yaml_path"

kubectl rollout status statefulset data-db-mongodb-sharded-mongos --timeout=5m
kubectl rollout status statefulset data-db-mongodb-sharded-configsvr --timeout=5m

for ((i=0; i<MONGODB_SHARD_COUNT; i++)); do
kubectl rollout status statefulset "data-db-mongodb-sharded-shard${i}-data" --timeout=5m
done
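    # Ready pods do not imply the shards have registered with the config
    # servers yet, hence the polling below.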

retry mongodb_wait_for_shards "no shards found"
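    # Illustrative manual check once the retry succeeds (pod name assumed
    # from the StatefulSet name):
    #   kubectl exec data-db-mongodb-sharded-mongos-0 -- \
    #     mongosh -u "$MONGODB_ROOT_USERNAME" -p "$MONGODB_ROOT_PASSWORD" \
    #       --eval "db.adminCommand({ listshards: 1 })"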

39 changes: 0 additions & 39 deletions .github/scripts/end2end/kustomization.yaml

This file was deleted.

92 changes: 0 additions & 92 deletions .github/scripts/end2end/vault-e2e-test.sh

This file was deleted.
