Skip to content

Commit a88ec9a

Browse files
committed
Merge branch 'improvement/ZENKO-4414' into q/2.11
2 parents 34cab6a + 0deadb5 commit a88ec9a

File tree

6 files changed

+123
-9
lines changed

6 files changed

+123
-9
lines changed

.github/actions/deploy/action.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,10 @@ inputs:
77
description: "The tag of the Zenko Operator image to use"
88
required: false
99
default: ""
10+
deploy_metadata:
11+
description: "Deploy a metadata cluster alongside Zenko"
12+
required: false
13+
default: "false"
1014
runs:
1115
using: composite
1216
steps:
@@ -83,6 +87,11 @@ runs:
8387
shell: bash
8488
run: sh tests/smoke/deploy-sorbet-resources.sh end2end
8589
working-directory: ./.github/scripts/end2end/operator
90+
- name: Deploy metadata
91+
shell: bash
92+
run: ./deploy-metadata.sh
93+
working-directory: ./.github/scripts/end2end
94+
if: ${{ inputs.deploy_metadata == 'true' }}
8695
- name: End-to-end configuration
8796
shell: bash
8897
run: bash configure-e2e.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "default"

.github/scripts/end2end/common.sh

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,55 @@ get_token() {
99
jq -cr '.id_token'
1010
}
1111

12+
# Wait until a TCP endpoint (host:port) accepts connections, probing from
# inside the cluster via a short-lived busybox pod (one probe per second).
#
# Arguments:
#   $1 - host (IP or DNS name reachable from within the cluster)
#   $2 - TCP port to probe
#   $3 - timeout in seconds
# Returns: 0 once the endpoint accepts a connection; non-zero if the probe
# times out or the probe pod cannot be scheduled/attached.
wait_for_endpoint() {
    local host=$1
    local port=$2
    local timeout_s=$3

    # --rm deletes the pod once it exits, so sequential calls can reuse the
    # same pod name; --attach=True propagates the inner script's exit status.
    kubectl run wait-for-port \
        --image=busybox \
        --attach=True \
        --rm \
        --restart=Never \
        --pod-running-timeout=5m \
        --image-pull-policy=IfNotPresent \
        --env="HOST=${host}" \
        --env="PORT=${port}" \
        --env="TIMEOUT_S=${timeout_s}" \
        -- sh -c '
        wait_for_endpoint() {
            local count=0
            echo "waiting for $HOST:$PORT to be available"
            while ! nc -z -w 1 "$HOST" "$PORT"; do
                count=$((count + 1))
                [ "$count" -ge "$TIMEOUT_S" ] && echo "Error: timed out waiting for $HOST:$PORT after $TIMEOUT_S seconds" && return 1
                sleep 1
            done
            echo "$HOST:$PORT is now available."
        }
        wait_for_endpoint
        '
}
41+
42+
# Wait for every non-terminating pod backing a service to serve its
# container ports matching a glob pattern.
#
# NOTE: this file is sourced from a #!/bin/sh script, so the original
# `[[ $port == $port_regex ]]` bashism is replaced by a POSIX `case`
# pattern match (same glob semantics).
#
# Arguments:
#   $1 - service name (matched against the pod label app=<service>)
#   $2 - namespace
#   $3 - glob pattern selecting which container ports to wait on (e.g. "91*")
#   $4 - per-endpoint timeout in seconds
wait_for_all_pods_behind_services() {
    local service=$1
    local namespace=$2
    local port_regex=$3
    local timeout_s=$4
    local pods output deletion_timestamp ip ports port

    # One line per pod: "<deletionTimestamp>:<podIP>:<port1 port2 ...>"
    pods=$(kubectl get pods -n "$namespace" -l "app=$service" -o jsonpath='{range .items[*]}{.metadata.deletionTimestamp}:{.status.podIP}:{.spec.containers[*].ports[*].containerPort}{"\n"}{end}')

    # Feed the loop from a here-doc rather than a pipeline: a piped `while`
    # runs in a subshell, which can mask a failing wait_for_endpoint from
    # the caller's `set -e`.
    while read -r output; do
        deletion_timestamp=$(echo "$output" | cut -d':' -f1)
        ip=$(echo "$output" | cut -d':' -f2)
        ports=$(echo "$output" | cut -d':' -f3)
        # skip pods that are terminating, or not yet assigned an IP/ports
        if [ -n "$deletion_timestamp" ] || [ -z "$ip" ] || [ -z "$ports" ]; then
            continue
        fi
        # waiting for all ports that match the port prefix in cases where
        # multiple containers are running within the same pod
        for port in $ports; do
            case $port in
            $port_regex)
                wait_for_endpoint "$ip" "$port" "$timeout_s"
                ;;
            esac
        done
    done <<EOF
$pods
EOF
}
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
#!/bin/sh
# Deploy a standalone S3 Connector (metadata) cluster into its own
# "metadata" namespace, to back Ring-based ingestion tests.
#
# Required environment:
#   GIT_ACCESS_TOKEN - token with read access to github.com/scality/metadata
#
# Relies on helpers from common.sh (wait_for_all_pods_behind_services).

set -exu

. "$(dirname "$0")/common.sh"

# create a separate namespace for metadata
kubectl create namespace metadata

# clone the metadata repository (shallow, single commit, no tags)
git init metadata
cd metadata
# suspend xtrace around the fetch so the access token embedded in the URL
# is not echoed into the CI log
set +x
git fetch --depth 1 --no-tags "https://${GIT_ACCESS_TOKEN}@github.com/scality/metadata.git"
set -x
git checkout FETCH_HEAD

# install metadata chart in a separate namespace
cd helm
helm dependency update cloudserver/
helm install -n metadata \
    --set metadata.persistentVolume.storageClass='' \
    --set metadata.sproxyd.persistentVolume.storageClass='' \
    s3c cloudserver/

# wait for the repds to be created
kubectl -n metadata rollout status --watch --timeout=300s statefulset/s3c-metadata-repd
# wait for all repd pods to start serving admin API ports
wait_for_all_pods_behind_services metadata-repd metadata "91*" 60

# current chart uses an old version of bucketd that has issues reconnecting to the repd
# when bucketd is started first. Restarting bucketd after repd is ready.
kubectl -n metadata rollout restart deployment/s3c-metadata-bucketd
# wait for the bucketd pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deploy/s3c-metadata-bucketd
# wait for all bucketd pods to start serving port 9000
wait_for_all_pods_behind_services metadata-bucketd metadata 9000 60

# manually add "s3c.local" to the rest endpoints list as it's not configurable in the chart
current_config=$(kubectl get configmap/s3c-cloudserver-config-json -n metadata -o jsonpath='{.data.config\.json}')
updated_config=$(echo "$current_config" | jq '.restEndpoints["s3c.local"] = "us-east-1"')
kubectl patch configmap/s3c-cloudserver-config-json -n metadata --type='merge' -p="$(jq -n --arg v "$updated_config" '{"data": {"config.json": $v}}')"

# restarting cloudserver to take the new configmap changes into account
kubectl -n metadata rollout restart deployment/s3c-cloudserver
# wait for the cloudserver pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deployment/s3c-cloudserver
# wait for the cloudserver pods to start serving port 8000
wait_for_all_pods_behind_services cloudserver metadata 8000 60

.github/scripts/end2end/patch-coredns.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ corefile="
3636
rewrite name exact prom.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
3737
rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
3838
rewrite name exact website.mywebsite.com ingress-nginx-controller.ingress-nginx.svc.cluster.local
39+
rewrite name exact s3c.local s3c-cloudserver.metadata.svc.cluster.local
3940
kubernetes cluster.local in-addr.arpa ip6.arpa {
4041
pods insecure
4142
fallthrough in-addr.arpa ip6.arpa

.github/workflows/end2end.yaml

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -76,11 +76,11 @@ env:
7676
GCP_BACKEND_SERVICE_EMAIL: ${{ secrets.GCP_BACKEND_SERVICE_EMAIL }}
7777
# Enable this for Ring tests
7878
ENABLE_RING_TESTS: "false"
79-
RING_S3C_ACCESS_KEY: ${{ secrets.RING_S3C_BACKEND_ACCESS_KEY }}
80-
RING_S3C_SECRET_KEY: ${{ secrets.RING_S3C_BACKEND_SECRET_KEY }}
81-
RING_S3C_ENDPOINT: ${{ secrets.RING_S3C_BACKEND_ENDPOINT }}
79+
RING_S3C_ACCESS_KEY: accessKey1
80+
RING_S3C_SECRET_KEY: verySecretKey1
81+
RING_S3C_ENDPOINT: http://s3c.local:8000
8282
RING_S3C_BACKEND_SOURCE_LOCATION: rings3cbackendingestion
83-
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}-${{ github.run_attempt }}
83+
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}
8484
# CTST end2end tests
8585
NOTIF_DEST_NAME: "destination1"
8686
NOTIF_DEST_TOPIC: "destination-topic-1"
@@ -320,9 +320,6 @@ jobs:
320320
run: |-
321321
cd tests/zenko_tests
322322
envsubst < 'e2e-config.yaml.template' > 'e2e-config.yaml'
323-
if [[ "${ENABLE_RING_TESTS}" == "false" ]]; then
324-
yq -i 'del(.locations[] | select(.locationType == "location-scality-ring-s3-v1"))' e2e-config.yaml
325-
fi
326323
cat e2e-config.yaml
327324
echo 'Generated e2e-config.yaml file'
328325
- name: Build and push CI image
@@ -556,8 +553,8 @@ jobs:
556553
needs: [build-kafka, build-test-image]
557554
runs-on:
558555
- ubuntu-22.04-8core
559-
# Enable this for Ring-based tests
560-
# - scality-cloud
556+
env:
557+
ENABLE_RING_TESTS: "true"
561558
steps:
562559
- name: Checkout
563560
uses: actions/checkout@v4
@@ -575,6 +572,8 @@ jobs:
575572
uses: ./.github/actions/deploy
576573
env:
577574
GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
575+
with:
576+
deploy_metadata: ${{ env.ENABLE_RING_TESTS }}
578577
- name: Run backbeat end to end tests
579578
run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default"
580579
working-directory: ./.github/scripts/end2end

tests/zenko_tests/e2e_config/locations.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#!/usr/bin/env python
22

33
import logging
4+
import os
45

56
_log = logging.getLogger("end2end configuration")
67

@@ -11,6 +12,11 @@ def create_location(client, uuid, location):
1112
:param uuid: zenko instance uuid
1213
:param location: location details
1314
"""
15+
16+
ENABLE_RING_TESTS = os.environ['ENABLE_RING_TESTS']
17+
if ENABLE_RING_TESTS == "false" and location["locationType"] == "location-scality-ring-s3-v1":
18+
return
19+
1420
try:
1521
Location_V1 = client.get_model('location-v1')
1622
if "bootstrapList" not in location["details"]:

0 commit comments

Comments
 (0)