Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .github/actions/deploy/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ inputs:
description: "The tag of the Zenko Operator image to use"
required: false
default: ""
deploy_metadata:
description: "Deploy a metadata cluster alongside Zenko"
required: false
default: "false"
runs:
using: composite
steps:
Expand Down Expand Up @@ -83,6 +87,11 @@ runs:
shell: bash
run: sh tests/smoke/deploy-sorbet-resources.sh end2end
working-directory: ./.github/scripts/end2end/operator
- name: Deploy metadata
shell: bash
run: ./deploy-metadata.sh
working-directory: ./.github/scripts/end2end
if: ${{ inputs.deploy_metadata == 'true' }}
- name: End-to-end configuration
shell: bash
run: bash configure-e2e.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "default"
Expand Down
52 changes: 52 additions & 0 deletions .github/scripts/end2end/common.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,55 @@ get_token() {
jq -cr '.id_token'
}

# Wait until a TCP endpoint accepts connections, probing from *inside* the
# cluster: spawns a short-lived busybox pod and runs `nc` from it, so the
# check sees the same network view as other in-cluster workloads.
#
# Arguments:
#   $1 - host (pod IP or DNS name resolvable in-cluster)
#   $2 - TCP port to probe
#   $3 - timeout in seconds (one 1s-probe per second, so also ~max attempts)
#
# NOTE(review): failure propagation relies on `kubectl run --attach` exiting
# non-zero when the pod's command returns 1 — confirm for the kubectl
# version used in CI.
wait_for_endpoint() {
    local host=$1
    local port=$2
    local timeout_s=$3

    # --rm + --restart=Never: one-shot pod, cleaned up after the probe.
    # Parameters are passed via --env because the probe script is a single
    # quoted literal and must not be interpolated by the outer shell.
    kubectl run wait-for-port \
        --image=busybox \
        --attach=True \
        --rm \
        --restart=Never \
        --pod-running-timeout=5m \
        --image-pull-policy=IfNotPresent \
        --env="HOST=${host}" \
        --env="PORT=${port}" \
        --env="TIMEOUT_S=${timeout_s}" \
        -- sh -c '
            wait_for_endpoint() {
                local count=0
                echo "waiting for $HOST:$PORT to be available"
                # nc -z: connect-only scan; -w 1: 1s connect timeout per try
                while ! nc -z -w 1 $HOST "$PORT"; do
                    count=$((count + 1))
                    [ "$count" -ge "$TIMEOUT_S" ] && echo "Error: timedout waiting for $HOST:$PORT after $TIMEOUT_S seconds" && return 1
                    sleep 1
                done
                echo "$HOST:$PORT is now available."
            }
            wait_for_endpoint
        '
}

# Wait until every (non-terminating) pod backing a service is reachable on
# the port(s) matching a glob pattern.
#
# Arguments:
#   $1 - service/app label value (matched with -l app=...)
#   $2 - namespace
#   $3 - port glob pattern (e.g. "91*" or a literal port like 9000)
#   $4 - per-endpoint timeout in seconds (passed to wait_for_endpoint)
#
# This file is sourced from a #!/bin/sh script, so only POSIX constructs are
# used here: `case` performs the glob match instead of the bash-only
# `[[ $port == $pattern ]]`.
wait_for_all_pods_behind_services() {
    local service=$1
    local namespace=$2
    local port_pattern=$3
    local timeout_s=$4

    # One line per pod: "<deletionTimestamp>:<podIP>:<port> <port> ..."
    kubectl get pods -n "$namespace" -l "app=$service" \
        -o jsonpath='{range .items[*]}{.metadata.deletionTimestamp}:{.status.podIP}:{.spec.containers[*].ports[*].containerPort}{"\n"}{end}' \
        | while read -r line; do
            deletion_timestamp=$(printf '%s' "$line" | cut -d':' -f1)
            ip=$(printf '%s' "$line" | cut -d':' -f2)
            ports=$(printf '%s' "$line" | cut -d':' -f3)
            # skip pods that are terminating or not yet scheduled/ported
            if [ -n "$deletion_timestamp" ] || [ -z "$ip" ] || [ -z "$ports" ]; then
                continue
            fi
            # wait for every port matching the pattern, to cover pods that
            # run multiple containers exposing several ports
            # ($ports is intentionally unquoted: space-separated list)
            for port in $ports; do
                case "$port" in
                    $port_pattern)
                        wait_for_endpoint "$ip" "$port" "$timeout_s"
                        ;;
                esac
            done
        done
}
47 changes: 47 additions & 0 deletions .github/scripts/end2end/deploy-metadata.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
#!/bin/bash
# Deploy a standalone S3C (metadata) cluster used as a Ring-like backend for
# ingestion end-to-end tests.
# Requires GIT_ACCESS_TOKEN to clone the private scality/metadata repository.
#
# NOTE: must run under bash (not plain sh) because the sourced common.sh
# uses bash-only constructs.

set -exu

. "$(dirname "$0")/common.sh"

# create a separate namespace for metadata
kubectl create namespace metadata

# shallow-clone the metadata repository
git init metadata
cd metadata
# disable xtrace around the fetch so the access token embedded in the URL
# is not leaked into the CI logs
set +x
git fetch --depth 1 --no-tags "https://${GIT_ACCESS_TOKEN}@github.com/scality/metadata.git"
set -x
git checkout FETCH_HEAD

# install metadata chart in a separate namespace
cd helm
helm dependency update cloudserver/
helm install -n metadata \
    --set metadata.persistentVolume.storageClass='' \
    --set metadata.sproxyd.persistentVolume.storageClass='' \
    s3c cloudserver/

# wait for the repd statefulset pods to be created
kubectl -n metadata rollout status --watch --timeout=300s statefulset/s3c-metadata-repd
# wait for all repd pods to start serving admin API ports (91xx)
wait_for_all_pods_behind_services metadata-repd metadata "91*" 60

# current chart uses an old version of bucketd that has issues reconnecting to the repd
# when bucketd is started first. Restarting bucketd after repd is ready.
kubectl -n metadata rollout restart deployment/s3c-metadata-bucketd
# wait for the bucketd pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deploy/s3c-metadata-bucketd
# wait for all bucketd pods to start serving port 9000
wait_for_all_pods_behind_services metadata-bucketd metadata 9000 60

# manually add "s3c.local" to the rest endpoints list as it's not configurable in the chart
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why do we need s3c.local ?

I did not need such a patch on my Artesca cluster (in the default namespace, I think); I just installed the chart and created the Artesca location pointing at s3c-cloudserver.default.svc.cluster.local

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Cloudserver responds with an error when the host used is not declared in its config file. I've tried with the k8s service endpoint and it doesn't work.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe this comes from the namespace ("metadata") you deploy to, which may not be taken into account?
in my tests this is using the default namespace, with no issue...

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, even with the default namespace I got the same behaviour, which, to me, is the expected behaviour. The normal cloudserver we deploy in Zenko has all the k8s endpoints in its config.

# Fetch the live cloudserver config, add the "s3c.local" rest endpoint with
# jq, and merge-patch it back into the configmap (jq -n re-wraps the updated
# JSON as the configmap's "config.json" data key, safely escaped).
config_json=$(kubectl get configmap/s3c-cloudserver-config-json -n metadata -o jsonpath='{.data.config\.json}')
patched_json=$(printf '%s' "$config_json" | jq '.restEndpoints["s3c.local"] = "us-east-1"')
kubectl patch configmap/s3c-cloudserver-config-json -n metadata --type='merge' -p="$(jq -n --arg v "$patched_json" '{"data": {"config.json": $v}}')"

# restart cloudserver so the pods pick up the updated configmap
kubectl -n metadata rollout restart deployment/s3c-cloudserver
# wait for the restarted cloudserver pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deployment/s3c-cloudserver
# wait for the cloudserver pods to start serving port 8000
wait_for_all_pods_behind_services cloudserver metadata 8000 60
1 change: 1 addition & 0 deletions .github/scripts/end2end/patch-coredns.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ corefile="
rewrite name exact prom.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact website.mywebsite.com ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact s3c.local s3c-cloudserver.metadata.svc.cluster.local
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
Expand Down
17 changes: 8 additions & 9 deletions .github/workflows/end2end.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -76,11 +76,11 @@ env:
GCP_BACKEND_SERVICE_EMAIL: ${{ secrets.GCP_BACKEND_SERVICE_EMAIL }}
# Enable this for Ring tests
ENABLE_RING_TESTS: "false"
RING_S3C_ACCESS_KEY: ${{ secrets.RING_S3C_BACKEND_ACCESS_KEY }}
RING_S3C_SECRET_KEY: ${{ secrets.RING_S3C_BACKEND_SECRET_KEY }}
RING_S3C_ENDPOINT: ${{ secrets.RING_S3C_BACKEND_ENDPOINT }}
RING_S3C_ACCESS_KEY: accessKey1
RING_S3C_SECRET_KEY: verySecretKey1
RING_S3C_ENDPOINT: http://s3c.local:8000
RING_S3C_BACKEND_SOURCE_LOCATION: rings3cbackendingestion
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}-${{ github.run_attempt }}
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}
# CTST end2end tests
NOTIF_DEST_NAME: "destination1"
NOTIF_DEST_TOPIC: "destination-topic-1"
Expand Down Expand Up @@ -320,9 +320,6 @@ jobs:
run: |-
cd tests/zenko_tests
envsubst < 'e2e-config.yaml.template' > 'e2e-config.yaml'
if [[ "${ENABLE_RING_TESTS}" == "false" ]]; then
yq -i 'del(.locations[] | select(.locationType == "location-scality-ring-s3-v1"))' e2e-config.yaml
fi
cat e2e-config.yaml
echo 'Generated e2e-config.yaml file'
- name: Build and push CI image
Expand Down Expand Up @@ -556,8 +553,8 @@ jobs:
needs: [build-kafka, build-test-image]
runs-on:
- ubuntu-22.04-8core
# Enable this for Ring-based tests
# - scality-cloud
env:
ENABLE_RING_TESTS: "true"
steps:
- name: Checkout
uses: actions/checkout@v4
Expand All @@ -575,6 +572,8 @@ jobs:
uses: ./.github/actions/deploy
env:
GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
with:
deploy_metadata: ${{ env.ENABLE_RING_TESTS }}
- name: Run backbeat end to end tests
run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default"
working-directory: ./.github/scripts/end2end
Expand Down
6 changes: 6 additions & 0 deletions tests/zenko_tests/e2e_config/locations.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#!/usr/bin/env python

import logging
import os

_log = logging.getLogger("end2end configuration")

Expand All @@ -11,6 +12,11 @@ def create_location(client, uuid, location):
:param uuid: zenko instance uuid
:param location: location details
"""

ENABLE_RING_TESTS = os.environ['ENABLE_RING_TESTS']
if ENABLE_RING_TESTS == "false" and location["locationType"] == "location-scality-ring-s3-v1":
return

try:
Location_V1 = client.get_model('location-v1')
if "bootstrapList" not in location["details"]:
Expand Down
Loading