Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions .github/actions/e2e-setup-common/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@ inputs:
ref:
description: "Git ref to checkout"
required: true
deploy_csi_driver:
description: "Whether to deploy the CSI driver via Helm"
required: false
default: "true"
runs:
using: "composite"
steps:
Expand Down Expand Up @@ -39,3 +43,42 @@ runs:
run: |
docker pull ghcr.io/${{ github.repository }}:${{ github.sha }}
kind load docker-image ghcr.io/${{ github.repository }}:${{ github.sha }} --name helm-test-cluster

- name: Deploy Scality Storage
shell: bash
run: |-
set -e -o pipefail;
mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb
chown -R runner:docker logs
chmod -R ugo+rwx data
docker compose --profile s3 up -d --quiet-pull
bash ../scripts/wait_for_local_port.bash 8000 30
working-directory: .github/scality-storage-deployment
if: ${{ inputs.deploy_csi_driver == 'true' }}

- name: Set S3 Endpoint URL environment variable
shell: bash
run: |
echo "S3_ENDPOINT_URL=http://$(hostname -I | awk '{print $1}'):8000" >> $GITHUB_ENV
echo "S3 Endpoint URL: http://$(hostname -I | awk '{print $1}'):8000"
if: ${{ inputs.deploy_csi_driver == 'true' }}

- name: Setup Helm
uses: azure/[email protected]
if: ${{ inputs.deploy_csi_driver == 'true' }}

- name: Install CSI Driver with Helm
if: ${{ inputs.deploy_csi_driver == 'true' }}
shell: bash
run: |
helm upgrade --install mountpoint-s3-csi-driver --namespace kube-system ./charts/scality-mountpoint-s3-csi-driver --values \
./charts/scality-mountpoint-s3-csi-driver/values.yaml \
--set image.repository=ghcr.io/${{ github.repository }} \
--set image.tag=${{ github.sha }} \
--set node.serviceAccount.create=true \
--set node.podInfoOnMountCompat.enable=true \
--set experimental.podMounter=systemd \
--set config.s3EndpointUrl=${{ env.S3_ENDPOINT_URL }}
kubectl rollout status daemonset s3-csi-node -n kube-system --timeout=60s
kubectl get pods -A
echo "s3-csi-node-image: $(kubectl get daemonset s3-csi-node -n kube-system -o jsonpath="{$.spec.template.spec.containers[:1].image}")"
49 changes: 49 additions & 0 deletions .github/scality-storage-deployment/cloudserver-config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
{
"port": 8000,
"listenOn": [],
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
"cloudserver-front": "us-east-1",
"s3.docker.test": "us-east-1",
"127.0.0.2": "us-east-1",
"s3.amazonaws.com": "us-east-1"
},
"websiteEndpoints": [
"s3-website-us-east-1.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
"s3-website-us-west-1.amazonaws.com",
"s3-website-us-west-2.amazonaws.com",
"s3-website.ap-south-1.amazonaws.com",
"s3-website.ap-northeast-2.amazonaws.com",
"s3-website-ap-southeast-1.amazonaws.com",
"s3-website-ap-southeast-2.amazonaws.com",
"s3-website-ap-northeast-1.amazonaws.com",
"s3-website.eu-central-1.amazonaws.com",
"s3-website-eu-west-1.amazonaws.com",
"s3-website-sa-east-1.amazonaws.com",
"s3-website.localhost",
"s3-website.scality.test"
],
"vaultd": {
"host": "localhost",
"port": 8500
},
"clusters": 1,
"log": {
"logLevel": "trace",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"recordLog": {
"enabled": false,
"recordLogName": "s3-recordlog"
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
}
}
11 changes: 11 additions & 0 deletions .github/scality-storage-deployment/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Compose service that runs a Scality CloudServer S3 endpoint for e2e tests.
# Started by CI with `docker compose --profile s3 up -d` (profile-gated, so a
# plain `up` does not launch it).
services:
  s3:
    # Only started when the "s3" profile is explicitly requested.
    profiles: ['s3']
    # Image reference comes from the CLOUDSERVER_IMAGE environment variable;
    # compose fails the service if it is unset and the pull is attempted.
    image: ${CLOUDSERVER_IMAGE}
    # Host networking so the in-container server listens directly on the
    # runner's port 8000 (matches the config mounted below).
    network_mode: host
    environment:
      # Path of the mounted CloudServer configuration inside the container.
      S3_CONFIG_FILE: /conf/config.json
    # Run the in-memory backend and capture its output to a log file on the
    # mounted logs volume.
    command: /bin/sh -c "yarn run mem_backend > /logs/s3/s3.log 2>&1"
    volumes:
      # Read-only server configuration (ports, endpoints, logging).
      - ./cloudserver-config.json:/conf/config.json:ro
      # Writable log directory; created and chown'd by the CI setup step.
      - ./logs/s3:/logs/s3
28 changes: 28 additions & 0 deletions .github/scripts/wait_for_local_port.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Wait until a TCP port on localhost accepts connections, or fail after a timeout.
#
# Usage: wait_for_local_port.bash PORT TIMEOUT_SECONDS
#
# Exits 0 once `nc` can connect to localhost:PORT; exits 1 (after printing a
# message) if the port is still closed after TIMEOUT_SECONDS attempts.

# Fail loudly on unset positional parameters instead of looping on empty values.
set -u

wait_for_local_port() {
    local port="$1"
    local timeout="$2"
    local count=0
    local ret=1

    echo "waiting for storage-service:${port}"

    # Poll once per second: nc -z probes without sending data, -w 1 caps each
    # connect attempt at one second.
    while [[ "$ret" -ne 0 && "$count" -lt "$timeout" ]]; do
        nc -z -w 1 localhost "$port"
        ret=$?
        if [[ "$ret" -ne 0 ]]; then
            echo -n .
            sleep 1
            count=$((count + 1))
        fi
    done

    echo ""

    # Decide success on the actual probe result, not on count == timeout:
    # checking ret directly cannot be confused by the loop-exit reason.
    if [[ "$ret" -ne 0 ]]; then
        echo "Server did not start in less than ${timeout} seconds. Exiting..."
        exit 1
    fi

    echo "Server got ready in ~${count} seconds. Starting test now..."
}

wait_for_local_port "$1" "$2"
41 changes: 40 additions & 1 deletion .github/workflows/ci-and-e2e-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,12 @@ on:
branches:
- '**'

env:
CLOUDSERVER_IMAGE: ${{ vars.CLOUDSERVER_IMAGE }}
AWS_ACCESS_KEY_ID: "accessKey1"
AWS_SECRET_ACCESS_KEY: "verySecretKey1"
KUBECONFIG: "/home/runner/.kube/config"

jobs:
dev-image:
name: Dev Image
Expand All @@ -20,7 +26,7 @@ jobs:

controller-e2e-tests:
name: E2E Controller Tests
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs: dev-image
steps:
- name: Check out repository
Expand All @@ -30,6 +36,39 @@ jobs:
uses: ./.github/actions/e2e-setup-common
with:
ref: ${{ github.sha }}
deploy_csi_driver: "false"

- name: Run Controller Tests
run: make e2e-controller

# systemd mounter is used when the mounter is launched within the CSI driver pod.
systemd-mounter-e2e-tests:
name: E2E Systemd Mounter Tests
runs-on: ubuntu-24.04
needs: dev-image
steps:
- name: Check out repository
uses: actions/checkout@v4

- name: Run Common Setup
uses: ./.github/actions/e2e-setup-common
with:
ref: ${{ github.sha }}

- name: Run E2E Tests (SystemdMounter)
env:
S3_ENDPOINT_URL: ${{ env.S3_ENDPOINT_URL }}
run: |
set +e
pushd tests/e2e-kubernetes
KUBECONFIG=/home/runner/.kube/config ginkgo -p -vv -timeout 60m -- \
--bucket-region=us-east-1 \
--commit-id=${{ github.sha }} \
--bucket-prefix=helm-test-cluster
EXIT_CODE=$?
kubectl logs -l app=s3-csi-node -n kube-system --kubeconfig ${KUBECONFIG}
kubectl version --kubeconfig ${KUBECONFIG}
kubectl get nodes -o wide --kubeconfig ${KUBECONFIG}
popd
cat tests/e2e-kubernetes/csi-test-artifacts/output.json
exit $EXIT_CODE
Loading