forked from awslabs/mountpoint-s3-csi-driver
-
Notifications
You must be signed in to change notification settings - Fork 0
171 lines (146 loc) · 5.82 KB
/
e2e-openshift.yaml
File metadata and controls
171 lines (146 loc) · 5.82 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
---
name: E2E Tests (on-demand)

on:
  # Manual trigger from Actions tab
  workflow_dispatch:
  # PR label trigger: add "test-openshift" label to run
  pull_request:
    types: [labeled]
  # PR comment trigger: comment "/test-openshift" to run
  issue_comment:
    types: [created]

# Least-privilege token: the only GitHub API use is reading PR metadata in
# the "Resolve PR ref" step. NOTE(review): widen if a future step needs more.
permissions:
  contents: read
  pull-requests: read

# One run per PR (label- and comment-triggered runs for the same PR share a
# group) or per branch for manual dispatch; a newer trigger cancels the older
# run so expensive 8-core runners are not stacked.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.event.issue.number || github.ref }}
  cancel-in-progress: true

env:
  CLUSTER_TYPE: openshift

jobs:
  openshift-e2e:
    name: OpenShift CRC
    runs-on: ubuntu-22.04-8core
    # Run when:
    #   1. Manual dispatch (workflow_dispatch)
    #   2. PR labeled with "test-openshift"
    #   3. PR comment contains "/test-openshift"
    if: >
      github.event_name == 'workflow_dispatch' ||
      (github.event_name == 'pull_request' &&
      contains(github.event.pull_request.labels.*.name, 'test-openshift')) ||
      (github.event_name == 'issue_comment' &&
      github.event.issue.pull_request &&
      contains(github.event.comment.body, '/test-openshift'))
    timeout-minutes: 60
    env:
      CLOUDSERVER_TAG: ${{ vars.CLOUDSERVER_RING_9_5 }}
    steps:
      - name: Resolve PR ref
        id: resolve_ref
        # github.sha works for workflow_dispatch (selected branch HEAD) and
        # pull_request/labeled (PR merge commit). Only issue_comment is different:
        # GitHub sets github.sha to the default branch, not the PR, so we look
        # up the PR head SHA via the API.
        #
        # --repo is required: this step runs before actions/checkout, so gh
        # has no local git repository to infer the target repo from.
        run: |
          if [ "${{ github.event_name }}" = "issue_comment" ]; then
            PR_NUMBER="${{ github.event.issue.number }}"
            REF=$(gh pr view "$PR_NUMBER" --repo "${{ github.repository }}" --json headRefOid -q .headRefOid)
            echo "ref=$REF" >> "$GITHUB_OUTPUT"
          else
            echo "ref=${{ github.sha }}" >> "$GITHUB_OUTPUT"
          fi
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Check out repository
        uses: actions/checkout@v5
        with:
          ref: ${{ steps.resolve_ref.outputs.ref }}
          fetch-depth: 0

      - name: CRC Setup
        uses: ./.github/actions/crc-setup
        with:
          pull_secret: ${{ secrets.CRC_PULL_SECRET }}

      - name: Setup Helm
        uses: azure/setup-helm@v4.3.1

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: "tests/e2e/go.mod"
          # Disable cache: E2E builds produce a ~16GB build cache whose upload
          # (~35 min at runner speeds) exceeds the job timeout and gets cancelled.
          # This workflow runs on-demand so the cache hit rate is too low to justify it.
          cache: false

      - name: Install Mage and Ginkgo
        run: |
          go install github.com/magefile/mage@latest
          go install github.com/onsi/ginkgo/v2/ginkgo@v2.25.2
          echo "$(go env GOPATH)/bin" >> "$GITHUB_PATH"

      # CRC's internal registry route serves a self-signed certificate, so the
      # host Docker daemon must be told to treat it as insecure before pushes.
      - name: Configure Docker for CRC Registry
        run: |
          REGISTRY=default-route-openshift-image-registry.apps-crc.testing
          echo "{\"insecure-registries\": [\"$REGISTRY\"]}" | sudo tee /etc/docker/daemon.json
          sudo systemctl restart docker

      - name: Build Container Image
        run: mage buildImage

      - name: Load Image to CRC
        run: mage loadImageToCluster

      - name: Deploy CloudServer (S3)
        run: mage e2e:deployS3

      - name: Get Host IP Address
        id: get_ip
        run: |
          # Try multiple methods to find the IP reachable from CRC VM
          # Method 1: virsh bridge gateway
          if command -v virsh &>/dev/null; then
            BRIDGE_IP=$(sudo virsh net-dumpxml crc 2>/dev/null | grep -oP "ip address='\K[^']+" || true)
          fi
          # Method 2: ip route default gateway
          if [ -z "$BRIDGE_IP" ]; then
            BRIDGE_IP=$(ip route get 1.1.1.1 | grep -oP 'src \K\S+' || true)
          fi
          # Method 3: hostname -I
          if [ -z "$BRIDGE_IP" ]; then
            BRIDGE_IP=$(hostname -I | awk '{print $1}')
          fi
          echo "host_ip=${BRIDGE_IP}" >> "$GITHUB_OUTPUT"
          echo "Detected host IP: ${BRIDGE_IP}"

      - name: Configure hosts file for S3 FQDN
        run: |
          echo "${{ steps.get_ip.outputs.host_ip }} s3.scality.com" | sudo tee -a /etc/hosts
          cat /etc/hosts | grep s3.scality.com

      - name: Configure OpenShift DNS for S3 FQDN
        run: |
          S3_HOST_IP=${{ steps.get_ip.outputs.host_ip }} \
          CLUSTER_TYPE=openshift \
          mage e2e:configureCIDNS

      - name: Apply SecurityContextConstraints
        run: oc apply -f .github/openshift/scc.yaml

      - name: Grant Privileged SCC to Test Namespaces
        run: |
          # Cache tests run privileged init containers for chmod on hostPath.
          # Grant the built-in privileged SCC to all SAs in the cluster so
          # dynamically-created test namespaces can schedule these pods.
          oc adm policy add-scc-to-group privileged system:serviceaccounts

      - name: Start Kubernetes Event and Log Capture
        run: mage e2e:startCapture

      - name: Apply CRDs
        run: mage e2e:applyCRDs

      - name: Run OpenShift E2E Tests
        run: |
          mkdir -p test-results
          S3_ENDPOINT_URL=http://s3.scality.com:8000 \
          CSI_IMAGE_TAG=local \
          CSI_IMAGE_REPOSITORY=image-registry.openshift-image-registry.svc:5000/kube-system/mountpoint-s3-csi-driver \
          JUNIT_REPORT=./test-results/openshift-e2e-results.xml \
          mage e2e:openShiftAll

      - name: Stop Capture and Collect Artifacts
        if: always()
        run: mage e2e:stopCapture

      - name: Upload Test Artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: openshift-e2e-artifacts
          path: artifacts

      - name: Upload test results to Codecov
        if: always()
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          file: ./tests/e2e/test-results/openshift-e2e-results.xml
          flags: e2e_tests,openshift
          slug: scality/mountpoint-s3-csi-driver