-
Notifications
You must be signed in to change notification settings - Fork 1
228 lines (197 loc) · 9.64 KB
/
e2e.yml
File metadata and controls
228 lines (197 loc) · 9.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
name: "End to End (GitLab)"
on:
# Triggered by GitLab webhook
repository_dispatch:
types:
- gitlab-push
- gitlab-merge-request
# Allow manual triggering
workflow_dispatch:
inputs:
gitlab_branch:
description: 'GitLab branch to test'
required: false
default: 'enterprise'
concurrency:
group: ${{ github.head_ref || github.run_id }}/e2e
cancel-in-progress: true
permissions:
contents: read
jobs:
e2e:
name: FUSE Mount
runs-on: ubuntu-22.04
timeout-minutes: 60
steps:
- name: Checkout artifactory repo
uses: actions/checkout@v5
# Free disk space for large builds
- name: Free Disk Space
run: |
echo "Available disk space before cleanup:"
df -h
sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
sudo apt-get clean
sudo rm -rf /var/lib/apt/lists/*
sudo docker system prune -af --volumes
echo "Available disk space after cleanup:"
df -h
- name: Clone private GitLab repository
env:
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
run: |
# Determine branch from webhook or manual input
BRANCH="${{ github.event_name == 'repository_dispatch' && github.event.client_payload.ref || github.event.inputs.gitlab_branch || 'enterprise' }}"
BRANCH=$(echo "$BRANCH" | sed 's|refs/heads/||')
if [ "$BRANCH" = "null" ]; then
BRANCH="enterprise"
fi
echo "Cloning branch: $BRANCH"
git clone -b $BRANCH https://gitlab-ci-token:${GITLAB_TOKEN}@gitlab.com/chrislusf/seaweedfs.git seaweedfs-source
cd seaweedfs-source
echo "Cloned commit: $(git rev-parse HEAD)"
echo "COMMIT_SHA=$(git rev-parse --short=8 HEAD)" >> $GITHUB_ENV
- name: Set up Go 1.x
uses: actions/setup-go@v6
with:
go-version-file: 'seaweedfs-source/go.mod'
id: go
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Configure BuildKit with Docker Hub mirror to reduce rate limit hits
- name: Create BuildKit config
run: |
mkdir -p ~/.docker
cat > ~/.docker/daemon.json <<EOF
{
"registry-mirrors": ["https://mirror.gcr.io"]
}
EOF
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-e2e-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-e2e-
- name: Install dependencies
run: |
# Use faster mirrors and install with timeout
sudo rm -f /etc/apt/sources.list.d/azure-cli.list /etc/apt/sources.list.d/microsoft-prod.list
echo "deb http://azure.archive.ubuntu.com/ubuntu/ $(lsb_release -cs) main restricted universe multiverse" | sudo tee /etc/apt/sources.list
echo "deb http://azure.archive.ubuntu.com/ubuntu/ $(lsb_release -cs)-updates main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
sudo apt-get update --fix-missing
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends fuse
# Verify FUSE installation
echo "FUSE version: $(fusermount --version 2>&1 || echo 'fusermount not found')"
echo "FUSE device: $(ls -la /dev/fuse 2>&1 || echo '/dev/fuse not found')"
- name: Start SeaweedFS
timeout-minutes: 20
working-directory: seaweedfs-source/docker
run: |
# Enable Docker buildkit for better caching
export DOCKER_BUILDKIT=1
export COMPOSE_DOCKER_CLI_BUILD=1
# Build with retry logic
for i in {1..3}; do
echo "Build attempt $i/3"
if make build_e2e; then
echo "Build successful on attempt $i"
break
elif [ $i -eq 3 ]; then
echo "Build failed after 3 attempts"
exit 1
else
echo "Build attempt $i failed, retrying in 30 seconds..."
sleep 30
fi
done
# Start services with wait
docker compose -f ./compose/e2e-mount.yml up --wait
- name: Run FIO 4k
timeout-minutes: 15
working-directory: seaweedfs-source/docker
run: |
echo "Starting FIO at: $(date)"
# Concurrent r/w
echo 'Run randrw with size=16M bs=4k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --group_reporting --runtime=30 --time_based=1
echo "Verify FIO at: $(date)"
# Verified write
echo 'Run randwrite with size=16M bs=4k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
- name: Run FIO 128k
timeout-minutes: 15
working-directory: seaweedfs-source/docker
run: |
echo "Starting FIO at: $(date)"
# Concurrent r/w
echo 'Run randrw with size=16M bs=128k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
echo "Verify FIO at: $(date)"
# Verified write
echo 'Run randwrite with size=16M bs=128k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
- name: Run FIO 1MB
timeout-minutes: 15
working-directory: seaweedfs-source/docker
run: |
echo "Starting FIO at: $(date)"
# Concurrent r/w
echo 'Run randrw with size=16M bs=1m'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
echo "Verify FIO at: $(date)"
# Verified write
echo 'Run randwrite with size=16M bs=1m'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
- name: Save logs
if: always()
working-directory: seaweedfs-source/docker
run: |
docker compose -f ./compose/e2e-mount.yml logs > output.log
echo 'Showing last 500 log lines of mount service:'
docker compose -f ./compose/e2e-mount.yml logs --tail 500 mount
- name: Check for data races
if: always()
continue-on-error: true # TODO: remove this comment to enable build failure on data races (after all are fixed)
working-directory: seaweedfs-source/docker
run: grep -A50 'DATA RACE' output.log && exit 1 || exit 0
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: e2e-output-logs-${{ env.COMMIT_SHA }}
path: seaweedfs-source/docker/output.log
- name: E2E Test Summary
if: always()
run: |
echo "## 🚀 End-to-End FUSE Test Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### GitLab Source" >> $GITHUB_STEP_SUMMARY
echo "- **Repository**: https://gitlab.com/chrislusf/seaweedfs" >> $GITHUB_STEP_SUMMARY
echo "- **Commit**: ${{ env.COMMIT_SHA }}" >> $GITHUB_STEP_SUMMARY
echo "- **Branch**: $(echo '${{ github.event.client_payload.ref || github.event.inputs.gitlab_branch || 'enterprise' }}' | sed 's|refs/heads/||')" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Components" >> $GITHUB_STEP_SUMMARY
echo "- 🐳 **Docker Compose**: SeaweedFS cluster with FUSE mount" >> $GITHUB_STEP_SUMMARY
echo "- 📊 **FIO Tests**: Performance testing with multiple block sizes" >> $GITHUB_STEP_SUMMARY
echo "- 🔍 **Data Race Detection**: Automated race condition checking" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Performance Tests" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **4K Block Size**: Random read/write operations" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **128K Block Size**: Medium block operations" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **1MB Block Size**: Large block operations" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Verification" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Data Integrity**: CRC32C verification enabled" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Concurrent Operations**: Multi-job stress testing" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Direct I/O**: Bypass page cache for realistic performance" >> $GITHUB_STEP_SUMMARY
- name: Cleanup
if: always()
working-directory: seaweedfs-source/docker
run: docker compose -f ./compose/e2e-mount.yml down --volumes --remove-orphans --rmi all