---
on:
  workflow_call:
    inputs:
      platform:
        # Docker platform to build for, e.g. linux/amd64 or linux/arm64.
        type: string
        required: false
        default: linux/amd64
      python_version:
        # Python version to build the wheel for, e.g. "3.11".
        type: string
        required: true
      cuda_version:
        required: true
        type: string
        description: The CUDA version used for the build (e.g. 12.3).
      devdeps_image:
        # Pre-built development dependencies image; alternative to providing
        # devdeps_cache and devdeps_archive.
        required: false
        type: string
      devdeps_cache:
        # Actions cache key holding the development dependencies docker archive.
        required: false
        type: string
      devdeps_archive:
        # Path of the docker archive restored from devdeps_cache.
        required: false
        type: string
      environment:
        # When set (non-empty), the validation job tests against the public OS
        # images instead of the GHCR mirrors. This input was referenced by the
        # validation job (inputs.environment) but previously never declared,
        # making those conditions always evaluate against an empty string.
        required: false
        type: string
        default: ''
      create_staging_info:
        # Whether to run the staging job that uploads build info for publishing.
        required: false
        type: boolean
        default: false
    outputs:
      cudaq_version:
        description: "The version of the built wheels."
        value: ${{ jobs.build_wheel.outputs.cudaq_version }}
    secrets:
      DOCKERHUB_USERNAME:
        required: true
      DOCKERHUB_READONLY_TOKEN:
        required: true
name: Python wheels
jobs:
  # Builds the CUDA-Q Python wheel for the requested Python/CUDA version
  # inside the development dependencies image, then uploads it as an artifact.
  build_wheel:
    name: Build Python ${{ inputs.python_version }} wheel
    runs-on: ${{ (contains(inputs.platform, 'arm') && 'linux-arm64-cpu8') || 'linux-amd64-cpu8' }}
    permissions:
      contents: read
      packages: read
    outputs:
      artifact_name: ${{ steps.prereqs.outputs.artifact_name }}
      cudaq_version: ${{ steps.prereqs.outputs.cudaq_version }}
    # Needed for making local images available to the docker/build-push-action.
    # See also https://stackoverflow.com/a/63927832.
    services:
      registry:
        image: registry:2
        ports:
          - 5000:5000
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_READONLY_TOKEN }}
      - name: Login to GitHub CR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ github.token }}
      # Restores the devdeps docker archive from the Actions cache; only runs
      # when both a cache key and an archive path were provided as inputs.
      - name: Restore build environment
        if: inputs.devdeps_cache && inputs.devdeps_archive
        id: restore
        uses: actions/cache/restore@v4
        with:
          path: ${{ inputs.devdeps_archive }}
          key: ${{ inputs.devdeps_cache }}
          fail-on-cache-miss: true
      # Determines the base image (loaded from the restored archive, or pulled
      # from devdeps_image), the wheel version, and the artifact name.
      - name: Load prerequisites
        id: prereqs
        run: |
          if ${{ steps.restore.outcome != 'skipped' }}; then
            load_output=`docker load --input "${{ inputs.devdeps_archive }}"`
            base_image=`echo "$load_output" | grep -o 'Loaded image: \S*:\S*' | head -1 | cut -d ' ' -f 3`
            echo "Base image: $base_image" >> $GITHUB_STEP_SUMMARY
            # Push the image to the local registry to make it available within
            # the containered environment that docker/build-push-action uses.
            docker push $base_image
            rm -rf "${{ inputs.devdeps_archive }}"
          elif ${{ inputs.devdeps_image != '' }}; then
            base_image=${{ inputs.devdeps_image }}
            echo "Base image: $base_image" >> $GITHUB_STEP_SUMMARY
            docker pull $base_image
          else
            echo "::error file=python_wheels.yml::Missing configuration for development dependencies. Either specify the image (i.e. provide devdeps_image) or cache (i.e. provide devdeps_cache and devdeps_archive) that should be used for the build."
            exit 1
          fi
          # Read the devenv version label before freeing the local image again.
          devenv_tag=`docker inspect $base_image --format='{{json .Config.Labels}}' | jq -r '."org.opencontainers.image.version"'`
          docker image rm $base_image && docker image prune --force
          cache_id=`echo ${{ inputs.python_version }}-$devenv_tag | tr . -`
          mkdir -p "/tmp/wheels"
          # Versioned builds (tags and releases/ or staging/ branches) take the
          # version from the ref name; all other builds use 0.0.0.
          is_versioned=${{ github.ref_type == 'tag' || startsWith(github.ref_name, 'releases/') || startsWith(github.ref_name, 'staging/') }}
          if ${is_versioned}; then
            cudaq_version=`echo ${{ github.ref_name }} | egrep -o "([0-9]{1,}\.)+[0-9]{1,}"`
          else
            cudaq_version=0.0.0
          fi
          echo "cudaq_version=$cudaq_version" >> $GITHUB_OUTPUT
          echo "base_image=$base_image" >> $GITHUB_OUTPUT
          echo "docker_output=type=local,dest=/tmp/wheels" >> $GITHUB_OUTPUT
          # do not change this prefix without adjusting other workflows
          echo "artifact_name=pycudaq-$cache_id" >> $GITHUB_OUTPUT
      - name: Set up context for buildx
        run: |
          docker context create builder_context
      - name: Set up buildx runner
        uses: docker/setup-buildx-action@v3
        with:
          endpoint: builder_context
          version: v0.19.0
          driver-opts: |
            network=host
            image=moby/buildkit:v0.19.0
      # The wheel is written to /tmp/wheels via the local docker output
      # (docker_output) configured in the prereqs step.
      - name: Build wheel
        id: wheel_build
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/release/cudaq.wheel.Dockerfile
          build-args: |
            base_image=${{ steps.prereqs.outputs.base_image }}
            release_version=${{ steps.prereqs.outputs.cudaq_version }}
            python_version=${{ inputs.python_version }}
          outputs: ${{ steps.prereqs.outputs.docker_output }}
      - name: Upload wheel
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.prereqs.outputs.artifact_name }}
          path: /tmp/wheels
          retention-days: 1
          if-no-files-found: error
create_test_config:
name: Prepare validation
runs-on: ubuntu-latest
permissions:
contents: read
outputs:
json: "${{ steps.config.outputs.json }}"
steps:
- name: Checkout repository
uses: actions/checkout@v4
- id: config
run: |
python_config=`cat .github/workflows/config/validation_config.json | jq ".python"`
operating_systems=`echo "$python_config" | jq '.[] | select(.version=="${{ inputs.python_version }}").operating_systems'`
echo "json={\"os_images\":$(echo $operating_systems)}" >> $GITHUB_OUTPUT
validation:
name: Validate wheel
needs: [build_wheel, create_test_config]
runs-on: ${{ (contains(inputs.platform, 'arm') && 'linux-arm64-cpu8') || 'linux-amd64-cpu8' }}
permissions:
contents: read
packages: read
outputs:
artifact_id: "${{ steps.job_summary.outputs.artifact_id }}"
strategy:
matrix:
os_image: ${{ fromJSON(needs.create_test_config.outputs.json).os_images }}
pip_install_flags: ['', '--user']
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to GitHub CR
if: inputs.environment == ''
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ github.token }}
- name: Load cuda-quantum wheel
uses: actions/download-artifact@v4
with:
name: ${{ needs.build_wheel.outputs.artifact_name }}
- name: Install wheel in clean environment
run: |
os_image=${{ matrix.os_image }}
distr=`echo $os_image | cut -d : -f 1 | cut -d / -f 1`
if ${{ inputs.environment != '' }}; then
# Validate against the public images
base_image=$os_image
else
# If we are not staging, we instead validate the images
# pushed to GHCR to limit our pull rate on public registries.
repo_owner=${{ github.repository_owner }}
image_tag=`echo $os_image | sed -e 's@.*\.io/\(\)@\1@'`
base_image=ghcr.io/${repo_owner,,}/${image_tag#${repo_owner,,}}
fi
wheelname=`echo cuda_quantum*-manylinux_*_$(uname -m).whl`
docker build --network host -t wheel_validation:local -f "docker/test/wheels/$distr.Dockerfile" . \
--build-arg base_image=$base_image \
--build-arg python_version=${{ inputs.python_version }} \
--build-arg cuda_quantum_wheel=$wheelname \
--build-arg preinstalled_modules="pytest psutil" \
--build-arg pip_install_flags=${{ matrix.pip_install_flags }}
docker run --rm -dit --name wheel-validation wheel_validation:local
(docker exec wheel-validation python${{ inputs.python_version }} -c "import cudaq") && imported=true || imported=false
docker stop wheel-validation
if ! $imported; then echo "Failed to import cudaq module." >> /tmp/validation.out
else echo "Successfully imported cudaq module." >> /tmp/validation.out; fi
- name: Run Python tests
uses: ./.github/actions/run-in-docker
with:
image: wheel_validation:local
shell: bash
run: |
python${{ inputs.python_version }} -m pytest -v /tmp/tests/ \
--ignore /tmp/tests/backends
pytest_status=$?
if [ ! $pytest_status -eq 0 ]; then
echo "::error file=python_wheel.yml::Python tests failed with status $pytest_status."
exit 1
fi
python${{ inputs.python_version }} -m pip install --user fastapi uvicorn llvmlite
for backendTest in /tmp/tests/backends/*.py; do
python${{ inputs.python_version }} -m pytest $backendTest
pytest_status=$?
# Exit code 5 indicates that no tests were collected,
# i.e. all tests in this file were skipped.
if [ ! $pytest_status -eq 0 ] && [ ! $pytest_status -eq 5 ]; then
echo "::error file=python_wheel.yml::Python $backendTest tests failed with status $pytest_status."
exit 1
fi
done
- name: Run Python MPI tests
if: matrix.os_image == 'redhat/ubi9:9.2'
uses: ./.github/actions/run-in-docker
with:
image: wheel_validation:local
shell: bash
run: |
# Uninstall pip-installed cuda-quantum in the `wheel_validation:local` image.
# In this step, we test the full installation workflow with conda;
# hence we don't want to mix conda and pip packages.
cuda_major=$(echo ${{ inputs.cuda_version }} | cut -d . -f1)
python${{ inputs.python_version }} -m pip uninstall -y cuda-quantum-cu${cuda_major}
# Install cuda-quantum wheel and all dependencies (including MPI) using conda (README instructions)
dnf install -y --nobest --setopt=install_weak_deps=False wget unzip openssh-clients
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-$(uname -m).sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
~/miniconda3/bin/conda init bash
source ~/.bashrc
# Extract the setup script from Python wheel's readme
cudaq_wheel=/tmp/cuda_quantum_cu*-manylinux_*_$(uname -m).whl
wheelname=${cudaq_wheel##*/} && archive_name=${wheelname%.*}.zip
tmploc=/tmp/scratch && mkdir $tmploc
cp $cudaq_wheel $tmploc/$wheelname && mv $tmploc/$wheelname $tmploc/$archive_name
unzip $tmploc/$archive_name -d $tmploc/cudaq_wheel && rm -rf $tmploc/$archive_name
metadata=$tmploc/cudaq_wheel/*.dist-info/METADATA
# Parse README content to install openmpi
conda_script="$(awk '/(Begin conda install)/{flag=1;next}/(End conda install)/{flag=0}flag' $metadata | grep . | sed '/^```/d')"
while IFS= read -r line; do
# Replace Python version
line=$(echo $line | sed -E "s/python(=)?3.[0-9]{1,}/python\1${{ inputs.python_version }}/g")
# Replace openmpi version requirement - version 4.0.5 was being installed which was not built with
# cuda support, thus causing a seg fault. See https://github.com/NVIDIA/cuda-quantum/issues/2623
line=$(echo "$line" | sed -E 's/\bopenmpi\b/openmpi\\>=5.0.7/g')
# Install the wheel file
line=${line//pip install cuda-quantum-cu${cuda_major}/pip install ${cudaq_wheel}}
eval "$line"
done <<< "$conda_script"
ompi_script="$(awk '/(Begin ompi setup)/{flag=1;next}/(End ompi setup)/{flag=0}flag' $metadata | grep . | sed '/^```/d')"
while IFS= read -r line; do
eval "$line"
done <<< "$ompi_script"
# Run the MPI test
python${{ inputs.python_version }} -m pip install pytest numpy
mpirun --allow-run-as-root -np 4 python${{ inputs.python_version }} -m pytest -v /tmp/tests/parallel/test_mpi_api.py
pytest_mpi_status=$?
if [ ! $pytest_mpi_status -eq 0 ]; then
echo "::error file=python_wheel.yml::Python MPI plugin test failed with status $pytest_mpi_status."
exit 1
fi
- name: Validate Python snippets in docs
run: |
docker run --rm -dit --name wheel-validation-snippets wheel_validation:local
status_sum=0
for ex in `find docs/sphinx/snippets/python -name '*.py' -not -path '*/platform/*' -not -path '*/nvqc/*' -not -path '*/backends/*'`; do
file="${ex#docs/sphinx/snippets/python/}"
echo "__Snippet ${file}:__" >> /tmp/validation.out
(docker exec wheel-validation-snippets bash -c "python${{ inputs.python_version }} /tmp/snippets/$file" >> /tmp/validation.out) && success=true || success=false
if $success; then
echo "Executed successfully." >> /tmp/validation.out
else
status_sum=$((status_sum+1))
echo "Failed." >> /tmp/validation.out
fi
done
docker stop wheel-validation-snippets
if [ ! $status_sum -eq 0 ]; then
echo "::error::$status_sum snippets failed; see step summary for a list of failures."
exit $status_sum
fi
- name: Validate Python examples
run: |
docker run --rm -dit --name wheel-validation-examples wheel_validation:local
status_sum=0
for ex in `find docs/sphinx/examples/python -name '*.py' -not -path '*/building_kernels.py' -not -path '*/dynamics/*'`; do
file="${ex#docs/sphinx/examples/python/}"
echo "__Example ${file}:__" >> /tmp/validation.out
(docker exec wheel-validation-examples bash -c "python${{ inputs.python_version }} /tmp/examples/$file" >> /tmp/validation.out) && success=true || success=false
if $success; then
echo "Executed successfully." >> /tmp/validation.out
else
status_sum=$((status_sum+1))
echo "Failed." >> /tmp/validation.out
fi
done
docker stop wheel-validation-examples
if [ ! $status_sum -eq 0 ]; then
echo "::error::$status_sum examples failed; see step summary for a list of failures."
exit $status_sum
fi
- name: Create job summary
id: job_summary
if: always() && !cancelled()
run: |
cuda_major=`echo ${{ inputs.cuda_version }} | cut -d . -f1`
platform_id=`echo "${{ inputs.platform }}" | sed 's/linux\///g' | tr -d ' ' | tr ',' -`
artifact_id=python_${platform_id}_cu$cuda_major # changing the artifact name requires updating other workflows
os_id=`echo ${{ matrix.os_image}} | tr ':' _ | tr '/' -`
echo "os_id=$os_id" >> $GITHUB_OUTPUT
echo "artifact_id=$artifact_id" >> $GITHUB_OUTPUT
if [ -f /tmp/validation.out ]; then
if [ -n "${{ matrix.pip_install_flags }}" ]; then
installation_details="(installed with ${{ matrix.pip_install_flags }})"
else
installation_details="(default installation)"
fi
summary=/tmp/summary.txt
echo "## Validation on ${{ matrix.os_image }} $installation_details" > $summary
echo "The validation of the cuda-quantum Python wheel produced the following output:" >> $summary
cat /tmp/validation.out >> $summary
echo "validation_summary=$summary" >> $GITHUB_OUTPUT
fi
- name: Upload job summary
if: steps.job_summary.outputs.validation_summary != '' && matrix.pip_install_flags == ''
uses: actions/upload-artifact@v4
with:
name: ${{ steps.job_summary.outputs.artifact_id }}_${{ steps.job_summary.outputs.os_id }}_py${{ inputs.python_version }}_validation
path: ${{ steps.job_summary.outputs.validation_summary }}
retention-days: 1
if-no-files-found: warn
  # Records build metadata (source sha, devdeps image, platform, CUDA version)
  # as an artifact so that downstream publishing workflows can locate and
  # reproduce this exact build. Only runs when create_staging_info is set.
  staging:
    name: Staging
    needs: [build_wheel, validation]
    if: inputs.create_staging_info
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Create build info
        id: staging
        run: |
          wheeldeps_hash=${{ inputs.devdeps_image }}
          platform_id=`echo "${{ inputs.platform }}" | sed 's/linux\///g' | tr -d ' ' | tr ',' -`
          artifact_name=${{ needs.validation.outputs.artifact_id }}_publishing # changing the artifact name requires updating other workflows
          echo "platform_id=$platform_id" >> $GITHUB_OUTPUT
          echo "artifact_name=$artifact_name" >> $GITHUB_OUTPUT
          info_file="$artifact_name.txt"
          echo "info_file=$info_file" >> $GITHUB_OUTPUT
          mkdir -p "$(dirname "$info_file")" && rm -rf "$info_file"
          # Key-value build info consumed by the publishing workflows.
          echo "source-sha: ${{ github.sha }}" >> "$info_file"
          echo "cuda-quantum-wheeldeps-image: $wheeldeps_hash" >> "$info_file"
          echo "platform: ${{ inputs.platform }}" >> "$info_file"
          echo "cuda-version: ${{ inputs.cuda_version }}" >> "$info_file"
          cat .github/workflows/config/gitlab_commits.txt >> "$info_file"
      - name: Upload build info
        uses: actions/upload-artifact@v4
        with:
          name: ${{ steps.staging.outputs.artifact_name }} # changing the artifact name requires updating other workflows
          path: ${{ steps.staging.outputs.info_file }}
          retention-days: 30
          if-no-files-found: error