Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .github/workflows/all_libs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,10 @@ jobs:
LD_LIBRARY_PATH: ${{ env.MPI_PATH }}/lib:${{ env.LD_LIBRARY_PATH }}
shell: bash
run: |
pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} torch lightning ml_collections mpi4py transformers quimb opt_einsum torch nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
# Install the correct torch first.
cuda_no_dot=$(echo ${{ matrix.cuda_version }} | sed 's/\.//')
pip install torch==2.9.0 --index-url https://download.pytorch.org/whl/cu${cuda_no_dot}
pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
# The following tests are needed for docs/sphinx/examples/qec/python/tensor_network_decoder.py.
if [ "$(uname -m)" == "x86_64" ]; then
# Stim is not currently available on manylinux ARM wheels, so only
Expand Down
48 changes: 33 additions & 15 deletions .github/workflows/all_libs_release.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,11 @@ on:
assets_repo:
type: string
description: Retrieve assets from a draft release from this repo (e.g. NVIDIA/cudaqx)
default: 'NVIDIA/cudaqx'
required: false
assets_tag:
type: string
description: Retrieve assets from a draft release with this tag (e.g. installed_files-1)
description: Retrieve assets from a draft release with this tag (e.g. docker-files-123)
required: false

jobs:
Expand All @@ -26,19 +27,29 @@ jobs:
{ arch: arm64, gpu: a100 },
{ arch: amd64, gpu: a100 },
]
cuda_version: ['12.6']
cuda_version: ['12.6', '13.0']
runs-on: linux-${{ matrix.runner.arch }}-gpu-${{ matrix.runner.gpu }}-latest-1
container:
image: ${{ format('ghcr.io/nvidia/cudaqx-dev:{0}-{1}', inputs.release-number, matrix.runner.arch) }}
image: >-
${{
inputs.release-number == '0.0.0'
&& format('ghcr.io/nvidia/cudaqx-dev:{0}-{1}-cu{2}', 'latest', matrix.runner.arch, matrix.cuda_version)
|| format('ghcr.io/nvidia/cudaqx-dev:{0}-{1}', inputs.release-number, matrix.runner.arch)
}}
env:
NVIDIA_VISIBLE_DEVICES: ${{ env.NVIDIA_VISIBLE_DEVICES }}
permissions: write-all
steps:
- name: Install dependencies
run: |
apt update && apt install -y --no-install-recommends zip unzip patchelf git-lfs

- name: Checkout repository
uses: actions/checkout@v4
with:
ref: releases/v${{ inputs.release-number }}
ref: ${{ inputs.release-number == '0.0.0' && 'main' || format('releases/v{0}', inputs.release-number) }}
set-safe-directory: true
lfs: true # download assets file(s) for TRT tests

- name: Set git safe directory
run: git config --global --add safe.directory $GITHUB_WORKSPACE
Expand All @@ -52,10 +63,6 @@ jobs:
echo "Setting CUDAQX_QEC_VERSION=${{ inputs.release-number }}" >> $GITHUB_STEP_SUMMARY
echo "Setting CUDAQX_SOLVERS_VERSION=${{ inputs.release-number }}" >> $GITHUB_STEP_SUMMARY

- name: Install dependencies
run: |
apt update && apt install -y --no-install-recommends zip unzip patchelf

- name: Fetch assets and set QEC_EXTERNAL_DECODERS
env:
GH_TOKEN: ${{ github.token }}
Expand All @@ -71,8 +78,8 @@ jobs:
gh release view -R ${{ inputs.assets_repo }} ${{ inputs.assets_tag }} >> $GITHUB_STEP_SUMMARY
# Extract the decoder that needs to be embedded in the release
mkdir -p tmp
unzip -d tmp installed_files-${{ matrix.runner.arch }}.zip
echo "QEC_EXTERNAL_DECODERS=$(pwd)/tmp/lib/decoder-plugins/libcudaq-qec-nv-qldpc-decoder.so" >> $GITHUB_ENV
tar -C tmp -xzvf nv-qldpc-decoder-${{ matrix.runner.arch }}_ubuntu24.04_cuda${{ matrix.cuda_version }}_release.tar.gz
echo "QEC_EXTERNAL_DECODERS=$(pwd)/tmp/libcudaq-qec-nv-qldpc-decoder.so" >> $GITHUB_ENV
fi
shell: bash

Expand All @@ -97,15 +104,15 @@ jobs:

- name: Save build artifacts
run: |
cmake --build ${{ steps.build.outputs.build-dir }} --target zip_installed_files
cmake --build ${{ steps.build.outputs.build-dir }} --target zip_installed_files --parallel
cd ${{ steps.build.outputs.build-dir }}
mv installed_files.zip installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version}}.zip
mv installed_files.zip installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version }}.zip

- name: Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version}}
path: ${{ steps.build.outputs.build-dir }}/installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version}}.zip
name: installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version }}
path: ${{ steps.build.outputs.build-dir }}/installed_files-${{ matrix.runner.arch }}-cu${{ matrix.cuda_version }}.zip

# ========================================================================
# Run tests
Expand All @@ -121,7 +128,18 @@ jobs:
- name: Install python requirements
env:
LD_LIBRARY_PATH: ${{ env.MPI_PATH }}/lib:${{ env.LD_LIBRARY_PATH }}
run: pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} torch lightning ml_collections mpi4py transformers quimb opt_einsum torch
shell: bash
run: |
# Install the correct torch first.
cuda_no_dot=$(echo ${{ matrix.cuda_version }} | sed 's/\.//')
pip install torch==2.9.0 --index-url https://download.pytorch.org/whl/cu${cuda_no_dot}
pip install numpy pytest cupy-cuda${{ steps.config.outputs.cuda_major }}x cuquantum-cu${{ steps.config.outputs.cuda_major }} lightning ml_collections mpi4py transformers quimb opt_einsum nvidia-cublas cuquantum-python-cu${{ steps.config.outputs.cuda_major }}==25.09
# The following tests are needed for docs/sphinx/examples/qec/python/tensor_network_decoder.py.
if [ "$(uname -m)" == "x86_64" ]; then
# Stim is not currently available on manylinux ARM wheels, so only
# install for x86_64.
pip install stim beliefmatching
fi

- name: Run Python tests
env:
Expand Down
10 changes: 4 additions & 6 deletions .github/workflows/build_wheels.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,11 @@ on:
assets_repo:
type: string
description: Retrieve assets from a draft release from this repo (e.g. NVIDIA/cudaqx)
default: 'NVIDIA/cudaqx'
required: false
assets_tag:
type: string
description: Retrieve assets from a draft release with this tag (e.g. installed_files-1)
description: Retrieve assets from a draft release with this tag (e.g. wheels-123)
required: false
artifacts_from_run:
type: string
Expand Down Expand Up @@ -147,16 +148,13 @@ jobs:
.github/workflows/scripts/build_cudaq.sh --python-version ${{ matrix.python }}

- name: Build CUDA-QX wheels
env:
SUFFIX: ${{ matrix.platform == 'amd64' && 'x86_64' || 'aarch64' }}
shell: bash
run: |
if [[ -n "${{ inputs.assets_repo }}" ]] && [[ -n "${{ inputs.assets_tag }}" ]]; then
# Extract the decoder that needs to be embedded in the wheel
mkdir -p tmp
PYVER=$(echo ${{ matrix.python }} | tr -d '.')
unzip -d tmp cudaq_qec-*-cp${PYVER}-cp${PYVER}-manylinux*${SUFFIX}*.whl
export QEC_EXTERNAL_DECODERS=$(pwd)/tmp/cudaq_qec/lib/decoder-plugins/libcudaq-qec-nv-qldpc-decoder.so
tar -C tmp -xzvf nv-qldpc-decoder-${{ matrix.platform }}_ubuntu24.04_cuda${{ matrix.cuda_version }}_py${{ matrix.python }}_release.tar.gz
export QEC_EXTERNAL_DECODERS=$(pwd)/tmp/libcudaq-qec-nv-qldpc-decoder.so
fi
# This is needed to allow the "git rev-parse" commands in the build
# scripts to work.
Expand Down
2 changes: 1 addition & 1 deletion libs/qec/pyproject.toml.cu12
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
requires-python = ">=3.11"
readme = "README.md"
dependencies = [
'cuda-quantum-cu12 >= 0.12',
'cuda-quantum-cu12 >= 0.13',
]
classifiers = [
'Intended Audience :: Science/Research',
Expand Down
2 changes: 1 addition & 1 deletion libs/qec/pyproject.toml.cu13
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
requires-python = ">=3.11"
readme = "README.md"
dependencies = [
'cuda-quantum-cu13 >= 0.12',
'cuda-quantum-cu13 >= 0.13',
]
classifiers = [
'Intended Audience :: Science/Research',
Expand Down
18 changes: 18 additions & 0 deletions libs/qec/python/tests/test_decoding_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,21 @@

# nv_qldpc_decoder_config tests


def is_nv_qldpc_decoder_available():
    """
    Check whether the "nv-qldpc-decoder" plugin can be instantiated.

    Probes availability by attempting to construct the decoder with a small
    3x7 parity-check matrix. Any exception raised by ``qec.get_decoder``
    (plugin not installed, no compatible GPU, etc.) is treated as
    "not available".

    Returns:
        bool: True if the decoder was constructed successfully, False
        otherwise.
    """
    # Matrix construction uses only literals and cannot fail, so keep it
    # outside the try block; only the decoder probe itself is guarded.
    H_list = [[1, 0, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 1],
              [0, 0, 1, 0, 1, 1, 1]]
    H_np = np.array(H_list, dtype=np.uint8)
    try:
        # Result intentionally discarded — only constructibility matters.
        qec.get_decoder("nv-qldpc-decoder", H_np)
        return True
    except Exception:
        return False


FIELDS = {
"use_sparsity": (bool, True, False),
"error_rate": (float, 1e-3, 5e-2),
Expand Down Expand Up @@ -280,6 +295,9 @@ def test_configure_decoders_from_str_smoke():
decoder_config.H_sparse = [1, 2, 3, -1, 6, 7, 8, -1, -1]
decoder_config.set_decoder_custom_args(nv)
yaml_str = decoder_config.to_yaml_str()
# Do not instantiate the decoder if it is not available.
if not is_nv_qldpc_decoder_available():
return
status = qec.configure_decoders_from_str(yaml_str)
assert isinstance(status, int)
qec.finalize_decoders()
Expand Down
2 changes: 1 addition & 1 deletion libs/solvers/pyproject.toml.cu12
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
requires-python = ">=3.11"
readme = "README.md"
dependencies = [
'cuda-quantum-cu12 >= 0.12',
'cuda-quantum-cu12 >= 0.13',
'fastapi',
'networkx',
'pyscf',
Expand Down
2 changes: 1 addition & 1 deletion libs/solvers/pyproject.toml.cu13
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
requires-python = ">=3.11"
readme = "README.md"
dependencies = [
'cuda-quantum-cu13 >= 0.12',
'cuda-quantum-cu13 >= 0.13',
'fastapi',
'networkx',
'pyscf',
Expand Down