diff --git a/.github/workflows/docker_images.yml b/.github/workflows/docker_images.yml
index 4a6ab0b33f5..f549db071b5 100644
--- a/.github/workflows/docker_images.yml
+++ b/.github/workflows/docker_images.yml
@@ -494,9 +494,7 @@ jobs:
platform_tag=${{ needs.metadata.outputs.platform_tag }}
cuda_major=`echo ${{ inputs.cuda_version }} | cut -d . -f1`
- if [ "$cuda_major" == "11" ]; then
- deprecation_notice="**Note**: Support for CUDA 11 will be removed in future releases. Please update to CUDA 12."
- fi
+ deprecation_notice=""
image_tag=${platform_tag:+$platform_tag-}${cuda_major:+cu${cuda_major}-}
if ${{ github.event.pull_request.number != '' }} || [ -n "$(echo ${{ github.ref_name }} | grep pull-request/)" ]; then
pr_number=`echo ${{ github.ref_name }} | grep -o [0-9]*`
diff --git a/.github/workflows/publishing.yml b/.github/workflows/publishing.yml
index fd23898dead..edd81554fef 100644
--- a/.github/workflows/publishing.yml
+++ b/.github/workflows/publishing.yml
@@ -964,24 +964,15 @@ jobs:
dpkg -i cuda-keyring_1.1-1_all.deb
cuda_version_suffix="$(echo ${{ matrix.cuda_version }} | tr . -)"
apt-get update
- if [ $(echo ${{ matrix.cuda_version }} | cut -d . -f1) -gt 11 ]; then
- apt-get install -y --no-install-recommends \
- cuda-cudart-$cuda_version_suffix \
- cuda-nvrtc-$cuda_version_suffix \
- libnvjitlink-$cuda_version_suffix \
- libcurand-$cuda_version_suffix \
- libcublas-$cuda_version_suffix \
- libcusparse-$cuda_version_suffix \
- libcusolver-$cuda_version_suffix
- else
- apt-get install -y --no-install-recommends \
- cuda-cudart-$cuda_version_suffix \
- cuda-nvrtc-$cuda_version_suffix \
- libcurand-$cuda_version_suffix \
- libcublas-$cuda_version_suffix \
- libcusparse-$cuda_version_suffix \
- libcusolver-$cuda_version_suffix
- fi
+ apt-get install -y --no-install-recommends \
+ cuda-cudart-$cuda_version_suffix \
+ cuda-cudart-dev-$cuda_version_suffix \
+ cuda-nvrtc-$cuda_version_suffix \
+ libnvjitlink-$cuda_version_suffix \
+ libcurand-$cuda_version_suffix \
+ libcublas-$cuda_version_suffix \
+ libcusparse-$cuda_version_suffix \
+ libcusolver-$cuda_version_suffix
- name: Runtime dependencies (dnf)
if: startsWith(matrix.os_image, 'redhat')
@@ -1048,7 +1039,7 @@ jobs:
strategy:
matrix:
platform: ['amd64-gpu-a100', 'arm64-gpu-a100']
- cuda_major: ['', '11', '12']
+ cuda_major: ['', '12', '13']
fail-fast: false
runs-on: linux-${{ matrix.platform }}-latest-1
@@ -1086,20 +1077,22 @@ jobs:
# These simple steps are only expected to work for
# test cases that don't require MPI.
# Create clean python3 environment.
- apt-get update && apt-get install -y --no-install-recommends python3 python3-pip
- mkdir -p /tmp/packages && mv /tmp/wheels/* /tmp/packages && rmdir /tmp/wheels
+ apt-get update && apt-get install -y --no-install-recommends python3 python3-pip python3-venv
- python3 -m pip install pypiserver
- server=`find / -name pypi-server -executable -type f`
- $server run -p 8080 /tmp/packages &
+ # Make a place for local wheels
+ mkdir -p /tmp/packages && mv /tmp/wheels/* /tmp/packages && rmdir /tmp/wheels
+ # Create and activate virtual environment
+ python3 -m venv /opt/cudaq-venv
+ source /opt/cudaq-venv/bin/activate
+
if [ -n "${{ matrix.cuda_major }}" ]; then
pip install cuda-quantum-cu${{ matrix.cuda_major }}==${{ needs.assets.outputs.cudaq_version }} -v \
- --extra-index-url http://localhost:8080
+ --find-links "file:///tmp/packages"
else
pip install --upgrade pip
pip install cudaq==${{ needs.assets.outputs.cudaq_version }} -v \
- --extra-index-url http://localhost:8080 \
+ --find-links "file:///tmp/packages" \
2>&1 | tee /tmp/install.out
if [ -z "$(cat /tmp/install.out | grep -o 'Autodetection succeeded')" ]; then
diff --git a/.github/workflows/python_metapackages.yml b/.github/workflows/python_metapackages.yml
index 9d3ff0249d5..cc434a2f334 100644
--- a/.github/workflows/python_metapackages.yml
+++ b/.github/workflows/python_metapackages.yml
@@ -60,7 +60,7 @@ jobs:
package_name=cudaq
cuda_version_requirement="12.x or 13.x"
cuda_version_conda=12.4.0 # only used as example in the install script
- deprecation_notice="**Note**: Support for CUDA 11 will be removed in future releases. Please update to CUDA 12."
+ deprecation_notice=""
cat python/README.md.in > python/metapackages/README.md
for variable in package_name cuda_version_requirement cuda_version_conda deprecation_notice; do
sed -i "s/.{{[ ]*$variable[ ]*}}/${!variable}/g" python/metapackages/README.md
diff --git a/.licenserc.yaml b/.licenserc.yaml
index 70a23a33914..c981aca94bb 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -41,6 +41,7 @@ header:
- 'include/cudaq/Optimizer/CodeGen/OptUtils.h'
- 'lib/Optimizer/CodeGen/OptUtils.cpp'
- 'runtime/cudaq/algorithms/optimizers/nlopt/nlopt-src'
+ - 'python/metapackages/MANIFEST.in'
comment: on-failure
diff --git a/docker/build/assets.Dockerfile b/docker/build/assets.Dockerfile
index bba44004ee5..b5a18f4c72d 100644
--- a/docker/build/assets.Dockerfile
+++ b/docker/build/assets.Dockerfile
@@ -164,7 +164,8 @@ RUN source /cuda-quantum/scripts/configure_build.sh && \
## [Python support]
FROM prereqs AS python_build
-ADD "pyproject.toml" /cuda-quantum/pyproject.toml
+# Bring all possible templates into the image, then pick the exact one
+ADD pyproject.toml.cu* /cuda-quantum/
ADD "python" /cuda-quantum/python
ADD "cmake" /cuda-quantum/cmake
ADD "include" /cuda-quantum/include
@@ -186,22 +187,13 @@ RUN dnf install -y --nobest --setopt=install_weak_deps=False ${PYTHON}-devel &&
${PYTHON} -m ensurepip --upgrade && \
${PYTHON} -m pip install numpy build auditwheel patchelf
-RUN cd /cuda-quantum && source scripts/configure_build.sh && \
- if [ "${CUDA_VERSION#12.}" != "${CUDA_VERSION}" ]; then \
- cublas_version=12.0 && \
- cusolver_version=11.4 && \
- cuda_runtime_version=12.0 && \
- cuda_nvrtc_version=12.0 && \
- cupy_version=13.4.1 && \
- sed -i "s/-cu13/-cu12/g" pyproject.toml && \
- sed -i "s/-cuda13/-cuda12/g" pyproject.toml && \
- sed -i -E "s/cupy-cuda[0-9]+x/cupy-cuda12x/g" pyproject.toml && \
- sed -i -E "s/(cupy-cuda[0-9]+x? ~= )[0-9\.]*/\1${cupy_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cublas-cu[0-9]* ~= )[0-9\.]*/\1${cublas_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cusolver-cu[0-9]* ~= )[0-9\.]*/\1${cusolver_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cuda-nvrtc-cu[0-9]* ~= )[0-9\.]*/\1${cuda_nvrtc_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cuda-runtime-cu[0-9]* ~= )[0-9\.]*/\1${cuda_runtime_version}/g" pyproject.toml; \
- fi && \
+RUN cd /cuda-quantum && \
+ . scripts/configure_build.sh && \
+ case "${CUDA_VERSION%%.*}" in \
+ 12) cp pyproject.toml.cu12 pyproject.toml || true ;; \
+ 13) cp pyproject.toml.cu13 pyproject.toml || true ;; \
+ *) echo "Unsupported CUDA_VERSION=${CUDA_VERSION}"; exit 1 ;; \
+ esac && \
# Needed to retrigger the LLVM build, since the MLIR Python bindings
# are not built in the prereqs stage.
rm -rf "${LLVM_INSTALL_PREFIX}" && \
diff --git a/docker/release/cudaq.ext.Dockerfile b/docker/release/cudaq.ext.Dockerfile
index 46fe28fd3d4..38ef5dec59b 100644
--- a/docker/release/cudaq.ext.Dockerfile
+++ b/docker/release/cudaq.ext.Dockerfile
@@ -49,6 +49,9 @@ RUN if [ -x "$(command -v pip)" ]; then \
pip install --no-cache-dir mpi4py~=3.1; \
fi; \
fi
+RUN cuda_major=$(echo ${CUDA_VERSION} | cut -d . -f1) && \
+    pip install nvidia-curand-cu${cuda_major}
+
# Make sure that apt-get remains updated at the end!;
# If we don't do that, then apt-get will get confused when some CUDA
# components are already installed but not all of them.
diff --git a/docker/release/cudaq.wheel.Dockerfile b/docker/release/cudaq.wheel.Dockerfile
index ea382ac299f..6e6624d6201 100644
--- a/docker/release/cudaq.wheel.Dockerfile
+++ b/docker/release/cudaq.wheel.Dockerfile
@@ -37,23 +37,14 @@ RUN echo "Building MLIR bindings for python${python_version}" && \
LLVM_CMAKE_CACHE=/cmake/caches/LLVM.cmake LLVM_SOURCE=/llvm-project \
bash /scripts/build_llvm.sh -c Release -v
-# Patch the pyproject.toml file to change the CUDA version if needed
-RUN cd cuda-quantum && sed -i "s/README.md.in/README.md/g" pyproject.toml && \
- if [ "${CUDA_VERSION#12.}" != "${CUDA_VERSION}" ]; then \
- cublas_version=12.0 && \
- cusolver_version=11.4 && \
- cuda_runtime_version=12.0 && \
- cuda_nvrtc_version=12.0 && \
- cupy_version=13.4.1 && \
- sed -i "s/-cu13/-cu12/g" pyproject.toml && \
- sed -i "s/-cuda13/-cuda12/g" pyproject.toml && \
- sed -i -E "s/cupy-cuda[0-9]+x/cupy-cuda12x/g" pyproject.toml && \
- sed -i -E "s/(cupy-cuda[0-9]+x? ~= )[0-9\.]*/\1${cupy_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cublas-cu[0-9]* ~= )[0-9\.]*/\1${cublas_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cusolver-cu[0-9]* ~= )[0-9\.]*/\1${cusolver_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cuda-nvrtc-cu[0-9]* ~= )[0-9\.]*/\1${cuda_nvrtc_version}/g" pyproject.toml && \
- sed -i -E "s/(nvidia-cuda-runtime-cu[0-9]* ~= )[0-9\.]*/\1${cuda_runtime_version}/g" pyproject.toml; \
- fi
+# Configure the build based on the CUDA version
+RUN cd cuda-quantum && \
+ . scripts/configure_build.sh && \
+ case "${CUDA_VERSION%%.*}" in \
+ 12) cp pyproject.toml.cu12 pyproject.toml || true ;; \
+ 13) cp pyproject.toml.cu13 pyproject.toml || true ;; \
+ *) echo "Unsupported CUDA_VERSION=${CUDA_VERSION}"; exit 1 ;; \
+ esac
# Create the README
RUN cd cuda-quantum && cat python/README.md.in > python/README.md && \
diff --git a/docs/sphinx/applications/python/hybrid_quantum_neural_networks.ipynb b/docs/sphinx/applications/python/hybrid_quantum_neural_networks.ipynb
index cc3f054e614..4c08b3e0ae4 100644
--- a/docs/sphinx/applications/python/hybrid_quantum_neural_networks.ipynb
+++ b/docs/sphinx/applications/python/hybrid_quantum_neural_networks.ipynb
@@ -34,7 +34,7 @@
"source": [
"# Install the relevant packages.\n",
"\n",
- "!pip install matplotlib==3.8.4 torch==2.0.1+cu118 torchvision==0.15.2+cu118 scikit-learn==1.4.2 -q --extra-index-url https://download.pytorch.org/whl/cu118"
+ "!pip install matplotlib==3.8.4 torch==2.9.0+cu126 torchvision==0.24.0+cu126 scikit-learn==1.4.2 -q --extra-index-url https://download.pytorch.org/whl/cu126"
]
},
{
diff --git a/docs/sphinx/using/install/data_center_install.rst b/docs/sphinx/using/install/data_center_install.rst
index 941dc5dcf8b..d52d45242c6 100644
--- a/docs/sphinx/using/install/data_center_install.rst
+++ b/docs/sphinx/using/install/data_center_install.rst
@@ -255,8 +255,8 @@ Python-specific tools:
.. note::
- The wheel build by default is configured to depend on CUDA 12. To build a wheel for CUDA 11,
- you need to adjust the dependencies and project name in the `pyproject.toml` file.
+ The wheel build by default is configured to depend on CUDA 13. To build a wheel for CUDA 12,
+ you need to copy the `pyproject.toml.cu12` file as `pyproject.toml`.
From within the folder where you cloned the CUDA-Q repository, run the following
command to build the CUDA-Q Python wheel:
diff --git a/docs/sphinx/using/install/local_installation.rst b/docs/sphinx/using/install/local_installation.rst
index 4aa49182803..3ccd1e96233 100644
--- a/docs/sphinx/using/install/local_installation.rst
+++ b/docs/sphinx/using/install/local_installation.rst
@@ -834,10 +834,10 @@ by running the command
.. note::
Please check if you have an existing installation of the `cuda-quantum`,
- `cudaq-quantum-cu11`, or `cuda-quantum-cu12` package,
+ `cuda-quantum-cu12`, or `cuda-quantum-cu13` package,
and uninstall it prior to installing `cudaq`. The `cudaq` package supersedes the
`cuda-quantum` package and will install a suitable binary distribution (either
- `cuda-quantum-cu11` or `cuda-quantum-cu12`) for your system. Multiple versions
+ `cuda-quantum-cu12` or `cuda-quantum-cu13`) for your system. Multiple versions
of a CUDA-Q binary distribution will conflict with each other and not work properly.
If you previously installed the CUDA-Q pre-built binaries, you should first uninstall your
@@ -892,9 +892,11 @@ The following table summarizes the required components.
* - NVIDIA GPU with Compute Capability
- 7.5+
* - CUDA
- - 12.x (Driver 525.60.13+), 13.x (Driver 580.65.06+)
-
-Detailed information about supported drivers for different CUDA versions and be found `here `__.
+ - • 12.x (Driver 525.60.13+) – For GPUs that support CUDA Forward Compatibility
+ • 12.6+ (Driver 560.35.05+) – For all GPUs with supported architecture
+ • 13.x (Driver 580.65.06+)
+
Detailed information about supported drivers for different CUDA versions can be found `here `__. For more information on CUDA Forward Compatibility, please refer to `this page `__.
.. note::
diff --git a/docs/sphinx/using/quick_start.rst b/docs/sphinx/using/quick_start.rst
index 2872daae844..fe7a1fc351c 100644
--- a/docs/sphinx/using/quick_start.rst
+++ b/docs/sphinx/using/quick_start.rst
@@ -42,7 +42,7 @@ Install CUDA-Q
To develop CUDA-Q applications using C++, please make sure you have a C++ toolchain installed
that supports C++20, for example `g++` version 11 or newer.
- Download the `install_cuda_quantum` file for your processor architecture and CUDA version (`_cu11` suffix for CUDA 11 and `_cu12` suffix for CUDA 12)
+ Download the `install_cuda_quantum` file for your processor architecture and CUDA version (`_cu12` suffix for CUDA 12 and `_cu13` suffix for CUDA 13)
from the assets of the respective `GitHub release `__;
that is, the file with the `aarch64` extension for ARM processors, and the one with `x86_64` for, e.g., Intel and AMD processors.
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index a2423d5fa7b..00000000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,83 +0,0 @@
-# ============================================================================ #
-# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. #
-# All rights reserved. #
-# #
-# This source code and the accompanying materials are made available under #
-# the terms of the Apache License 2.0 which accompanies this distribution. #
-# ============================================================================ #
-
-[project]
-name = "cuda-quantum-cu13"
-dynamic = ["version"]
-keywords = [ "cudaq", "cuda-quantum", "cuda", "quantum", "quantum computing", "nvidia", "high-performance computing" ]
-description="Python bindings for the CUDA-Q toolkit for heterogeneous quantum-classical workflows."
-authors = [{name = "NVIDIA Corporation & Affiliates"}]
-maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
-readme = { file="python/README.md.in", content-type = "text/markdown"}
-requires-python = ">=3.10"
-license = { file="LICENSE" }
-dependencies = [
- 'astpretty ~= 3.0',
- 'cuquantum-cu13 == 25.09',
- 'numpy >= 1.24',
- 'scipy >= 1.10.1',
- 'requests >= 2.31',
- 'nvidia-cublas-cu12 ~= 12.9',
- 'nvidia-curand-cu12 ~= 10.3',
- 'nvidia-cusparse-cu12 ~= 12.5',
- 'nvidia-cuda-runtime-cu12 ~= 12.9',
- 'nvidia-cusolver-cu12 ~= 11.7',
- 'nvidia-cuda-nvrtc-cu12 ~= 12.9',
- 'cupy-cuda13x ~= 13.6.0'
-]
-classifiers = [
- 'Intended Audience :: Science/Research',
- 'Intended Audience :: Developers',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.10',
- 'Programming Language :: Python :: 3.11',
- 'Programming Language :: Python :: 3.12',
- 'Programming Language :: Python :: 3.13',
- "Environment :: GPU :: NVIDIA CUDA",
- "Environment :: GPU :: NVIDIA CUDA :: 11",
- "Environment :: GPU :: NVIDIA CUDA :: 12",
- 'Topic :: Software Development',
- 'Topic :: Scientific/Engineering',
-]
-
-[project.urls]
-Homepage = "https://developer.nvidia.com/cuda-q"
-Documentation = "https://nvidia.github.io/cuda-quantum"
-Repository = "https://github.com/NVIDIA/cuda-quantum"
-Releases = "https://nvidia.github.io/cuda-quantum/latest/releases.html"
-
-# We must use h5py<3.11 because 3.11 doesn't include aarch64 Linux wheels.
-# https://github.com/h5py/h5py/issues/2408
-[project.optional-dependencies]
-chemistry = [ "openfermionpyscf==0.5", "h5py<3.11" ]
-visualization = [ "qutip<5" , "matplotlib>=3.5" ]
-# Additional torch-based integrator
-integrators = [ "torchdiffeq" ]
-
-[build-system]
-requires = ["scikit-build-core==0.9.10", "cmake>=3.27,<3.29", "numpy>=1.24", "pytest==8.2.0"]
-build-backend = "scikit_build_core.build"
-
-[tool.scikit-build]
-wheel.packages = ["python/cudaq"]
-wheel.license-files = [ "LICENSE", "NOTICE", "CITATION.cff" ]
-build-dir = "_skbuild"
-metadata.version.provider = "scikit_build_core.metadata.setuptools_scm"
-cmake.minimum-version = "3.27"
-cmake.build-type = "Release"
-cmake.verbose = false
-cmake.args = [
- "-DCUDAQ_ENABLE_PYTHON=TRUE",
- "-DCUDAQ_DISABLE_CPP_FRONTEND=TRUE",
- "-DCUDAQ_DISABLE_TOOLS=TRUE",
- "-DCUDAQ_BUILD_TESTS=TRUE"
-]
-
-[tool.setuptools_scm]
-write_to = "_version.py"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 120000
index 00000000000..aedc7fe21e0
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1 @@
+pyproject.toml.cu13
\ No newline at end of file
diff --git a/pyproject.toml.cu12 b/pyproject.toml.cu12
new file mode 100644
index 00000000000..703330389d5
--- /dev/null
+++ b/pyproject.toml.cu12
@@ -0,0 +1,81 @@
+# ============================================================================ #
+# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. #
+# All rights reserved. #
+# #
+# This source code and the accompanying materials are made available under #
+# the terms of the Apache License 2.0 which accompanies this distribution. #
+# ============================================================================ #
+
+[project]
+name = "cuda-quantum-cu12"
+dynamic = ["version"]
+keywords = [ "cudaq", "cuda-quantum", "cuda", "quantum", "quantum computing", "nvidia", "high-performance computing" ]
+description="Python bindings for the CUDA-Q toolkit for heterogeneous quantum-classical workflows."
+authors = [{name = "NVIDIA Corporation & Affiliates"}]
+maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
+readme = { file="python/README.md.in", content-type = "text/markdown"}
+requires-python = ">=3.10"
+license = { file="LICENSE" }
+dependencies = [
+ 'astpretty ~= 3.0',
+ 'cuquantum-cu12 == 25.09',
+ 'numpy >= 1.24',
+ 'scipy >= 1.10.1',
+ 'requests >= 2.31',
+ 'nvidia-cublas-cu12 ~= 12.0',
+ 'nvidia-curand-cu12 ~= 10.3',
+ 'nvidia-cusparse-cu12 ~= 12.5',
+ 'nvidia-cuda-runtime-cu12 ~= 12.0',
+ 'nvidia-cusolver-cu12 ~= 11.4',
+ 'nvidia-cuda-nvrtc-cu12 ~= 12.0',
+ 'cupy-cuda12x ~= 13.6.0'
+]
+classifiers = [
+ 'Intended Audience :: Science/Research',
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
+ 'Programming Language :: Python :: 3.13',
+ "Environment :: GPU :: NVIDIA CUDA",
+ "Environment :: GPU :: NVIDIA CUDA :: 12",
+ 'Topic :: Software Development',
+ 'Topic :: Scientific/Engineering',
+]
+
+[project.urls]
+Homepage = "https://developer.nvidia.com/cuda-q"
+Documentation = "https://nvidia.github.io/cuda-quantum"
+Repository = "https://github.com/NVIDIA/cuda-quantum"
+Releases = "https://nvidia.github.io/cuda-quantum/latest/releases.html"
+
+# We must use h5py<3.11 because 3.11 doesn't include aarch64 Linux wheels.
+# https://github.com/h5py/h5py/issues/2408
+[project.optional-dependencies]
+chemistry = [ "openfermionpyscf==0.5", "h5py<3.11" ]
+visualization = [ "qutip<5" , "matplotlib>=3.5" ]
+# Additional torch-based integrator
+integrators = [ "torchdiffeq" ]
+
+[build-system]
+requires = ["scikit-build-core==0.9.10", "cmake>=3.27,<3.29", "numpy>=1.24", "pytest==8.2.0"]
+build-backend = "scikit_build_core.build"
+
+[tool.scikit-build]
+wheel.packages = ["python/cudaq"]
+wheel.license-files = [ "LICENSE", "NOTICE", "CITATION.cff" ]
+build-dir = "_skbuild"
+metadata.version.provider = "scikit_build_core.metadata.setuptools_scm"
+cmake.minimum-version = "3.27"
+cmake.build-type = "Release"
+cmake.verbose = false
+cmake.args = [
+ "-DCUDAQ_ENABLE_PYTHON=TRUE",
+ "-DCUDAQ_DISABLE_CPP_FRONTEND=TRUE",
+ "-DCUDAQ_DISABLE_TOOLS=TRUE",
+ "-DCUDAQ_BUILD_TESTS=TRUE"
+]
+
+[tool.setuptools_scm]
+write_to = "_version.py"
diff --git a/pyproject.toml.cu13 b/pyproject.toml.cu13
new file mode 100644
index 00000000000..6695c365c88
--- /dev/null
+++ b/pyproject.toml.cu13
@@ -0,0 +1,82 @@
+# ============================================================================ #
+# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. #
+# All rights reserved. #
+# #
+# This source code and the accompanying materials are made available under #
+# the terms of the Apache License 2.0 which accompanies this distribution. #
+# ============================================================================ #
+
+[project]
+name = "cuda-quantum-cu13"
+dynamic = ["version"]
+keywords = [ "cudaq", "cuda-quantum", "cuda", "quantum", "quantum computing", "nvidia", "high-performance computing" ]
+description="Python bindings for the CUDA-Q toolkit for heterogeneous quantum-classical workflows."
+authors = [{name = "NVIDIA Corporation & Affiliates"}]
+maintainers = [{name = "NVIDIA Corporation & Affiliates"}]
+readme = { file="python/README.md.in", content-type = "text/markdown"}
+requires-python = ">=3.10"
+license = { file="LICENSE" }
+dependencies = [
+ 'astpretty ~= 3.0',
+ 'cuquantum-cu13 == 25.09',
+ 'numpy >= 1.24',
+ 'scipy >= 1.10.1',
+ 'requests >= 2.31',
+ 'nvidia-cublas ~= 13.1',
+ 'nvidia-curand ~= 10.4',
+ 'nvidia-cusparse ~= 12.6',
+ 'nvidia-cuda-runtime ~= 13.0',
+ 'nvidia-cusolver ~= 12.0',
+ 'nvidia-cuda-nvrtc ~= 13.0',
+ 'cupy-cuda13x ~= 13.6.0'
+]
+classifiers = [
+ 'Intended Audience :: Science/Research',
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.11',
+ 'Programming Language :: Python :: 3.12',
+ 'Programming Language :: Python :: 3.13',
+ "Environment :: GPU :: NVIDIA CUDA",
+ "Environment :: GPU :: NVIDIA CUDA :: 12",
+ "Environment :: GPU :: NVIDIA CUDA :: 13",
+ 'Topic :: Software Development',
+ 'Topic :: Scientific/Engineering',
+]
+
+[project.urls]
+Homepage = "https://developer.nvidia.com/cuda-q"
+Documentation = "https://nvidia.github.io/cuda-quantum"
+Repository = "https://github.com/NVIDIA/cuda-quantum"
+Releases = "https://nvidia.github.io/cuda-quantum/latest/releases.html"
+
+# We must use h5py<3.11 because 3.11 doesn't include aarch64 Linux wheels.
+# https://github.com/h5py/h5py/issues/2408
+[project.optional-dependencies]
+chemistry = [ "openfermionpyscf==0.5", "h5py<3.11" ]
+visualization = [ "qutip<5" , "matplotlib>=3.5" ]
+# Additional torch-based integrator
+integrators = [ "torchdiffeq" ]
+
+[build-system]
+requires = ["scikit-build-core==0.9.10", "cmake>=3.27,<3.29", "numpy>=1.24", "pytest==8.2.0"]
+build-backend = "scikit_build_core.build"
+
+[tool.scikit-build]
+wheel.packages = ["python/cudaq"]
+wheel.license-files = [ "LICENSE", "NOTICE", "CITATION.cff" ]
+build-dir = "_skbuild"
+metadata.version.provider = "scikit_build_core.metadata.setuptools_scm"
+cmake.minimum-version = "3.27"
+cmake.build-type = "Release"
+cmake.verbose = false
+cmake.args = [
+ "-DCUDAQ_ENABLE_PYTHON=TRUE",
+ "-DCUDAQ_DISABLE_CPP_FRONTEND=TRUE",
+ "-DCUDAQ_DISABLE_TOOLS=TRUE",
+ "-DCUDAQ_BUILD_TESTS=TRUE"
+]
+
+[tool.setuptools_scm]
+write_to = "_version.py"
diff --git a/python/README.md.in b/python/README.md.in
index 34493a472ed..d65f7bf50b8 100644
--- a/python/README.md.in
+++ b/python/README.md.in
@@ -40,7 +40,7 @@ ${{ deprecation_notice }}
> **Important:**
> Please check if you have an existing installation of the `cuda-quantum`,
-`cudaq-quantum-cu11`, or `cuda-quantum-cu12` package, and uninstall it prior to
+`cuda-quantum-cu11`, `cuda-quantum-cu12`, or `cuda-quantum-cu13` package, and uninstall it prior to
installation. Different CUDA-Q binary distributions may conflict with each
other causing issues.
diff --git a/python/cudaq/__init__.py b/python/cudaq/__init__.py
index ced6751ee5a..19df5b6b69d 100644
--- a/python/cudaq/__init__.py
+++ b/python/cudaq/__init__.py
@@ -18,31 +18,79 @@
# LinkedLibraryHolder.
if not "CUDAQ_DYNLIBS" in os.environ and not cuda_major is None:
try:
- custatevec_libs = get_library_path(f"custatevec-cu{cuda_major}")
- custatevec_path = os.path.join(custatevec_libs, "libcustatevec.so.1")
-
- cutensornet_libs = get_library_path(f"cutensornet-cu{cuda_major}")
- cutensornet_path = os.path.join(cutensornet_libs, "libcutensornet.so.2")
-
- cudensitymat_libs = get_library_path(f"cudensitymat-cu{cuda_major}")
- cudensitymat_path = os.path.join(cudensitymat_libs,
- "libcudensitymat.so.0")
-
- cutensor_libs = get_library_path(f"cutensor-cu{cuda_major}")
- cutensor_path = os.path.join(cutensor_libs, "libcutensor.so.2")
-
- curand_libs = get_library_path(f"nvidia-curand-cu{cuda_major}")
- curand_path = os.path.join(curand_libs, "libcurand.so.10")
-
- cudart_libs = get_library_path(f"nvidia-cuda_runtime-cu{cuda_major}")
- cudart_path = os.path.join(cudart_libs, f"libcudart.so.{cuda_major}")
-
- cuda_nvrtc_libs = get_library_path(f"nvidia-cuda_nvrtc-cu{cuda_major}")
- cuda_nvrtc_path = os.path.join(cuda_nvrtc_libs,
- f"libnvrtc.so.{cuda_major}")
-
- os.environ[
- "CUDAQ_DYNLIBS"] = f"{custatevec_path}:{cutensornet_path}:{cudensitymat_path}:{cutensor_path}:{cudart_path}:{curand_path}:{cuda_nvrtc_path}"
+ if cuda_major == 12:
+ custatevec_libs = get_library_path(f"custatevec-cu{cuda_major}")
+ custatevec_path = os.path.join(custatevec_libs,
+ "libcustatevec.so.1")
+
+ cutensornet_libs = get_library_path(f"cutensornet-cu{cuda_major}")
+ cutensornet_path = os.path.join(cutensornet_libs,
+ "libcutensornet.so.2")
+
+ cudensitymat_libs = get_library_path(f"cudensitymat-cu{cuda_major}")
+ cudensitymat_path = os.path.join(cudensitymat_libs,
+ "libcudensitymat.so.0")
+
+ cutensor_libs = get_library_path(f"cutensor-cu{cuda_major}")
+ cutensor_path = os.path.join(cutensor_libs, "libcutensor.so.2")
+
+ curand_libs = get_library_path(f"nvidia-curand-cu{cuda_major}")
+ curand_path = os.path.join(curand_libs, "libcurand.so.10")
+
+ cudart_libs = get_library_path(
+ f"nvidia-cuda_runtime-cu{cuda_major}")
+ cudart_path = os.path.join(cudart_libs,
+ f"libcudart.so.{cuda_major}")
+
+ cuda_nvrtc_libs = get_library_path(
+ f"nvidia-cuda_nvrtc-cu{cuda_major}")
+ cuda_nvrtc_path = os.path.join(cuda_nvrtc_libs,
+ f"libnvrtc.so.{cuda_major}")
+
+ os.environ[
+ "CUDAQ_DYNLIBS"] = f"{custatevec_path}:{cutensornet_path}:{cudensitymat_path}:{cutensor_path}:{cudart_path}:{curand_path}:{cuda_nvrtc_path}"
+ else: # CUDA 13
+ custatevec_libs = get_library_path(f"custatevec-cu{cuda_major}")
+ custatevec_path = os.path.join(custatevec_libs,
+ "libcustatevec.so.1")
+
+ cutensornet_libs = get_library_path(f"cutensornet-cu{cuda_major}")
+ cutensornet_path = os.path.join(cutensornet_libs,
+ "libcutensornet.so.2")
+
+ cudensitymat_libs = get_library_path(f"cudensitymat-cu{cuda_major}")
+ cudensitymat_path = os.path.join(cudensitymat_libs,
+ "libcudensitymat.so.0")
+
+ cutensor_libs = get_library_path(f"cutensor-cu{cuda_major}")
+ cutensor_path = os.path.join(cutensor_libs, "libcutensor.so.2")
+
+ curand_libs = get_library_path(f"nvidia-curand")
+ curand_path = os.path.join(curand_libs, "libcurand.so.10")
+
+ cudart_libs = get_library_path(f"nvidia-cuda_runtime")
+ cudart_path = os.path.join(cudart_libs,
+ f"libcudart.so.{cuda_major}")
+
+ cuda_nvrtc_libs = get_library_path(f"nvidia-cuda_nvrtc")
+ cuda_nvrtc_path = os.path.join(cuda_nvrtc_libs,
+ f"libnvrtc.so.{cuda_major}")
+ cuda_nvrtc_builtin_path = os.path.join(
+ cuda_nvrtc_libs, f"libnvrtc-builtins.so.{cuda_major}.0")
+
+ cublas_libs = get_library_path(f"nvidia-cublas")
+ cublas_path = os.path.join(cublas_libs,
+ f"libcublas.so.{cuda_major}")
+ cublaslt_path = os.path.join(cublas_libs,
+ f"libcublasLt.so.{cuda_major}")
+
+ cusolver_libs = get_library_path(f"nvidia-cusolver")
+ cusolver_path = os.path.join(cusolver_libs, f"libcusolver.so.12")
+ cusolvermg_path = os.path.join(cusolver_libs,
+ f"libcusolverMg.so.12")
+
+ os.environ[
+ "CUDAQ_DYNLIBS"] = f"{cudart_path}:{curand_path}:{cuda_nvrtc_path}:{cuda_nvrtc_builtin_path}:{cublas_path}:{cublaslt_path}:{cusolver_path}:{cusolvermg_path}:{cutensor_path}:{custatevec_path}:{cutensornet_path}:{cudensitymat_path}"
except:
import importlib.util
package_spec = importlib.util.find_spec(f"cuda-quantum-cu{cuda_major}")
diff --git a/python/cudaq/_packages.py b/python/cudaq/_packages.py
index a419b6b6ec4..3147a94d0fb 100644
--- a/python/cudaq/_packages.py
+++ b/python/cudaq/_packages.py
@@ -45,5 +45,8 @@ def get_library_path(package_name):
package_location = _find_package_location_by_root(package_name)
dirname = os.path.join(package_location, subdir, "lib")
+ if not os.path.isdir(dirname):
+ # Check for cu13 layout
+ dirname = os.path.join(package_location, subdir, "cu13/lib")
assert os.path.isdir(dirname)
return dirname
diff --git a/python/metapackages/MANIFEST.in b/python/metapackages/MANIFEST.in
new file mode 100644
index 00000000000..6989b1f7978
--- /dev/null
+++ b/python/metapackages/MANIFEST.in
@@ -0,0 +1,2 @@
+include pyproject.toml.cu12
+include pyproject.toml.cu13
diff --git a/python/metapackages/setup.py b/python/metapackages/setup.py
index 98663cf35a2..2dd18f335a8 100644
--- a/python/metapackages/setup.py
+++ b/python/metapackages/setup.py
@@ -183,25 +183,28 @@ def _infer_best_package() -> str:
"""
# Find the existing wheel installation
installed = []
- for pkg in ['cuda-quantum', 'cuda-quantum-cu11', 'cuda-quantum-cu12']:
+ for pkg in [
+ 'cuda-quantum', 'cuda-quantum-cu11', 'cuda-quantum-cu12',
+ 'cuda-quantum-cu13'
+ ]:
_log(f"Looking for existing installation of {pkg}.")
if _check_package_installed(pkg):
installed.append(pkg)
cuda_version = _get_cuda_version()
if cuda_version is None:
- cudaq_bdist = 'cuda-quantum-cu12'
- elif cuda_version < 11000:
- raise Exception(f'Your CUDA version ({cuda_version}) is too old.')
+ cudaq_bdist = 'cuda-quantum-cu13'
elif cuda_version < 12000:
- cudaq_bdist = 'cuda-quantum-cu11'
- elif cuda_version <= 13000:
+ raise Exception(f'Your CUDA version ({cuda_version}) is too old.')
+ elif cuda_version < 13000:
cudaq_bdist = 'cuda-quantum-cu12'
+ elif cuda_version < 14000:
+ cudaq_bdist = 'cuda-quantum-cu13'
else:
raise Exception(f'Your CUDA version ({cuda_version}) is too new.')
_log(f"Identified {cudaq_bdist} as the best package.")
- # Disallow -cu11 & -cu12 wheels from coexisting
+ # Disallow -cu11 & -cu12 & -cu13 wheels from coexisting
conflicting = ", ".join((pkg for pkg in installed if pkg != cudaq_bdist))
_log(f"Conflicting packages: {conflicting}")
if conflicting != '':
diff --git a/python/runtime/cudaq/algorithms/py_state.cpp b/python/runtime/cudaq/algorithms/py_state.cpp
index a96a056f6a0..966a4fa3c1c 100644
--- a/python/runtime/cudaq/algorithms/py_state.cpp
+++ b/python/runtime/cudaq/algorithms/py_state.cpp
@@ -182,6 +182,58 @@ state pyGetStateLibraryMode(py::object kernel, py::args args) {
});
}
+static py::buffer_info getCupyBufferInfo(py::buffer cupy_buffer) {
+ // Note: cupy 13.5+ arrays will bind (overload resolution) to a py::buffer
+ // type. However, we cannot access the underlying buffer info via a
+ // `.request()` as it will throw unless that is managed memory. Here, we
+ // retrieve and construct buffer_info from the CuPy array interface.
+
+ if (!py::hasattr(cupy_buffer, "__cuda_array_interface__")) {
+ throw std::runtime_error("Buffer is not a CuPy array");
+ }
+
+ py::dict cupy_array_info = cupy_buffer.attr("__cuda_array_interface__");
+ // Ref: https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html
+  // example: {'shape': (2, 2), 'typestr': '<c16', 'data': (139639166976000,
+  // False), 'version': 3}
+  auto dataInfo = cupy_array_info["data"].cast<py::tuple>();
+  void *dataPtr = (void *)dataInfo[0].cast<std::uintptr_t>();
+  const bool readOnly = dataInfo[1].cast<bool>();
+  auto shapeTuple = cupy_array_info["shape"].cast<py::tuple>();
+  std::vector<std::size_t> extents;
+  for (std::size_t i = 0; i < shapeTuple.size(); i++) {
+    extents.push_back(shapeTuple[i].cast<std::size_t>());
+  }
+  const std::string typeStr = cupy_array_info["typestr"].cast<std::string>();
+  if (typeStr != "<c8" && typeStr != "<c16")
+    throw std::runtime_error("Unsupported data type: " + typeStr);
+  const auto [dataTypeSize, desc] =
+      (typeStr == "<c8")
+          ? std::make_tuple(
+                sizeof(std::complex<float>),
+                py::format_descriptor<std::complex<float>>::format())
+          : std::make_tuple(
+                sizeof(std::complex<double>),
+                py::format_descriptor<std::complex<double>>::format());
+
+  std::vector<std::size_t> strides(extents.size(), dataTypeSize);
+ for (size_t i = 1; i < extents.size(); ++i)
+ strides[i] = strides[i - 1] * extents[i - 1];
+
+ return py::buffer_info(dataPtr, dataTypeSize, /*itemsize */
+ desc, extents.size(), /* ndim */
+ extents, /* shape */
+ strides, /* strides */
+ readOnly /* readonly */
+ );
+}
+
/// @brief Bind the get_state cudaq function
void bindPyState(py::module &mod, LinkedLibraryHolder &holder) {
py::enum_<cudaq::InitialState>(mod, "InitialStateType",
@@ -292,8 +344,16 @@ void bindPyState(py::module &mod, LinkedLibraryHolder &holder) {
.def_static(
"from_data",
[&](py::buffer data) {
- // This is by default host data
- auto info = data.request();
+ const bool isHostData =
+ !py::hasattr(data, "__cuda_array_interface__");
+ // Check that the target is GPU-based, i.e., can handle device
+ // pointer.
+ if (!holder.getTarget().config.GpuRequired && !isHostData)
+ throw std::runtime_error(fmt::format(
+ "Current target '{}' does not support CuPy arrays.",
+ holder.getTarget().name));
+
+ auto info = isHostData ? data.request() : getCupyBufferInfo(data);
if (info.format ==
py::format_descriptor<std::complex<float>>::format()) {
return state::from_data(std::make_pair(
@@ -318,10 +378,20 @@ void bindPyState(py::module &mod, LinkedLibraryHolder &holder) {
"Return a state from data.")
.def_static(
"from_data",
- [](const std::vector<py::buffer> &tensors) {
+ [&holder](const std::vector<py::buffer> &tensors) {
+ const bool isHostData =
+ tensors.empty() ||
+ !py::hasattr(tensors[0], "__cuda_array_interface__");
+ // Check that the target is GPU-based, i.e., can handle device
+ // pointer.
+ if (!holder.getTarget().config.GpuRequired && !isHostData)
+ throw std::runtime_error(fmt::format(
+ "Current target '{}' does not support CuPy arrays.",
+ holder.getTarget().name));
cudaq::TensorStateData tensorData;
for (auto &tensor : tensors) {
- auto info = tensor.request();
+ auto info =
+ isHostData ? tensor.request() : getCupyBufferInfo(tensor);
const std::vector<std::size_t> extents(info.shape.begin(),
info.shape.end());
tensorData.emplace_back(
@@ -387,6 +457,8 @@ void bindPyState(py::module &mod, LinkedLibraryHolder &holder) {
.def_static(
"from_data",
[&holder](py::object opaqueData) {
+ // Note: This overload is no longer needed from cupy 13.5+ onward.
+ // We can remove it in future releases.
// Make sure this is a CuPy array
if (!py::hasattr(opaqueData, "data"))
throw std::runtime_error(
@@ -582,16 +654,26 @@ index pair.
"Compute the overlap between the provided :class:`State`'s.")
.def(
"overlap",
- [](state &self, py::buffer &other) {
+ [&holder](state &self, py::buffer &other) {
if (self.get_num_tensors() != 1)
throw std::runtime_error("overlap NumPy interop only supported "
"for vector and matrix state data.");
- py::buffer_info info = other.request();
+ const bool isHostData =
+ !py::hasattr(other, "__cuda_array_interface__");
+ // Check that the target is GPU-based, i.e., can handle device
+ // pointer.
+ if (!holder.getTarget().config.GpuRequired && !isHostData)
+ throw std::runtime_error(fmt::format(
+ "Current target '{}' does not support CuPy arrays.",
+ holder.getTarget().name));
+ py::buffer_info info =
+ isHostData ? other.request() : getCupyBufferInfo(other);
if (info.shape.size() > 2)
- throw std::runtime_error("overlap NumPy interop only supported "
- "for vector and matrix state data.");
+ throw std::runtime_error(
+ "overlap NumPy/CuPy interop only supported "
+ "for vector and matrix state data.");
// Check that the shapes are compatible
std::size_t otherNumElements = 1;
@@ -638,6 +720,8 @@ index pair.
.def(
"overlap",
[](state &self, py::object other) {
+ // Note: This overload is no longer needed from cupy 13.5+ onward.
+ // We can remove it in future releases.
// Make sure this is a CuPy array
if (!py::hasattr(other, "data"))
throw std::runtime_error(
diff --git a/python/utils/LinkedLibraryHolder.cpp b/python/utils/LinkedLibraryHolder.cpp
index f09b543f6ae..4fcb3ee5449 100644
--- a/python/utils/LinkedLibraryHolder.cpp
+++ b/python/utils/LinkedLibraryHolder.cpp
@@ -216,9 +216,16 @@ LinkedLibraryHolder::LinkedLibraryHolder() {
}
// Load all the defaults
- for (auto &p : libPaths)
- libHandles.emplace(p.string(),
- dlopen(p.string().c_str(), RTLD_GLOBAL | RTLD_NOW));
+ for (auto &p : libPaths) {
+ void *libHandle = dlopen(p.string().c_str(), RTLD_GLOBAL | RTLD_NOW);
+ libHandles.emplace(p.string(), libHandle);
+
+ if (!libHandle) {
+ char *error_msg = dlerror();
+ CUDAQ_INFO("Failed to load '{}': ERROR '{}'", p.string(),
+ (error_msg ? std::string(error_msg) : "unknown."));
+ }
+ }
// directory_iterator ordering is unspecified, so sort it to make it
// repeatable and consistent.
diff --git a/scripts/build_cudaq.sh b/scripts/build_cudaq.sh
index ccfdc4b789d..78e3a02f9b3 100755
--- a/scripts/build_cudaq.sh
+++ b/scripts/build_cudaq.sh
@@ -117,11 +117,11 @@ cuda_driver=${CUDACXX:-${CUDA_HOME:-/usr/local/cuda}/bin/nvcc}
cuda_version=`"$cuda_driver" --version 2>/dev/null | grep -o 'release [0-9]*\.[0-9]*' | cut -d ' ' -f 2`
cuda_major=`echo $cuda_version | cut -d '.' -f 1`
cuda_minor=`echo $cuda_version | cut -d '.' -f 2`
-if [ "$cuda_version" = "" ] || [ "$cuda_major" -lt "11" ] || ([ "$cuda_minor" -lt "8" ] && [ "$cuda_major" -eq "11" ]); then
- echo "CUDA version requirement not satisfied (required: >= 11.8, got: $cuda_version)."
+if [ "$cuda_version" = "" ] || [ "$cuda_major" -lt "12" ]; then
+ echo "CUDA version requirement not satisfied (required: >= 12.0, got: $cuda_version)."
echo "GPU-accelerated components will be omitted from the build."
unset cuda_driver
-else
+else
echo "CUDA version $cuda_version detected."
if [ -z "$CUQUANTUM_INSTALL_PREFIX" ] && [ -x "$(command -v pip)" ] && [ -n "$(pip list | grep -o cuquantum-python-cu$cuda_major)" ]; then
CUQUANTUM_INSTALL_PREFIX="$(pip show cuquantum-python-cu$cuda_major | sed -nE 's/Location: (.*)$/\1/p')/cuquantum"
diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh
index 83acb0e776d..b6df224ecbd 100644
--- a/scripts/build_docs.sh
+++ b/scripts/build_docs.sh
@@ -146,7 +146,7 @@ echo "Creating README.md for cudaq package"
package_name=cudaq
cuda_version_requirement="12.x or 13.x"
cuda_version_conda=12.4.0 # only used as example in the install script
-deprecation_notice="**Note**: Support for CUDA 11 will be removed in future releases. Please update to CUDA 12."
+deprecation_notice=""
cat "$repo_root/python/README.md.in" > "$repo_root/python/README.md"
for variable in package_name cuda_version_requirement cuda_version_conda deprecation_notice; do
sed -i "s/.{{[ ]*$variable[ ]*}}/${!variable}/g" "$repo_root/python/README.md"