diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..1bcef5b8d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Normalize all text files to LF in the repo and working copy. +* text=auto eol=lf diff --git a/.github/scripts/check_license_headers.py b/.github/scripts/check_license_headers.py index 936c3227d..30f623f4d 100644 --- a/.github/scripts/check_license_headers.py +++ b/.github/scripts/check_license_headers.py @@ -16,6 +16,10 @@ from pathlib import Path from typing import List, Optional, Tuple +# Ensure emoji and other Unicode characters can be printed on Windows (cp1252) +if hasattr(sys.stdout, "reconfigure"): + sys.stdout.reconfigure(encoding="utf-8") + # Expected license headers for different file types CPP_LICENSE_PATTERNS = [ # Full MIT license header - standard C-style comment with line breaks diff --git a/.gitignore b/.gitignore index e808604fc..6f41d8522 100644 --- a/.gitignore +++ b/.gitignore @@ -17,11 +17,13 @@ dist/ downloads/ eggs/ .eggs/ +install/ lib/ lib64/ parts/ sdist/ var/ +vcpkg_installed/ wheels/ share/python-wheels/ *.egg-info/ diff --git a/.pipelines/pip-scripts/windows-build-clang-cmake.ps1 b/.pipelines/pip-scripts/windows-build-clang-cmake.ps1 new file mode 100644 index 000000000..fc0622988 --- /dev/null +++ b/.pipelines/pip-scripts/windows-build-clang-cmake.ps1 @@ -0,0 +1,417 @@ +# windows-build-clang-cmake.ps1 +# Local script to build and test the QDK Chemistry Python package on Windows using clang-cl and CMake. +# Run from the repo root in an elevated PowerShell (admin) if VS Build Tools need installing. +# +# Usage: +# .\windows-build-clang-cmake.ps1 +# +# This will run a full build. It will check for prerequisites (VS Build Tools with C++ and Clang components, vcpkg, uv) +# and dependencies, and install them if missing. Then it will configure, build, and test the C++ library, install it, +# and finally build and test the Python package. 
By default, it uses static linking for dependencies (no DLLs). +# +# Optional switches: +# -DynamicDeps # Use dynamic linking for dependencies (DLLs) instead of static. +# # This requires copying DLLs to the Python package folder. +# -SkipPrereqs # Skip prerequisite installation (VS Build tools, vcpkg, etc) +# -SkipCpp # Skip C++ build, only do pip install step (assumes C++ library is already built and installed) +# -SkipConfigure # Skip CMake configure, incremental build only +# -SkipPython # Skip Python build, only do C++ +# -SkipTests # Skip test runs + +param( + [switch]$DynamicDeps, + [switch]$SkipPrereqs, + [switch]$SkipCpp, + [switch]$SkipConfigure, + [switch]$SkipPython, + [switch]$SkipTests +) + +$ErrorActionPreference = "Stop" +$RepoRoot = Get-Location +if (-not (Test-Path "$RepoRoot\cpp\CMakeLists.txt")) { + Write-Error "This script must be run from the repository root." + exit 1 +} +$BuildDir = "$RepoRoot\cpp\build" +$InstallDir = "$RepoRoot\install" +$VcpkgInstalledDir = "$RepoRoot\vcpkg_installed" +# vcpkg triplets: https://learn.microsoft.com/en-us/vcpkg/users/platforms/windows +# Using dynamic (DLL) dependencies requires copying the corresponding DLL files to qdk-chemistry's Python package +# installation folder. Else, Windows won't find them at runtime and the Python package will fail to import. +# This is because Windows does not have a system-wide DLL search path configuration like Linux's ldconfig. 
+if ($DynamicDeps) { + $VcpkgTriplet = "x64-windows" +} else { + $VcpkgTriplet = "x64-windows-static-md" +} +$QDK_UARCH = "x86-64-v3" + +$linkMode = if ($DynamicDeps) { "dynamic" } else { "static" } +Write-Host "============================================" -ForegroundColor Cyan +Write-Host " QDK Chemistry - Windows Local Build Test " -ForegroundColor Cyan +Write-Host "============================================" -ForegroundColor Cyan +Write-Host "Repo root: $RepoRoot" +Write-Host "Triplet: $VcpkgTriplet ($linkMode)" +Write-Host "" + +# -------------------------------------------------------------------------- +# Helper: ensure a command exists +# -------------------------------------------------------------------------- +function Assert-Command($Name) { + if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) { + Write-Error "$Name not found in PATH. Please install it first." + exit 1 + } +} + +# ========================================================================== +# STEP 0 - Prerequisites +# ========================================================================== +if (-not $SkipPrereqs) { + Write-Host "" + Write-Host "=== Step 0: Checking / installing prerequisites ===" -ForegroundColor Yellow + + # --- 0a. VS Build Tools with clang-cl --- + $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $clangCl = $null + + # Search existing VS installations for clang-cl (-products * includes BuildTools) + if (Test-Path $vswhere) { + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + if ($vsPath) { + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { $clangCl = $c; break } + } + } + } + + if (-not $clangCl) { + Write-Host "clang-cl not found. Installing VS Build Tools with C++ and Clang components..." 
-ForegroundColor Magenta + Write-Host "This requires an elevated (admin) PowerShell and will take several minutes." + Write-Host "" + + $installerUrl = "https://aka.ms/vs/17/release/vs_BuildTools.exe" + $installerPath = "$env:TEMP\vs_BuildTools.exe" + + if (-not (Test-Path $installerPath)) { + Write-Host "Downloading VS Build Tools installer..." + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath -UseBasicParsing + } + + Write-Host "Running VS Build Tools installer (this may take 10-20 minutes)..." + $installArgs = @( + "--quiet", "--wait", "--norestart", + "--add", "Microsoft.VisualStudio.Workload.VCTools", + "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang", + "--add", "Microsoft.VisualStudio.Component.VC.Llvm.ClangToolset", + "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "--add", "Microsoft.VisualStudio.Component.Windows11SDK.26100", + "--includeRecommended" + ) + $proc = Start-Process -FilePath $installerPath -ArgumentList $installArgs -Wait -PassThru + if ($proc.ExitCode -ne 0 -and $proc.ExitCode -ne 3010) { + Write-Error "VS Build Tools installation failed with exit code $($proc.ExitCode)" + exit 1 + } + Write-Host "VS Build Tools installed successfully." -ForegroundColor Green + + # Re-search for clang-cl + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + if ($vsPath) { + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { $clangCl = $c; break } + } + } + + if (-not $clangCl) { + Write-Error "clang-cl still not found after installing VS Build Tools." + exit 1 + } + } + + $clangDir = Split-Path $clangCl + Write-Host "Using clang-cl: $clangCl" -ForegroundColor Green + & $clangCl --version + + # Add clang-cl to PATH for this session + $env:PATH = "$clangDir;$env:PATH" + + # --- 0b. 
Set up MSVC environment (vcvarsall) --- + Write-Host "" + Write-Host "Setting up MSVC developer environment..." + $vsPath = & $vswhere -latest -products * -property installationPath + $vcvarsall = "$vsPath\VC\Auxiliary\Build\vcvarsall.bat" + if (-not (Test-Path $vcvarsall)) { + Write-Error "vcvarsall.bat not found at $vcvarsall" + exit 1 + } + + # Capture environment from vcvarsall + $envBefore = @{} + Get-ChildItem env: | ForEach-Object { $envBefore[$_.Name] = $_.Value } + + $tempFile = [System.IO.Path]::GetTempFileName() + cmd /c "`"$vcvarsall`" x64 && set > `"$tempFile`"" + Get-Content $tempFile | ForEach-Object { + if ($_ -match "^([^=]+)=(.*)$") { + $name = $matches[1] + $value = $matches[2] + if ($envBefore[$name] -ne $value) { + [System.Environment]::SetEnvironmentVariable($name, $value, "Process") + } + } + } + Remove-Item $tempFile + Write-Host "MSVC developer environment configured." -ForegroundColor Green + + # Re-add clang-cl to PATH (vcvarsall may have reset it) + $env:PATH = "$clangDir;$env:PATH" + + # --- 0c. vcpkg --- + # Check VS-bundled vcpkg first, then VCPKG_INSTALLATION_ROOT, then bootstrap + $vcpkgRoot = $null + $vsVcpkg = "$vsPath\VC\vcpkg\vcpkg.exe" + if (Test-Path $vsVcpkg) { + $vcpkgRoot = Split-Path $vsVcpkg + } elseif ($env:VCPKG_INSTALLATION_ROOT -and (Test-Path "$($env:VCPKG_INSTALLATION_ROOT)\vcpkg.exe")) { + $vcpkgRoot = $env:VCPKG_INSTALLATION_ROOT + } else { + $vcpkgRoot = "$RepoRoot\vcpkg-tool" + if (-not (Test-Path "$vcpkgRoot\vcpkg.exe")) { + Write-Host "" + Write-Host "Bootstrapping vcpkg..." + git clone https://github.com/microsoft/vcpkg.git "$vcpkgRoot" + & "$vcpkgRoot\bootstrap-vcpkg.bat" -disableMetrics + } + } + Write-Host "Using vcpkg: $vcpkgRoot\vcpkg.exe" -ForegroundColor Green + + # --- 0d. Install vcpkg packages --- + Write-Host "" + Write-Host "Installing vcpkg dependencies with triplet '$VcpkgTriplet' (this may take a while on first run)..." 
+ & "$vcpkgRoot\vcpkg.exe" install ` + --triplet $VcpkgTriplet ` + --x-manifest-root="$RepoRoot" ` + --x-install-root="$VcpkgInstalledDir" ` + --overlay-ports="$RepoRoot\vcpkg-overlay\ports" + if ($LASTEXITCODE -ne 0) { + Write-Error "vcpkg install failed" + exit 1 + } + Write-Host "vcpkg dependencies installed." -ForegroundColor Green + + # --- 0e. Set CMake/vcpkg environment variables --- + $toolchainFile = "$vcpkgRoot\scripts\buildsystems\vcpkg.cmake" + $env:CMAKE_TOOLCHAIN_FILE = $toolchainFile + $env:VCPKG_TARGET_TRIPLET = $VcpkgTriplet + $env:VCPKG_INSTALLED_DIR = $VcpkgInstalledDir + $env:CMAKE_PREFIX_PATH = "$VcpkgInstalledDir\$VcpkgTriplet" + # Dynamic triplets produce runtime DLLs that must be on PATH for the build tools + if ($DynamicDeps) { + $env:PATH = "$VcpkgInstalledDir\$VcpkgTriplet\bin;$VcpkgInstalledDir\$VcpkgTriplet\debug\bin;$env:PATH" + } + + # --- 0f. uv (Python package manager) --- + if (-not (Get-Command uv -ErrorAction SilentlyContinue)) { + Write-Host "uv not found. Installing..." -ForegroundColor Magenta + Invoke-RestMethod https://astral.sh/uv/install.ps1 | Invoke-Expression + if (-not (Get-Command uv -ErrorAction SilentlyContinue)) { + Write-Error "uv installation failed. 
Install manually: https://docs.astral.sh/uv/getting-started/installation/" + exit 1 + } + } + Write-Host "Using uv: $(Get-Command uv | Select-Object -ExpandProperty Source) ($(uv --version))" -ForegroundColor Green + +} else { + Write-Host "=== Skipping prerequisites (reusing previous environment) ===" -ForegroundColor DarkGray + + # Even when skipping, we need the environment set up + $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { + $clangDir = Split-Path $c + $env:PATH = "$clangDir;$env:PATH" + break + } + } + Assert-Command "clang-cl" + Assert-Command "cmake" + Assert-Command "ninja" + + # Set up MSVC env + $vcvarsall = "$vsPath\VC\Auxiliary\Build\vcvarsall.bat" + if (Test-Path $vcvarsall) { + $tempFile = [System.IO.Path]::GetTempFileName() + cmd /c "`"$vcvarsall`" x64 && set > `"$tempFile`"" + Get-Content $tempFile | ForEach-Object { + if ($_ -match "^([^=]+)=(.*)$") { + [System.Environment]::SetEnvironmentVariable($matches[1], $matches[2], "Process") + } + } + Remove-Item $tempFile + # Re-add clang-cl to PATH + $env:PATH = "$clangDir;$env:PATH" + } + + # vcpkg env - check VS-bundled vcpkg first + $vcpkgRoot = $null + $vsVcpkg = "$vsPath\VC\vcpkg\vcpkg.exe" + if (Test-Path $vsVcpkg) { + $vcpkgRoot = Split-Path $vsVcpkg + } elseif ($env:VCPKG_INSTALLATION_ROOT -and (Test-Path "$($env:VCPKG_INSTALLATION_ROOT)\vcpkg.exe")) { + $vcpkgRoot = $env:VCPKG_INSTALLATION_ROOT + } else { + $vcpkgRoot = "$RepoRoot\vcpkg-tool" + } + $env:CMAKE_TOOLCHAIN_FILE = "$vcpkgRoot\scripts\buildsystems\vcpkg.cmake" + $env:VCPKG_TARGET_TRIPLET = $VcpkgTriplet + $env:VCPKG_INSTALLED_DIR = $VcpkgInstalledDir + $env:CMAKE_PREFIX_PATH = "$VcpkgInstalledDir\$VcpkgTriplet" + # Dynamic triplets produce runtime DLLs 
that must be on PATH for the build tools + if ($DynamicDeps) { + $env:PATH = "$VcpkgInstalledDir\$VcpkgTriplet\bin;$VcpkgInstalledDir\$VcpkgTriplet\debug\bin;$env:PATH" + } +} + +# Verify tools +Write-Host "" +Write-Host "=== Environment summary ===" -ForegroundColor Yellow +Write-Host " clang-cl: $(Get-Command clang-cl | Select-Object -ExpandProperty Source)" +Write-Host " cmake: $(Get-Command cmake | Select-Object -ExpandProperty Source)" +Write-Host " ninja: $(Get-Command ninja | Select-Object -ExpandProperty Source)" +Write-Host " python: $(Get-Command python | Select-Object -ExpandProperty Source) ($(python --version 2>&1))" +Write-Host " uv: $(Get-Command uv | Select-Object -ExpandProperty Source) ($(uv --version 2>&1))" +Write-Host " TOOLCHAIN_FILE: $env:CMAKE_TOOLCHAIN_FILE" +Write-Host " VCPKG_TRIPLET: $env:VCPKG_TARGET_TRIPLET" +Write-Host " VCPKG_INSTALLED: $env:VCPKG_INSTALLED_DIR" +Write-Host "" + +# ========================================================================== +# STEP 1 - C++ Build +# ========================================================================== +if (-not $SkipCpp) { + if (-not $SkipConfigure) { + Write-Host "=== Step 1: Configure C++ build ===" -ForegroundColor Yellow + cmake -S cpp -B "$BuildDir" ` + -GNinja ` + -DQDK_UARCH="$QDK_UARCH" ` + -DQDK_CHEMISTRY_ENABLE_COVERAGE=OFF ` + -DQDK_CHEMISTRY_ENABLE_MPI=OFF ` + -DQDK_ENABLE_OPENMP=ON ` + -DMACIS_ENABLE_TESTS=OFF ` + -DBUILD_SHARED_LIBS=OFF ` + -DBUILD_TESTING=ON ` + -DCMAKE_BUILD_TYPE=Release ` + -DCMAKE_C_COMPILER=clang-cl ` + -DCMAKE_CXX_COMPILER=clang-cl ` + -DCMAKE_INSTALL_PREFIX="$InstallDir" ` + -DCMAKE_TOOLCHAIN_FILE="$env:CMAKE_TOOLCHAIN_FILE" ` + -DVCPKG_TARGET_TRIPLET="$env:VCPKG_TARGET_TRIPLET" ` + -DVCPKG_INSTALLED_DIR="$env:VCPKG_INSTALLED_DIR" ` + -DFETCHCONTENT_QUIET=OFF + if ($LASTEXITCODE -ne 0) { Write-Error "CMake configure failed"; exit 1 } + } else { + Write-Host "=== Step 1: Skipping configure (incremental build) ===" -ForegroundColor DarkGray + if 
(-not (Test-Path "$BuildDir\build.ninja")) { + Write-Error "No existing build found at $BuildDir. Run without -SkipConfigure first." + exit 1 + } + } + + Write-Host "" + Write-Host "=== Step 2: Build C++ library ===" -ForegroundColor Yellow + cmake --build "$BuildDir" --parallel 6 2>&1 *> cpp/build/build.log + if ($LASTEXITCODE -ne 0) { Write-Error "CMake build failed"; exit 1 } + Write-Host "C++ build succeeded." -ForegroundColor Green + + if (-not $SkipTests) { + Write-Host "" + Write-Host "=== Step 3: Run C++ tests ===" -ForegroundColor Yellow + Push-Location "$BuildDir" + $env:OMP_NUM_THREADS = 2 + ctest --output-on-failure --verbose --timeout 400 --output-junit ctest_results.xml -E "MACIS_SERIAL_TEST" + $ctestExit = $LASTEXITCODE + Pop-Location + if ($ctestExit -ne 0) { + Write-Warning "Some C++ tests failed (exit code: $ctestExit)" + } else { + Write-Host "All C++ tests passed." -ForegroundColor Green + } + } + + Write-Host "" + Write-Host "=== Step 4: Install C++ library ===" -ForegroundColor Yellow + cmake --install "$BuildDir" --prefix "$InstallDir" + if ($LASTEXITCODE -ne 0) { Write-Error "CMake install failed"; exit 1 } + Write-Host "C++ library installed to $InstallDir" -ForegroundColor Green + +} else { + Write-Host "=== Skipping C++ build ===" -ForegroundColor DarkGray +} + +# ========================================================================== +# STEP 2 - Python Build +# ========================================================================== +if (-not $SkipPython) { + Write-Host "" + Write-Host "=== Step 5: Install Python package ===" -ForegroundColor Yellow + Push-Location "$RepoRoot\python" + + $env:CMAKE_BUILD_PARALLEL_LEVEL = "6" + if (-not (Test-Path .\venv)) { + uv venv .\venv + } + .\venv\Scripts\activate.ps1 + # Do not install: + # - plugins: pyscf does not build on Windows + # - jupyter: requires plugins + uv pip install -v .[coverage,dev,docs,qiskit-extras,openfermion-extras] ` + -C cmake.args=-GNinja ` + -C 
cmake.define.CMAKE_PREFIX_PATH="$env:CMAKE_PREFIX_PATH;$InstallDir" ` + -C cmake.define.CMAKE_C_COMPILER=clang-cl ` + -C cmake.define.CMAKE_CXX_COMPILER=clang-cl ` + -C cmake.define.CMAKE_TOOLCHAIN_FILE="$env:CMAKE_TOOLCHAIN_FILE" ` + -C cmake.define.VCPKG_TARGET_TRIPLET="$env:VCPKG_TARGET_TRIPLET" ` + -C cmake.define.VCPKG_INSTALLED_DIR="$env:VCPKG_INSTALLED_DIR" + if ($LASTEXITCODE -ne 0) { Pop-Location; Write-Error "Python package install failed"; exit 1 } + + python -c "import qdk_chemistry; print('qdk_chemistry version:', qdk_chemistry.__version__)" + if ($LASTEXITCODE -ne 0) { Pop-Location; Write-Error "Python import check failed"; exit 1 } + Write-Host "Python package installed successfully." -ForegroundColor Green + + if (-not $SkipTests) { + Write-Host "" + Write-Host "=== Step 6: Run Python tests ===" -ForegroundColor Yellow + $env:OMP_NUM_THREADS = 2 + pytest -v --tb=short + $pytestExit = $LASTEXITCODE + if ($pytestExit -ne 0) { + Write-Warning "Some Python tests failed (exit code: $pytestExit)" + } else { + Write-Host "All Python tests passed." -ForegroundColor Green + } + } + + Pop-Location +} else { + Write-Host "=== Skipping Python build ===" -ForegroundColor DarkGray +} + +Write-Host "" +Write-Host "============================================" -ForegroundColor Cyan +Write-Host " Build script finished! " -ForegroundColor Cyan +Write-Host "============================================" -ForegroundColor Cyan diff --git a/.pipelines/pip-scripts/windows-build-clang-pip.ps1 b/.pipelines/pip-scripts/windows-build-clang-pip.ps1 new file mode 100644 index 000000000..b659cdcd1 --- /dev/null +++ b/.pipelines/pip-scripts/windows-build-clang-pip.ps1 @@ -0,0 +1,369 @@ +# windows-build-clang-pip.ps1 +# Local script to build and test the QDK Chemistry Python package on Windows using Clang and pip. +# pip (via scikit-build-core) handles the full C++ and Python build in one step. 
+# Run from the repo root in an elevated PowerShell (admin) if VS Build Tools need installing. +# +# Usage: +# .\windows-build-clang-pip.ps1 +# +# This will run a full build. It will check for prerequisites (VS Build Tools with C++ and Clang components, vcpkg, uv) +# and dependencies, and install them if missing. Then it will build and install the Python package (scikit-build-core +# compiles the C++ library as part of the wheel) and run the Python tests. By default, it uses static linking for dependencies (no DLLs). +# +# Optional switches: +# -DynamicDeps # Use dynamic linking for dependencies (DLLs) instead of static. +# # This requires copying DLLs to the Python package folder. +# -SkipPrereqs # Skip prerequisite installation (VS Build Tools, vcpkg, etc) +# -SkipBuild # Skip the build step, only run tests +# -SkipTests # Skip test runs + +param( + [switch]$DynamicDeps, + [switch]$SkipPrereqs, + [switch]$SkipBuild, + [switch]$SkipTests +) + +$ErrorActionPreference = "Stop" +$RepoRoot = Get-Location +if (-not (Test-Path "$RepoRoot\cpp\CMakeLists.txt")) { + Write-Error "This script must be run from the repository root." + exit 1 +} +$VcpkgInstalledDir = "$RepoRoot\vcpkg_installed" +# vcpkg triplets: https://learn.microsoft.com/en-us/vcpkg/users/platforms/windows +# Using dynamic (DLL) dependencies requires copying the corresponding DLL files to qdk-chemistry's Python package +# installation folder. Otherwise, Windows won't find them at runtime and the Python package will fail to import. +# This is because Windows does not have a system-wide DLL search path configuration like Linux's ldconfig. 
+if ($DynamicDeps) { + $VcpkgTriplet = "x64-windows" +} else { + $VcpkgTriplet = "x64-windows-static-md" +} +$QDK_UARCH = "x86-64-v3" + +$linkMode = if ($DynamicDeps) { "dynamic" } else { "static" } +Write-Host "============================================" -ForegroundColor Cyan +Write-Host " QDK Chemistry - Windows Build (pip) " -ForegroundColor Cyan +Write-Host "============================================" -ForegroundColor Cyan +Write-Host "Repo root: $RepoRoot" +Write-Host "Triplet: $VcpkgTriplet ($linkMode)" +Write-Host "" + +# -------------------------------------------------------------------------- +# Helper: ensure a command exists +# -------------------------------------------------------------------------- +function Assert-Command($Name) { + if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) { + Write-Error "$Name not found in PATH. Please install it first." + exit 1 + } +} + +# ========================================================================== +# STEP 0 - Prerequisites +# ========================================================================== +if (-not $SkipPrereqs) { + Write-Host "" + Write-Host "=== Step 0: Checking / installing prerequisites ===" -ForegroundColor Yellow + + # --- 0a. VS Build Tools with clang-cl --- + $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $clangCl = $null + + # Search existing VS installations for clang-cl (-products * includes BuildTools) + if (Test-Path $vswhere) { + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + if ($vsPath) { + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { $clangCl = $c; break } + } + } + } + + if (-not $clangCl) { + Write-Host "clang-cl not found. Installing VS Build Tools with C++ and Clang components..." 
-ForegroundColor Magenta + Write-Host "This requires an elevated (admin) PowerShell and will take several minutes." + Write-Host "" + + $installerUrl = "https://aka.ms/vs/17/release/vs_BuildTools.exe" + $installerPath = "$env:TEMP\vs_BuildTools.exe" + + if (-not (Test-Path $installerPath)) { + Write-Host "Downloading VS Build Tools installer..." + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath -UseBasicParsing + } + + Write-Host "Running VS Build Tools installer (this may take 10-20 minutes)..." + $installArgs = @( + "--quiet", "--wait", "--norestart", + "--add", "Microsoft.VisualStudio.Workload.VCTools", + "--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang", + "--add", "Microsoft.VisualStudio.Component.VC.Llvm.ClangToolset", + "--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "--add", "Microsoft.VisualStudio.Component.Windows11SDK.26100", + "--includeRecommended" + ) + $proc = Start-Process -FilePath $installerPath -ArgumentList $installArgs -Wait -PassThru + if ($proc.ExitCode -ne 0 -and $proc.ExitCode -ne 3010) { + Write-Error "VS Build Tools installation failed with exit code $($proc.ExitCode)" + exit 1 + } + Write-Host "VS Build Tools installed successfully." -ForegroundColor Green + + # Re-search for clang-cl + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + if ($vsPath) { + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { $clangCl = $c; break } + } + } + + if (-not $clangCl) { + Write-Error "clang-cl still not found after installing VS Build Tools." + exit 1 + } + } + + $clangDir = Split-Path $clangCl + Write-Host "Using clang-cl: $clangCl" -ForegroundColor Green + & $clangCl --version + + # Add clang-cl to PATH for this session + $env:PATH = "$clangDir;$env:PATH" + + # --- 0b. 
Set up MSVC environment (vcvarsall) --- + Write-Host "" + Write-Host "Setting up MSVC developer environment..." + $vsPath = & $vswhere -latest -products * -property installationPath + $vcvarsall = "$vsPath\VC\Auxiliary\Build\vcvarsall.bat" + if (-not (Test-Path $vcvarsall)) { + Write-Error "vcvarsall.bat not found at $vcvarsall" + exit 1 + } + + # Capture environment from vcvarsall + $envBefore = @{} + Get-ChildItem env: | ForEach-Object { $envBefore[$_.Name] = $_.Value } + + $tempFile = [System.IO.Path]::GetTempFileName() + cmd /c "`"$vcvarsall`" x64 && set > `"$tempFile`"" + Get-Content $tempFile | ForEach-Object { + if ($_ -match "^([^=]+)=(.*)$") { + $name = $matches[1] + $value = $matches[2] + if ($envBefore[$name] -ne $value) { + [System.Environment]::SetEnvironmentVariable($name, $value, "Process") + } + } + } + Remove-Item $tempFile + Write-Host "MSVC developer environment configured." -ForegroundColor Green + + # Re-add clang-cl to PATH (vcvarsall may have reset it) + $env:PATH = "$clangDir;$env:PATH" + + # --- 0c. vcpkg --- + # Check VS-bundled vcpkg first, then VCPKG_INSTALLATION_ROOT, then bootstrap + $vcpkgRoot = $null + $vsVcpkg = "$vsPath\VC\vcpkg\vcpkg.exe" + if (Test-Path $vsVcpkg) { + $vcpkgRoot = Split-Path $vsVcpkg + } elseif ($env:VCPKG_INSTALLATION_ROOT -and (Test-Path "$($env:VCPKG_INSTALLATION_ROOT)\vcpkg.exe")) { + $vcpkgRoot = $env:VCPKG_INSTALLATION_ROOT + } else { + $vcpkgRoot = "$RepoRoot\vcpkg-tool" + if (-not (Test-Path "$vcpkgRoot\vcpkg.exe")) { + Write-Host "" + Write-Host "Bootstrapping vcpkg..." + git clone https://github.com/microsoft/vcpkg.git "$vcpkgRoot" + & "$vcpkgRoot\bootstrap-vcpkg.bat" -disableMetrics + } + } + Write-Host "Using vcpkg: $vcpkgRoot\vcpkg.exe" -ForegroundColor Green + + # --- 0d. Install vcpkg packages --- + Write-Host "" + Write-Host "Installing vcpkg dependencies with triplet '$VcpkgTriplet' (this may take a while on first run)..." 
+ & "$vcpkgRoot\vcpkg.exe" install ` + --triplet $VcpkgTriplet ` + --x-manifest-root="$RepoRoot" ` + --x-install-root="$VcpkgInstalledDir" ` + --overlay-ports="$RepoRoot\vcpkg-overlay\ports" + if ($LASTEXITCODE -ne 0) { + Write-Error "vcpkg install failed" + exit 1 + } + Write-Host "vcpkg dependencies installed." -ForegroundColor Green + + # --- 0e. Set CMake/vcpkg environment variables --- + $toolchainFile = "$vcpkgRoot\scripts\buildsystems\vcpkg.cmake" + $env:CMAKE_TOOLCHAIN_FILE = $toolchainFile + $env:VCPKG_TARGET_TRIPLET = $VcpkgTriplet + $env:VCPKG_INSTALLED_DIR = $VcpkgInstalledDir + $env:CMAKE_PREFIX_PATH = "$VcpkgInstalledDir\$VcpkgTriplet" + # Dynamic triplets produce runtime DLLs that must be on PATH for the build tools + if ($DynamicDeps) { + $env:PATH = "$VcpkgInstalledDir\$VcpkgTriplet\bin;$VcpkgInstalledDir\$VcpkgTriplet\debug\bin;$env:PATH" + } + + # --- 0f. uv (Python package manager) --- + if (-not (Get-Command uv -ErrorAction SilentlyContinue)) { + Write-Host "uv not found. Installing..." -ForegroundColor Magenta + Invoke-RestMethod https://astral.sh/uv/install.ps1 | Invoke-Expression + if (-not (Get-Command uv -ErrorAction SilentlyContinue)) { + Write-Error "uv installation failed. 
Install manually: https://docs.astral.sh/uv/getting-started/installation/" + exit 1 + } + } + Write-Host "Using uv: $(Get-Command uv | Select-Object -ExpandProperty Source) ($(uv --version))" -ForegroundColor Green + +} else { + Write-Host "=== Skipping prerequisites (reusing previous environment) ===" -ForegroundColor DarkGray + + # Even when skipping, we need the environment set up + $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $vsPath = & $vswhere -latest -products * -property installationPath 2>$null + $candidates = @( + "$vsPath\VC\Tools\Llvm\x64\bin\clang-cl.exe", + "$vsPath\VC\Tools\Llvm\bin\clang-cl.exe" + ) + foreach ($c in $candidates) { + if (Test-Path $c) { + $clangDir = Split-Path $c + $env:PATH = "$clangDir;$env:PATH" + break + } + } + Assert-Command "clang-cl" + Assert-Command "cmake" + Assert-Command "ninja" + + # Set up MSVC env + $vcvarsall = "$vsPath\VC\Auxiliary\Build\vcvarsall.bat" + if (Test-Path $vcvarsall) { + $tempFile = [System.IO.Path]::GetTempFileName() + cmd /c "`"$vcvarsall`" x64 && set > `"$tempFile`"" + Get-Content $tempFile | ForEach-Object { + if ($_ -match "^([^=]+)=(.*)$") { + [System.Environment]::SetEnvironmentVariable($matches[1], $matches[2], "Process") + } + } + Remove-Item $tempFile + # Re-add clang-cl to PATH + $env:PATH = "$clangDir;$env:PATH" + } + + # vcpkg env - check VS-bundled vcpkg first + $vcpkgRoot = $null + $vsVcpkg = "$vsPath\VC\vcpkg\vcpkg.exe" + if (Test-Path $vsVcpkg) { + $vcpkgRoot = Split-Path $vsVcpkg + } elseif ($env:VCPKG_INSTALLATION_ROOT -and (Test-Path "$($env:VCPKG_INSTALLATION_ROOT)\vcpkg.exe")) { + $vcpkgRoot = $env:VCPKG_INSTALLATION_ROOT + } else { + $vcpkgRoot = "$RepoRoot\vcpkg-tool" + } + $env:CMAKE_TOOLCHAIN_FILE = "$vcpkgRoot\scripts\buildsystems\vcpkg.cmake" + $env:VCPKG_TARGET_TRIPLET = $VcpkgTriplet + $env:VCPKG_INSTALLED_DIR = $VcpkgInstalledDir + $env:CMAKE_PREFIX_PATH = "$VcpkgInstalledDir\$VcpkgTriplet" + # Dynamic triplets produce runtime DLLs 
that must be on PATH for the build tools + if ($DynamicDeps) { + $env:PATH = "$VcpkgInstalledDir\$VcpkgTriplet\bin;$VcpkgInstalledDir\$VcpkgTriplet\debug\bin;$env:PATH" + } +} + +# Verify tools +Write-Host "" +Write-Host "=== Environment summary ===" -ForegroundColor Yellow +Write-Host " clang-cl: $(Get-Command clang-cl | Select-Object -ExpandProperty Source)" +Write-Host " cmake: $(Get-Command cmake | Select-Object -ExpandProperty Source)" +Write-Host " ninja: $(Get-Command ninja | Select-Object -ExpandProperty Source)" +Write-Host " python: $(Get-Command python | Select-Object -ExpandProperty Source) ($(python --version 2>&1))" +Write-Host " uv: $(Get-Command uv | Select-Object -ExpandProperty Source) ($(uv --version 2>&1))" +Write-Host " TOOLCHAIN_FILE: $env:CMAKE_TOOLCHAIN_FILE" +Write-Host " VCPKG_TRIPLET: $env:VCPKG_TARGET_TRIPLET" +Write-Host " VCPKG_INSTALLED: $env:VCPKG_INSTALLED_DIR" +Write-Host "" + +# ========================================================================== +# STEP 1 - Build and install Python package (C++ is built by scikit-build-core) +# ========================================================================== +if (-not $SkipBuild) { + Write-Host "=== Step 1: Build and install Python package ===" -ForegroundColor Yellow + Push-Location "$RepoRoot\python" + + $env:CMAKE_BUILD_PARALLEL_LEVEL = "6" + if (-not (Test-Path .\venv)) { + uv venv .\venv + } + .\venv\Scripts\activate.ps1 + # pip drives the full build: scikit-build-core invokes CMake to compile the + # C++ library and pybind11 bindings, then packages everything into a wheel. 
+ # Do not install: + # - plugins: pyscf does not build on Windows + # - jupyter: requires plugins + uv pip install -v .[coverage,dev,docs,qiskit-extras,openfermion-extras] ` + -C cmake.args=-GNinja ` + -C cmake.define.QDK_UARCH="$QDK_UARCH" ` + -C cmake.define.QDK_CHEMISTRY_ENABLE_COVERAGE=OFF ` + -C cmake.define.QDK_CHEMISTRY_ENABLE_MPI=OFF ` + -C cmake.define.QDK_ENABLE_OPENMP=ON ` + -C cmake.define.BUILD_SHARED_LIBS=OFF ` + -C cmake.define.BUILD_TESTING=OFF ` + -C cmake.define.CMAKE_BUILD_TYPE=Release ` + -C cmake.define.CMAKE_C_COMPILER=clang-cl ` + -C cmake.define.CMAKE_CXX_COMPILER=clang-cl ` + -C cmake.define.CMAKE_TOOLCHAIN_FILE="$env:CMAKE_TOOLCHAIN_FILE" ` + -C cmake.define.VCPKG_TARGET_TRIPLET="$env:VCPKG_TARGET_TRIPLET" ` + -C cmake.define.VCPKG_INSTALLED_DIR="$env:VCPKG_INSTALLED_DIR" + if ($LASTEXITCODE -ne 0) { Pop-Location; Write-Error "Python package install failed"; exit 1 } + + python -c "import qdk_chemistry; print('qdk_chemistry version:', qdk_chemistry.__version__)" + if ($LASTEXITCODE -ne 0) { Pop-Location; Write-Error "Python import check failed"; exit 1 } + Write-Host "Python package installed successfully." -ForegroundColor Green + + Pop-Location +} else { + Write-Host "=== Skipping build ===" -ForegroundColor DarkGray +} + +# ========================================================================== +# STEP 2 - Run Python tests +# ========================================================================== +if (-not $SkipTests) { + Write-Host "" + Write-Host "=== Step 2: Run Python tests ===" -ForegroundColor Yellow + Push-Location "$RepoRoot\python" + + # Ensure venv is activated + if (Test-Path .\venv\Scripts\activate.ps1) { + .\venv\Scripts\activate + } + + $env:OMP_NUM_THREADS = 2 + pytest -v --tb=short + $pytestExit = $LASTEXITCODE + Pop-Location + if ($pytestExit -ne 0) { + Write-Warning "Some Python tests failed (exit code: $pytestExit)" + } else { + Write-Host "All Python tests passed." 
-ForegroundColor Green + } +} else { + Write-Host "=== Skipping tests ===" -ForegroundColor DarkGray +} + +Write-Host "" +Write-Host "============================================" -ForegroundColor Cyan +Write-Host " Build script finished! " -ForegroundColor Cyan +Write-Host "============================================" -ForegroundColor Cyan diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2d5e45cef..1d061348e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,8 +24,10 @@ repos: files: *linted_files - id: trailing-whitespace files: *linted_files + exclude: \.patch$ - id: end-of-file-fixer files: *linted_files + exclude: \.patch$ - id: debug-statements files: *linted_files - id: check-case-conflict diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index fb04c96a1..d5a9095f1 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -41,9 +41,22 @@ if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release) endif() -# Add compiler flags -set(CMAKE_CXX_FLAGS_DEBUG_INIT "-g -O0 -Wall -Wextra") -set(CMAKE_CXX_FLAGS_RELEASE_INIT "-O3 -DNDEBUG") +# Set per-config compile flags. 
+if(MSVC) # clang-cl and cl + set(CMAKE_CXX_FLAGS_DEBUG "/Zi /Od /W3 /RTC1") + set(CMAKE_CXX_FLAGS_RELEASE "/O2 /W1 /DNDEBUG") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "/Zi /O2 /W1 /DNDEBUG") +elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang") + set(CMAKE_CXX_FLAGS_DEBUG "-g -O0 -Wall -Wextra") + set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -O3 -DNDEBUG") +else() + message(WARNING "Unknown compiler ${CMAKE_CXX_COMPILER_ID}, using default CMAKE_CXX_FLAGS") +endif() +set(CMAKE_C_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") +set(CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + # Options option(QDK_CHEMISTRY_ENABLE_COVERAGE "Enable coverage build" OFF) @@ -56,6 +69,12 @@ option(QDK_CHEMISTRY_ENABLE_MPI "Enable MPI Bindings for QDK Chemistry" OFF) option(QDK_EMBED_RESOURCE_LOCATION "Point to embedded (permanent) resource location" ON) # Enable OpenMP by default, except on Apple platforms with AppleClang +# On Windows with clang-cl, CMake resolves OpenMP to: +# OpenMP_CXX_FLAGS = -Xclang -fopenmp +# OpenMP_libomp_LIBRARY = .../VC/Tools/MSVC/.../lib/x64/libomp.lib +# This uses LLVM's libomp runtime (not MSVC's vcomp*.dll). libomp.dll must +# be present at runtime alongside the built binaries. 
It is found in: +# %VS_INSTALL_DIR%\VC\Tools\Llvm\x64\bin\libomp.dll cmake_dependent_option(QDK_ENABLE_OPENMP "Enable OpenMP support" ON "NOT (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL \"AppleClang\")" OFF) @@ -92,6 +111,7 @@ if(NOT macis_FOUND) set(MACIS_ENABLE_MPI OFF CACHE BOOL "MACIS enable MPI" FORCE) set(MACIS_ENABLE_PYTHON OFF CACHE BOOL "MACIS Enable Python" FORCE) set(MACIS_ENABLE_EXAMPLES OFF CACHE BOOL "MACIS Build Examples" FORCE) + set(MACIS_ENABLE_TESTS ON CACHE BOOL "MACIS Build Tests") set(MACIS_ENABLE_OPENMP ${QDK_ENABLE_OPENMP} CACHE BOOL "MACIS Enable OpenMP" FORCE) if(DEFINED QDK_UARCH_USED) set(MACIS_UARCH ${QDK_UARCH_USED} CACHE STRING "MACIS Microarchitecture" FORCE) @@ -164,8 +184,12 @@ set_target_properties(chemistry PROPERTIES OUTPUT_NAME "qdk_chemistry") if(QDK_CHEMISTRY_ENABLE_COVERAGE) if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") message(STATUS "Enabling coverage build") - target_compile_options(chemistry PRIVATE --coverage -fprofile-arcs -ftest-coverage) - target_link_libraries(chemistry PRIVATE --coverage) + if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang" AND NOT MSVC) + target_compile_options(chemistry PRIVATE --coverage -fprofile-arcs -ftest-coverage) + target_link_libraries(chemistry PRIVATE --coverage) + else() + message(WARNING "Coverage build is only supported with GCC or Clang compilers") + endif() else() message(FATAL_ERROR "Coverage build is only supported in CMAKE_BUILD_TYPE=Debug or RelWithDebInfo mode") endif() diff --git a/cpp/cmake/qdk-uarch.cmake b/cpp/cmake/qdk-uarch.cmake index 1c2116ba8..9373246fe 100644 --- a/cpp/cmake/qdk-uarch.cmake +++ b/cpp/cmake/qdk-uarch.cmake @@ -1,3 +1,4 @@ +# --- Step 1: Resolve QDK_UARCH --- # Check environment variable first, then CMake variable if(NOT DEFINED QDK_UARCH AND DEFINED ENV{QDK_UARCH}) set(QDK_UARCH $ENV{QDK_UARCH}) @@ -5,16 +6,8 @@ endif() if(DEFINED QDK_UARCH) message(STATUS "Using user-defined uarch: ${QDK_UARCH}") - # If 
compiler ID is not GNU or Clang, we cannot use -march flag, so we will not set QDK_UARCH_FLAGS - if(NOT (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang")) - message(WARNING "Compiler ${CMAKE_CXX_COMPILER_ID} does not support -march flag. QDK_UARCH_FLAGS will not be set.") - set(QDK_UARCH_USED "NONE" CACHE STRING "User-defined microarchitecture for optimization") - else() - set(QDK_UARCH_USED ${QDK_UARCH} CACHE STRING "User-defined microarchitecture for optimization") - set(QDK_UARCH_FLAGS "-march=${QDK_UARCH}" CACHE STRING "Compiler flags for user-defined microarchitecture") - endif() else() - # Set architecture-specific defaults based on the target platform + # Auto-detect based on the target platform if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64") set(QDK_UARCH "x86-64" CACHE STRING "Target microarchitecture") message(STATUS "Auto-detected x86_64 architecture, using: ${QDK_UARCH}") @@ -25,17 +18,21 @@ else() message(WARNING "Unknown architecture ${CMAKE_SYSTEM_PROCESSOR}. QDK_UARCH not set. This may degrade performance") return() endif() +endif() - # Set the used arch and flags - set(QDK_UARCH_USED ${QDK_UARCH} CACHE STRING "User-defined microarchitecture for optimization") - if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang") - set(QDK_UARCH_FLAGS "-march=${QDK_UARCH}" CACHE STRING "Compiler flags for user-defined microarchitecture") - else() - message(WARNING "Compiler syntax for ISA flags is unknown, defaulting to the system default generic ISA") - endif() +# --- Step 2: Set QDK_UARCH_FLAGS based on compiler and uarch --- +set(QDK_UARCH_USED ${QDK_UARCH} CACHE STRING "Target microarchitecture for optimization") +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang") + set(QDK_UARCH_FLAGS "-march=${QDK_UARCH}" CACHE STRING "Compiler flags for target microarchitecture") +elseif(MSVC) + # Users should set QDK_UARCH to a valid MSVC /arch: argument (e.g. 
AVX2, AVX512) + set(QDK_UARCH_FLAGS "/arch:${QDK_UARCH}" CACHE STRING "Compiler flags for target microarchitecture") +else() + message(WARNING "Compiler ${CMAKE_CXX_COMPILER_ID}: unknown flag syntax for ISA selection. QDK_UARCH_FLAGS will not be set.") + set(QDK_UARCH_USED "NONE" CACHE STRING "Target microarchitecture for optimization") endif() -# Test that the produced flags are sane +# --- Step 3: Validate the flags --- if(QDK_UARCH_FLAGS) message(STATUS "Testing QDK_UARCH_FLAGS: ${QDK_UARCH_FLAGS}") include(CheckCXXCompilerFlag) @@ -43,6 +40,6 @@ if(QDK_UARCH_FLAGS) if(NOT COMPILER_SUPPORTS_QDK_UARCH_FLAGS) message(WARNING "The compiler does not support the specified QDK_UARCH_FLAGS: ${QDK_UARCH_FLAGS}. Unsetting these flags.") unset(QDK_UARCH_FLAGS CACHE) - set(QDK_UARCH_USED "NONE" CACHE STRING "User-defined microarchitecture for optimization") + set(QDK_UARCH_USED "NONE" CACHE STRING "Target microarchitecture for optimization") endif() endif() diff --git a/cpp/cmake/third_party.cmake b/cpp/cmake/third_party.cmake index 21e170a00..fff911d17 100644 --- a/cpp/cmake/third_party.cmake +++ b/cpp/cmake/third_party.cmake @@ -2,7 +2,10 @@ include(DependencyManager) # Extract QDK_UARCH FLAGS -set(DEPENDENCY_BUILD_FLAGS BUILD_ARGS "${QDK_UARCH_FLAGS} -fPIC") +set(DEPENDENCY_BUILD_FLAGS BUILD_ARGS "${QDK_UARCH_FLAGS}") +if(NOT MSVC) + set(DEPENDENCY_BUILD_FLAGS "${DEPENDENCY_BUILD_FLAGS} -fPIC") +endif() # Save current warning settings get_property(_old_warn_deprecated CACHE CMAKE_WARN_DEPRECATED PROPERTY VALUE) @@ -69,11 +72,17 @@ set(GAUXC_ENABLE_MAGMA OFF CACHE BOOL "Enable gauxc MAGMA Support" FORCE) set(GAUXC_ENABLE_CUTLASS ON CACHE BOOL "Enable gauxc CUTLASS Support" FORCE) set(GAUXC_ENABLE_CUDA ${QDK_CHEMISTRY_ENABLE_GPU} CACHE BOOL "Enable gauxc CUDA Support" FORCE) set(GAUXC_ENABLE_MPI ${QDK_CHEMISTRY_ENABLE_MPI} CACHE BOOL "Enable gauxc MPI Support" FORCE) -set(GAUXC_ENABLE_OPENMP ${QDK_ENABLE_OPENMP} CACHE BOOL "Enable gauxc OpenMP Support" FORCE) +# Disable 
OpenMP in GauXC on Windows due to open issue: https://github.com/wavefunction91/GauXC/issues/196 +# Keep OpenMP for the rest of the project (MACIS, our own code). Re-enable once the upstream issue is fixed. +if(MSVC) + set(GAUXC_ENABLE_OPENMP OFF CACHE BOOL "Enable gauxc OpenMP Support" FORCE) +else() + set(GAUXC_ENABLE_OPENMP ${QDK_ENABLE_OPENMP} CACHE BOOL "Enable gauxc OpenMP Support" FORCE) +endif() handle_dependency(gauxc - GIT_REPOSITORY https://github.com/wavefunction91/gauxc.git - GIT_TAG 62fea07c9306dbd83dd18b6957358827ac9b3da0 + GIT_REPOSITORY https://github.com/lorisercole/gauxc.git + GIT_TAG 4e18eb1c4fc3b7bc1d2f91c59d8a4826b0997a4f BUILD_TARGET gauxc::gauxc INSTALL_TARGET gauxc::gauxc ${DEPENDENCY_BUILD_FLAGS} diff --git a/cpp/include/qdk/chemistry/algorithms/dynamical_correlation_calculator.hpp b/cpp/include/qdk/chemistry/algorithms/dynamical_correlation_calculator.hpp index c53e16889..e694a7d0c 100644 --- a/cpp/include/qdk/chemistry/algorithms/dynamical_correlation_calculator.hpp +++ b/cpp/include/qdk/chemistry/algorithms/dynamical_correlation_calculator.hpp @@ -76,7 +76,7 @@ class DynamicalCorrelationCalculator * * @return The algorithm's name */ - virtual std::string name() const = 0; + virtual std::string name() const override = 0; /** * @brief Access the algorithm's type name @@ -99,7 +99,7 @@ class DynamicalCorrelationCalculator * wavefunction, and optionally a bra wavefunction */ virtual DynamicalCorrelationResult _run_impl( - std::shared_ptr ansatz) const = 0; + std::shared_ptr ansatz) const override = 0; }; /** diff --git a/cpp/include/qdk/chemistry/algorithms/stability.hpp b/cpp/include/qdk/chemistry/algorithms/stability.hpp index 88b29b293..9663409cf 100644 --- a/cpp/include/qdk/chemistry/algorithms/stability.hpp +++ b/cpp/include/qdk/chemistry/algorithms/stability.hpp @@ -114,7 +114,7 @@ class StabilityChecker * @return A pair containing stability status and detailed results */ virtual std::pair> _run_impl( - std::shared_ptr wavefunction) 
const = 0; + std::shared_ptr wavefunction) const override = 0; }; /** diff --git a/cpp/include/qdk/chemistry/data/structure.hpp b/cpp/include/qdk/chemistry/data/structure.hpp index b1e689783..beea4dcd5 100644 --- a/cpp/include/qdk/chemistry/data/structure.hpp +++ b/cpp/include/qdk/chemistry/data/structure.hpp @@ -117,12 +117,12 @@ class Structure : public DataClass, /** * @brief Copy assignment operator */ - Structure& operator=(const Structure& other) = default; + Structure& operator=(const Structure&) = delete; /** * @brief Move assignment operator */ - Structure& operator=(Structure&& other) noexcept = default; + Structure& operator=(Structure&&) = delete; /** * @brief Destructor diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/localization/vvhv.cpp b/cpp/src/qdk/chemistry/algorithms/microsoft/localization/vvhv.cpp index a88462eef..576fb982d 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/localization/vvhv.cpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/localization/vvhv.cpp @@ -64,10 +64,11 @@ class VVHVLocalization : public IterativeOrbitalLocalizationScheme { std::shared_ptr inner_localizer) : IterativeOrbitalLocalizationScheme(settings), basis_set_(basis_set), - overlap_ori_(ao_overlap), minimal_basis_name_(minimal_basis_name), - basis_ori_fp_(utils::microsoft::convert_basis_set_from_qdk(*basis_set)), - inner_localizer_(inner_localizer) { + inner_localizer_(inner_localizer), + overlap_ori_(ao_overlap), + basis_ori_fp_( + utils::microsoft::convert_basis_set_from_qdk(*basis_set)) { QDK_LOG_TRACE_ENTERING(); // Initialize all data structures and pre-compute integrals diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/macis_base.hpp b/cpp/src/qdk/chemistry/algorithms/microsoft/macis_base.hpp index 8842f75c1..15daae169 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/macis_base.hpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/macis_base.hpp @@ -292,7 +292,7 @@ class Macis : public 
qdk::chemistry::algorithms::MultiConfigurationCalculator { */ virtual ~Macis() noexcept override = default; - virtual std::string name() const = 0; + virtual std::string name() const override = 0; protected: /** diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/include/qdk/chemistry/scf/eri/eri_multiplexer.h b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/include/qdk/chemistry/scf/eri/eri_multiplexer.h index 0366e9d25..3b9be9c7d 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/include/qdk/chemistry/scf/eri/eri_multiplexer.h +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/include/qdk/chemistry/scf/eri/eri_multiplexer.h @@ -64,9 +64,9 @@ class ERIMultiplexer : public ERI { const SCFConfig& cfg, double omega); /** - * @brief Default constructor (private, used by factory methods) + * @brief Default constructor (deleted — base class ERI has no default ctor) */ - ERIMultiplexer() noexcept = default; + ERIMultiplexer() noexcept = delete; public: /** diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/core/basis_set.cpp b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/core/basis_set.cpp index 0f7c2b3f1..04384cd28 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/core/basis_set.cpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/core/basis_set.cpp @@ -132,8 +132,8 @@ BasisSet::BasisSet(std::shared_ptr mol, int n_ecp_electrons, BasisMode mode, bool pure, bool sort) : mol(mol), mode(mode), - pure(pure), shells(input_shells), + pure(pure), ecp_shells(input_ecp_shells), element_ecp_electrons(element_ecp_electrons), n_ecp_electrons(n_ecp_electrons) { @@ -188,7 +188,7 @@ BasisSet::BasisSet(std::shared_ptr mol, BasisSet::BasisSet(std::shared_ptr mol, const std::vector& input_shells, BasisMode mode, bool pure, bool sort) - : mol(mol), mode(mode), pure(pure), shells(input_shells) { + : mol(mol), mode(mode), shells(input_shells), pure(pure) { #ifdef QDK_CHEMISTRY_ENABLE_MPI if (mpi::get_world_size() > 1) { 
MPI_Barrier(MPI_COMM_WORLD); @@ -245,7 +245,7 @@ BasisSet::BasisSet(std::shared_ptr mol, const std::string& path, "basis" / (normalized_path + ".json"); name = normalized_path; } else { - name = bs_path.stem(); + name = bs_path.stem().string(); } if (!std::filesystem::exists(bs_path)) { auto compressed_path = QDKChemistryConfig::get_resources_dir() / @@ -425,7 +425,7 @@ Shell Shell::from_json(const nlohmann::ordered_json& rec, const std::shared_ptr mol) { QDK_LOG_TRACE_ENTERING(); - Shell sh; + Shell sh{}; sh.atom_index = rec["atom"].template get(); sh.angular_momentum = rec["am"].template get(); diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/scf_algorithm/scf_algorithm.cpp b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/scf_algorithm/scf_algorithm.cpp index 48c620e79..f1f4250ff 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/scf_algorithm/scf_algorithm.cpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/scf_algorithm/scf_algorithm.cpp @@ -36,8 +36,8 @@ SCFAlgorithm::SCFAlgorithm(const SCFContext& ctx) : ctx_(ctx), step_count_(0), last_energy_(0.0), - density_rms_(std::numeric_limits::infinity()), - delta_energy_(std::numeric_limits::infinity()) { + delta_energy_(std::numeric_limits::infinity()), + density_rms_(std::numeric_limits::infinity()) { QDK_LOG_TRACE_ENTERING(); auto num_atomic_orbitals = ctx.basis_set->num_atomic_orbitals; auto num_density_matrices = diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/util/int1e.cpp b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/util/int1e.cpp index 5e58736ba..cd1b238dc 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/util/int1e.cpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/src/util/int1e.cpp @@ -91,7 +91,7 @@ class Libint2Engine : public OneBodyIntegralEngine { * @brief Get number of operator components * @return Number of matrices/operators this engine computes */ - size_t nopers() const { + size_t nopers() const override { 
QDK_LOG_TRACE_ENTERING(); return engine_.results().size(); } @@ -179,7 +179,7 @@ class ECPIntEngine : public OneBodyIntegralEngine { * @return Number of matrices this engine computes (1 for integrals, 3*natom * for gradients) */ - size_t nopers() const { + size_t nopers() const override { QDK_LOG_TRACE_ENTERING(); return buf_.size(); } diff --git a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/tests/util_tests.cpp b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/tests/util_tests.cpp index d8c2b8c47..614cdf9b7 100644 --- a/cpp/src/qdk/chemistry/algorithms/microsoft/scf/tests/util_tests.cpp +++ b/cpp/src/qdk/chemistry/algorithms/microsoft/scf/tests/util_tests.cpp @@ -472,11 +472,11 @@ TEST(ClassRegistryTest, PrimitiveKey) { TEST(AtomGuessTest, BasisSetMap) { // Test that the map correctly identifies equivalent basis sets auto mol = std::make_shared(); - mol->atomic_nums = {1}; + mol->atomic_nums = {3}; mol->n_atoms = 1; - mol->atomic_charges = {1}; - mol->total_nuclear_charge = 1; - mol->n_electrons = 1; + mol->atomic_charges = {3}; + mol->total_nuclear_charge = 3; + mol->n_electrons = 3; mol->coords = {{0.0, 0.0, 0.0}}; // Create two identical basis sets @@ -485,10 +485,12 @@ TEST(AtomGuessTest, BasisSetMap) { auto basis2 = BasisSet::from_database_json(mol, "sto-3g", BasisMode::PSI4, true, false); - // Create same basis set in different shell order + // Create same basis set with reversed shell order via JSON round-trip. + // Lithium STO-3G has 3 shells, so reversing produces a different ordering + // that the BasisEqChecker should reject. 
auto basis_json = basis1->to_json(); - // Reverse the shells - std::reverse(basis_json["shells"].begin(), basis_json["shells"].end()); + std::reverse(basis_json["electron_shells"].begin(), + basis_json["electron_shells"].end()); auto basis3 = BasisSet::from_serialized_json(mol, basis_json); // Create a different basis set (different basis name) @@ -518,7 +520,7 @@ TEST(AtomGuessTest, BasisSetMap) { EXPECT_NE(it2, basis_map.end()); EXPECT_TRUE(it2->second.isApprox(RowMajorMatrix::Identity( basis2->num_atomic_orbitals, basis2->num_atomic_orbitals))); - // Retrieve using basis3 (should not be found) + // Retrieve using basis3 (should not be found — shells are reversed) auto it3 = basis_map.find(*basis3); EXPECT_EQ(it3, basis_map.end()); // Retrieve using basis4 (should not be found) diff --git a/cpp/src/qdk/chemistry/data/basis_set.cpp b/cpp/src/qdk/chemistry/data/basis_set.cpp index 9ceb43bc2..fae957056 100644 --- a/cpp/src/qdk/chemistry/data/basis_set.cpp +++ b/cpp/src/qdk/chemistry/data/basis_set.cpp @@ -707,8 +707,8 @@ BasisSet::BasisSet(const BasisSet& other) : _name(other._name), _atomic_orbital_type(other._atomic_orbital_type), _shells_per_atom(other._shells_per_atom), - _ecp_name(other._ecp_name), _ecp_shells_per_atom(other._ecp_shells_per_atom), + _ecp_name(other._ecp_name), _ecp_electrons(other._ecp_electrons) { QDK_LOG_TRACE_ENTERING(); if (other._structure) { diff --git a/cpp/src/qdk/chemistry/data/hdf5_serialization.cpp b/cpp/src/qdk/chemistry/data/hdf5_serialization.cpp index b715fe20b..ed48d5770 100644 --- a/cpp/src/qdk/chemistry/data/hdf5_serialization.cpp +++ b/cpp/src/qdk/chemistry/data/hdf5_serialization.cpp @@ -187,9 +187,12 @@ void save_vector_to_group(H5::Group& group, const std::string& dataset_name, if (!vector.empty()) { hsize_t dims[1] = {vector.size()}; H5::DataSpace dataspace(1, dims); + // Use NATIVE_UINT64 (not NATIVE_ULONG) because on Windows LLP64 + // unsigned long is 4 bytes while size_t is 8 bytes. 
+ static_assert(sizeof(size_t) == 8); H5::DataSet dataset = group.createDataSet( - dataset_name, H5::PredType::NATIVE_ULONG, dataspace); - dataset.write(vector.data(), H5::PredType::NATIVE_ULONG); + dataset_name, H5::PredType::NATIVE_UINT64, dataspace); + dataset.write(vector.data(), H5::PredType::NATIVE_UINT64); } } @@ -225,7 +228,8 @@ std::vector load_size_vector_from_group( hsize_t dims[1]; dataspace.getSimpleExtentDims(dims); std::vector vector(dims[0]); - dataset.read(vector.data(), H5::PredType::NATIVE_ULONG); + static_assert(sizeof(size_t) == 8); + dataset.read(vector.data(), H5::PredType::NATIVE_UINT64); return vector; } diff --git a/cpp/src/qdk/chemistry/data/orbitals.cpp b/cpp/src/qdk/chemistry/data/orbitals.cpp index 248d78195..42aa3e0ae 100644 --- a/cpp/src/qdk/chemistry/data/orbitals.cpp +++ b/cpp/src/qdk/chemistry/data/orbitals.cpp @@ -1025,7 +1025,7 @@ void Orbitals::to_hdf5(H5::Group& group) const { // Save essential metadata that can't be computed from data unsigned num_atomic_orbitals = get_num_atomic_orbitals(); unsigned num_molecular_orbitals = get_num_molecular_orbitals(); - bool restricted = is_restricted(); + hbool_t restricted = static_cast(is_restricted()); H5::DataSet aos_dataset = metadata_group.createDataSet( "num_atomic_orbitals", H5::PredType::NATIVE_UINT, scalar_space); @@ -1427,17 +1427,22 @@ void Orbitals::_save_orbital_metadata_to_hdf5( mos_dataset.write(&num_molecular_orbitals, H5::PredType::NATIVE_UINT); // Save boolean flags + // Use hbool_t intermediaries — hbool_t is typically unsigned int (4 bytes), + // while C++ bool is 1 byte. Writing a bool* with NATIVE_HBOOL is UB. 
+ hbool_t hb_is_restricted = static_cast(is_restricted); H5::DataSet restricted_dataset = metadata_group.createDataSet( "is_restricted", H5::PredType::NATIVE_HBOOL, scalar_space); - restricted_dataset.write(&is_restricted, H5::PredType::NATIVE_HBOOL); + restricted_dataset.write(&hb_is_restricted, H5::PredType::NATIVE_HBOOL); + hbool_t hb_has_overlap_matrix = static_cast(has_overlap_matrix); H5::DataSet overlap_dataset = metadata_group.createDataSet( "has_overlap_matrix", H5::PredType::NATIVE_HBOOL, scalar_space); - overlap_dataset.write(&has_overlap_matrix, H5::PredType::NATIVE_HBOOL); + overlap_dataset.write(&hb_has_overlap_matrix, H5::PredType::NATIVE_HBOOL); + hbool_t hb_has_basis_set = static_cast(has_basis_set); H5::DataSet basis_dataset = metadata_group.createDataSet( "has_basis_set", H5::PredType::NATIVE_HBOOL, scalar_space); - basis_dataset.write(&has_basis_set, H5::PredType::NATIVE_HBOOL); + basis_dataset.write(&hb_has_basis_set, H5::PredType::NATIVE_HBOOL); } bool Orbitals::is_unrestricted() const { @@ -1937,7 +1942,7 @@ void ModelOrbitals::to_hdf5(H5::Group& group) const { // Save ModelOrbitals metadata unsigned num_orbitals = _num_orbitals; - bool is_restricted = _is_restricted; + hbool_t hb_is_restricted = static_cast(_is_restricted); H5::DataSet orbitals_dataset = metadata_group.createDataSet( "num_orbitals", H5::PredType::NATIVE_UINT, scalar_space); @@ -1945,7 +1950,7 @@ void ModelOrbitals::to_hdf5(H5::Group& group) const { H5::DataSet restricted_dataset = metadata_group.createDataSet( "is_restricted", H5::PredType::NATIVE_HBOOL, scalar_space); - restricted_dataset.write(&is_restricted, H5::PredType::NATIVE_HBOOL); + restricted_dataset.write(&hb_is_restricted, H5::PredType::NATIVE_HBOOL); // Save active space indices save_vector_to_group(group, "active_space_indices_alpha", @@ -1986,8 +1991,9 @@ std::shared_ptr ModelOrbitals::from_hdf5(H5::Group& group) { H5::DataSet restricted_dataset = metadata_group.openDataSet("is_restricted"); - bool 
is_restricted; - restricted_dataset.read(&is_restricted, H5::PredType::NATIVE_HBOOL); + hbool_t hb_is_restricted; + restricted_dataset.read(&hb_is_restricted, H5::PredType::NATIVE_HBOOL); + bool is_restricted = static_cast(hb_is_restricted); // Load active space indices std::vector active_indices_alpha, active_indices_beta; diff --git a/cpp/src/qdk/chemistry/data/wavefunction_containers/cc.cpp b/cpp/src/qdk/chemistry/data/wavefunction_containers/cc.cpp index 0b08e97c2..b88aba59f 100644 --- a/cpp/src/qdk/chemistry/data/wavefunction_containers/cc.cpp +++ b/cpp/src/qdk/chemistry/data/wavefunction_containers/cc.cpp @@ -41,8 +41,8 @@ CoupledClusterContainer::CoupledClusterContainer( const std::optional& t2_amplitudes_bbbb) : WavefunctionContainer( WavefunctionType::NotSelfDual), // Always force NotSelfDual for CC - _wavefunction(wavefunction), - _orbitals(orbitals) { + _orbitals(orbitals), + _wavefunction(wavefunction) { QDK_LOG_TRACE_ENTERING(); if (!orbitals) { throw std::invalid_argument("Orbitals cannot be null"); @@ -488,7 +488,8 @@ void CoupledClusterContainer::to_hdf5(H5::Group& group) const { bool is_complex = this->is_complex(); H5::Attribute is_complex_attr = group.createAttribute( "is_complex", H5::PredType::NATIVE_HBOOL, H5::DataSpace(H5S_SCALAR)); - is_complex_attr.write(H5::PredType::NATIVE_HBOOL, &is_complex); + hbool_t hb_is_complex = static_cast(is_complex); + is_complex_attr.write(H5::PredType::NATIVE_HBOOL, &hb_is_complex); // store amplitudes if (_t1_amplitudes_aa) { diff --git a/cpp/src/qdk/chemistry/utils/logger.cpp b/cpp/src/qdk/chemistry/utils/logger.cpp index ec87e7915..150a222cf 100644 --- a/cpp/src/qdk/chemistry/utils/logger.cpp +++ b/cpp/src/qdk/chemistry/utils/logger.cpp @@ -4,6 +4,11 @@ #include +#ifdef _WIN32 +#include +#endif + +#include #include #include #include @@ -15,6 +20,48 @@ namespace qdk::chemistry::utils { +#ifdef _WIN32 +/* +On Windows, spdlog's wincolor_sink and stdout_sink both cache a Windows HANDLE +(via GetStdHandle / 
_get_osfhandle) at construction time. When pytest's capfd +later redirects fd 1 via dup2(), the cached HANDLE still points to the original +stdout — so all Logger output bypasses capture. + +See spdlog 1.17.0 sources: + https://github.com/gabime/spdlog/blob/v1.17.0/include/spdlog/sinks/wincolor_sink-inl.h#L164-L165 + GetStdHandle(STD_OUTPUT_HANDLE) called once in constructor, stored as + out_handle_ + https://github.com/gabime/spdlog/blob/v1.17.0/include/spdlog/sinks/wincolor_sink-inl.h#L157 + WriteFile(out_handle_, ...) on every write + https://github.com/gabime/spdlog/blob/v1.17.0/include/spdlog/sinks/wincolor_sink.h#L42 + void *out_handle_ member (never refreshed) + +On Linux this problem does not occur because stdout_color_sink_mt is aliased to +ansicolor_stdout_sink, which writes via fwrite(stdout): + https://github.com/gabime/spdlog/blob/v1.17.0/include/spdlog/sinks/ansicolor_sink-inl.h#L123 + https://github.com/gabime/spdlog/blob/v1.17.0/include/spdlog/sinks/stdout_color_sinks.h#L16-L26 + +This sink writes via fwrite(stdout), which goes through the C runtime's fd layer +(fd 1) and respects dup2() redirections. + +Note: unlike ansicolor_sink, this sink does not emit ANSI color codes, so log +output on Windows is uncolored. Colored output could be added by wrapping the +formatted message's color range (msg.color_range_start / color_range_end) in +ANSI escape sequences, as ansicolor_sink does. 
+See: https://github.com/gabime/spdlog/issues/3138 +*/ +class stdout_fd_sink final : public spdlog::sinks::base_sink { + protected: + void sink_it_(const spdlog::details::log_msg& msg) override { + spdlog::memory_buf_t formatted; + formatter_->format(msg, formatted); + std::fwrite(formatted.data(), 1, formatted.size(), stdout); + std::fflush(stdout); + } + void flush_() override { std::fflush(stdout); } +}; +#endif + // Track our own global level to avoid spdlog::get_level() issues // Map compile-time level to spdlog level static constexpr spdlog::level::level_enum default_level_from_config() { @@ -197,7 +244,13 @@ static void apply_spdlog_global_level_and_flush_policy( static void init_global_logger() { try { +#ifdef _WIN32 + auto sink = std::make_shared(); + g_logger = std::make_shared("qdk-chemistry", sink); + spdlog::register_logger(g_logger); +#else g_logger = spdlog::stdout_color_mt("qdk-chemistry"); +#endif } catch (const spdlog::spdlog_ex&) { g_logger = spdlog::get("qdk-chemistry"); } diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt index d4d2f1b59..1a4e78f9d 100644 --- a/cpp/tests/CMakeLists.txt +++ b/cpp/tests/CMakeLists.txt @@ -74,8 +74,10 @@ function(add_gtest_executable test_source) if(QDK_CHEMISTRY_ENABLE_COVERAGE) # Handle coverage of inline functions - target_compile_options(${test_name} PRIVATE --coverage -fprofile-arcs -ftest-coverage) - target_link_libraries(${test_name} PRIVATE --coverage) + if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang" AND NOT MSVC) + target_compile_options(${test_name} PRIVATE --coverage -fprofile-arcs -ftest-coverage) + target_link_libraries(${test_name} PRIVATE --coverage) + endif() endif() # Discover tests automatically diff --git a/cpp/tests/test_hamiltonian.cpp b/cpp/tests/test_hamiltonian.cpp index a273ee9ce..66d16660a 100644 --- a/cpp/tests/test_hamiltonian.cpp +++ b/cpp/tests/test_hamiltonian.cpp @@ -1375,29 +1375,33 @@ TEST_F(HamiltonianTest, SparseContainerFCIDUMP) { std::string filename = 
"test.sparse.hamiltonian.fcidump"; EXPECT_NO_THROW(h.to_fcidump_file(filename, 1, 1)); - std::ifstream file(filename); - EXPECT_TRUE(file.is_open()); - - std::stringstream buffer; - buffer << file.rdbuf(); - std::string fcidump_content = buffer.str(); - - // Two-body integrals from sparse map (sorted by key: (0,0,0,0) then - // (1,1,1,1)), one-body lower triangle in column-major order, then core - // energy. - const std::string reference_fcidump_contents = - "&FCI NORB=2, NELEC=2, MS2=0,\n" - "ORBSYM=1,1,\n" - "ISYM=1,\n" - "&END\n" - " 2.0000000000000000e+00 1 1 1 1\n" - " 3.0000000000000000e+00 2 2 2 2\n" - " 1.0000000000000000e+00 1 1 0 0\n" - " 5.0000000000000000e-01 2 1 0 0\n" - " 1.0000000000000000e+00 2 2 0 0\n" - " 1.5000000000000000e+00 0 0 0 0\n"; - - EXPECT_EQ(fcidump_content, reference_fcidump_contents); + // Scope the ifstream so it closes before remove() — Windows does not + // allow deleting a file while a handle is open. + { + std::ifstream file(filename); + EXPECT_TRUE(file.is_open()); + + std::stringstream buffer; + buffer << file.rdbuf(); + std::string fcidump_content = buffer.str(); + + // Two-body integrals from sparse map (sorted by key: (0,0,0,0) then + // (1,1,1,1)), one-body lower triangle in column-major order, then core + // energy. 
+ const std::string reference_fcidump_contents = + "&FCI NORB=2, NELEC=2, MS2=0,\n" + "ORBSYM=1,1,\n" + "ISYM=1,\n" + "&END\n" + " 2.0000000000000000e+00 1 1 1 1\n" + " 3.0000000000000000e+00 2 2 2 2\n" + " 1.0000000000000000e+00 1 1 0 0\n" + " 5.0000000000000000e-01 2 1 0 0\n" + " 1.0000000000000000e+00 2 2 0 0\n" + " 1.5000000000000000e+00 0 0 0 0\n"; + + EXPECT_EQ(fcidump_content, reference_fcidump_contents); + } std::filesystem::remove(filename); } @@ -1739,13 +1743,17 @@ TEST_F(HamiltonianTest, FCIDUMPActiveSpaceConsistency) { 1); }); - // Verify file was created and has correct NORB (should be 2, not 3) - std::ifstream file("test_active_space.hamiltonian.fcidump"); - EXPECT_TRUE(file.is_open()); + // Verify file was created and has correct NORB (should be 2, not 3). + // Scope the ifstream so it closes before remove() — Windows does not + // allow deleting a file while a handle is open. + { + std::ifstream file("test_active_space.hamiltonian.fcidump"); + EXPECT_TRUE(file.is_open()); - std::string first_line; - std::getline(file, first_line); - EXPECT_TRUE(first_line.find("NORB=2") != std::string::npos); + std::string first_line; + std::getline(file, first_line); + EXPECT_TRUE(first_line.find("NORB=2") != std::string::npos); + } // Clean up std::filesystem::remove("test_active_space.hamiltonian.fcidump"); diff --git a/external/macis/CMakeLists.txt b/external/macis/CMakeLists.txt index 173df3748..e34a3e746 100644 --- a/external/macis/CMakeLists.txt +++ b/external/macis/CMakeLists.txt @@ -55,7 +55,8 @@ if(MACIS_ENABLE_PYTHON) endif() # Tests +option( MACIS_ENABLE_TESTS "Build MACIS Tests" ON ) include(CTest) -if(BUILD_TESTING) +if(BUILD_TESTING AND MACIS_ENABLE_TESTS) add_subdirectory(tests) endif() diff --git a/external/macis/cmake/macis-ips4o.cmake b/external/macis/cmake/macis-ips4o.cmake index e1dfd0db4..55e620330 100644 --- a/external/macis/cmake/macis-ips4o.cmake +++ b/external/macis/cmake/macis-ips4o.cmake @@ -13,4 +13,6 @@ FetchContent_Declare( ips4o 
FetchContent_MakeAvailable( ips4o ) add_library( ips4o INTERFACE ) target_include_directories( ips4o INTERFACE ${ips4o_SOURCE_DIR} ) -target_link_libraries( ips4o INTERFACE atomic ) +if(NOT WIN32) + target_link_libraries( ips4o INTERFACE atomic ) +endif() diff --git a/external/macis/cmake/macis-spdlog.cmake b/external/macis/cmake/macis-spdlog.cmake index 2358c3a0b..a3df6c483 100644 --- a/external/macis/cmake/macis-spdlog.cmake +++ b/external/macis/cmake/macis-spdlog.cmake @@ -16,7 +16,9 @@ if( NOT spdlog_FOUND ) ) set(SPDLOG_INSTALL "ON" CACHE BOOL "Install SPDLOG" FORCE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") + if(NOT MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") + endif() FetchContent_MakeAvailable( spdlog ) set(MACIS_SPDLOG_EXPORT spdlog CACHE STRING "" FORCE) else() diff --git a/external/macis/cmake/macis-uarch.cmake b/external/macis/cmake/macis-uarch.cmake index 31c6c9b6c..21bb855e5 100644 --- a/external/macis/cmake/macis-uarch.cmake +++ b/external/macis/cmake/macis-uarch.cmake @@ -1,19 +1,24 @@ if(DEFINED MACIS_UARCH) message(STATUS "Using user-defined uarch: ${MACIS_UARCH}") - # If compiler ID is not GNU or Clang, we cannot use -march flag, so we will not set MACIS_UARCH_FLAGS - if(NOT (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang")) - message(WARNING "Compiler ${CMAKE_CXX_COMPILER_ID} does not support -march flag. MACIS_UARCH_FLAGS will not be set.") - set(MACIS_UARCH_USED "NONE" CACHE STRING "User-defined microarchitecture for optimization") - else() - set(MACIS_UARCH_USED ${MACIS_UARCH} CACHE STRING "User-defined microarchitecture for optimization") - set(MACIS_UARCH_FLAGS "-march=${MACIS_UARCH}" CACHE STRING "Compiler flags for user-defined microarchitecture") - endif() else() message(WARNING "MACIS_UARCH not defined. 
This may degrade performance") set(MACIS_UARCH_USED "NONE" CACHE STRING "User-defined microarchitecture for optimization") + return() +endif() + +# Set MACIS_UARCH_FLAGS based on compiler and uarch +set(MACIS_UARCH_USED ${MACIS_UARCH} CACHE STRING "Target microarchitecture for optimization") +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang") + set(MACIS_UARCH_FLAGS "-march=${MACIS_UARCH}" CACHE STRING "Compiler flags for target microarchitecture") +elseif(MSVC) + # Users should set MACIS_UARCH to a valid MSVC /arch: argument (e.g. AVX2, AVX512) + set(MACIS_UARCH_FLAGS "/arch:${MACIS_UARCH}" CACHE STRING "Compiler flags for target microarchitecture") +else() + message(WARNING "Compiler ${CMAKE_CXX_COMPILER_ID}: unknown flag syntax for ISA selection. MACIS_UARCH_FLAGS will not be set.") + set(MACIS_UARCH_USED "NONE" CACHE STRING "Target microarchitecture for optimization") endif() -# Test that the produced flags are sane +# Validate the flags if(MACIS_UARCH_FLAGS) message(STATUS "Testing MACIS_UARCH_FLAGS: ${MACIS_UARCH_FLAGS}") include(CheckCXXCompilerFlag) @@ -21,6 +26,6 @@ if(MACIS_UARCH_FLAGS) if(NOT COMPILER_SUPPORTS_MACIS_UARCH_FLAGS) message(WARNING "The compiler does not support the specified MACIS_UARCH_FLAGS: ${MACIS_UARCH_FLAGS}. 
Unsetting these flags.") unset(MACIS_UARCH_FLAGS CACHE) - set(MACIS_UARCH_USED "NONE" CACHE STRING "User-defined microarchitecture for optimization") + set(MACIS_UARCH_USED "NONE" CACHE STRING "Target microarchitecture for optimization") endif() endif() diff --git a/external/macis/include/macis/bitset_operations.hpp b/external/macis/include/macis/bitset_operations.hpp index 2328ea0f6..19a4fde42 100644 --- a/external/macis/include/macis/bitset_operations.hpp +++ b/external/macis/include/macis/bitset_operations.hpp @@ -17,6 +17,51 @@ namespace macis { +// Portable bit-manipulation helpers +#if defined(_MSC_VER) + +/** + * @brief CLZ (count leading zeros) for MSVC + */ +inline auto clz(unsigned int i) { + if (i == 0) return static_cast(sizeof(unsigned int) * CHAR_BIT); + + unsigned long idx; + const auto scanned = _BitScanReverse(&idx, i); + assert(scanned); + return 31 - static_cast(idx); +} +inline auto clz(unsigned long int i) { + return clz(static_cast(i)); +} +inline auto clz(unsigned long long int i) { + if (i == 0) + return static_cast(sizeof(unsigned long long int) * CHAR_BIT); + + unsigned long idx; + const auto scanned = _BitScanReverse64(&idx, i); + assert(scanned); + return 63 - static_cast(idx); +} + +/** + * @brief ffsl/ffsll equivalents for MSVC + * + * Returns the position of the first set bit (1-indexed), or 0 if none. 
+ */ +inline int macis_ffsl(unsigned long i) { + unsigned long idx; + if (_BitScanForward(&idx, i)) return static_cast(idx) + 1; + return 0; +} +inline int macis_ffsll(unsigned long long i) { + unsigned long idx; + if (_BitScanForward64(&idx, i)) return static_cast(idx) + 1; + return 0; +} + +#else // GCC/Clang + /** * @brief Typesafe CLZ * @@ -47,6 +92,14 @@ inline auto clz(unsigned long int i) { return __builtin_clzl(i); } */ inline auto clz(unsigned long long int i) { return __builtin_clzll(i); } +/** + * @brief ffsl/ffsll wrappers for GCC/Clang + */ +inline int macis_ffsl(unsigned long i) { return ffsl(i); } +inline int macis_ffsll(unsigned long long i) { return ffsll(i); } + +#endif // _MSC_VER + /** * @brief Typesafe FLS * @@ -127,9 +180,18 @@ template uint128_t to_uint128(std::bitset bits) { static_assert(N <= 128, "N > 128"); if constexpr (N == 128) { +#if defined(_MSC_VER) && !defined(__clang__) + uint128_t result; + result.lo = fast_to_ullong(bits); + std::bitset<64> hi_bits; + for (size_t i = 0; i < 64; ++i) hi_bits[i] = bits[i + 64]; + result.hi = fast_to_ullong(hi_bits); + return result; +#else alignas(alignof(uint128_t)) std::bitset cpy = bits; auto _x = reinterpret_cast(&cpy); return *_x; +#endif } else { return fast_to_ullong(bits); } @@ -202,9 +264,9 @@ std::bitset full_mask(size_t i) { template uint32_t ffs(const std::bitset& bits) { if constexpr (N <= 32) - return ffsl(fast_to_ulong(bits)); + return macis_ffsl(fast_to_ulong(bits)); else if constexpr (N <= 64) - return ffsll(fast_to_ullong(bits)); + return macis_ffsll(fast_to_ullong(bits)); else if constexpr (N <= 128) { // For 128-bit, check low 64 bits then high 64 bits std::bitset<64> low_bits; @@ -213,9 +275,9 @@ uint32_t ffs(const std::bitset& bits) { low_bits[i] = bits[i]; if (i < N - 64) high_bits[i] = bits[i + 64]; } - auto low_result = ffsll(fast_to_ullong(low_bits)); + auto low_result = macis_ffsll(fast_to_ullong(low_bits)); if (low_result) return low_result; - auto high_result = 
ffsll(fast_to_ullong(high_bits)); + auto high_result = macis_ffsll(fast_to_ullong(high_bits)); if (high_result) return high_result + 64; return 0; } else { diff --git a/external/macis/include/macis/types.hpp b/external/macis/include/macis/types.hpp index 1949faed9..83d5ea0bf 100644 --- a/external/macis/include/macis/types.hpp +++ b/external/macis/include/macis/types.hpp @@ -8,7 +8,12 @@ */ #pragma once + +#if defined(_MSC_VER) +#include +#else #include +#endif #include #include @@ -64,11 +69,50 @@ using wfn_t = std::bitset; template using wavefunction_iterator_t = typename std::vector >::iterator; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(push) +#pragma warning(disable : 4146) // unary minus on unsigned +// MSVC does not support __uint128_t; use a struct-based polyfill +struct uint128_t { + uint64_t lo; + uint64_t hi; + uint128_t() : lo(0), hi(0) {} + uint128_t(uint64_t v) : lo(v), hi(0) {} + bool operator<(const uint128_t& o) const { + return hi < o.hi || (hi == o.hi && lo < o.lo); + } + bool operator==(const uint128_t& o) const { return lo == o.lo && hi == o.hi; } + uint128_t operator<<(int shift) const { + uint128_t r; + if (shift >= 128) { + r.lo = 0; + r.hi = 0; + } else if (shift >= 64) { + r.lo = 0; + r.hi = lo << (shift - 64); + } else if (shift > 0) { + r.lo = lo << shift; + r.hi = (hi << shift) | (lo >> (64 - shift)); + } else { + r.lo = lo; + r.hi = hi; + } + return r; + } + uint128_t& operator|=(const uint128_t& o) { + lo |= o.lo; + hi |= o.hi; + return *this; + } +}; +#pragma warning(pop) +#else #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" /// @brief 128-bit unsigned integer type using uint128_t = __uint128_t; #pragma GCC diagnostic pop +#endif /** * @brief A type-safe wrapper that associates a value with a specific parameter diff --git a/external/macis/python/tests/test_pymacis.py b/external/macis/python/tests/test_pymacis.py index c0d4056a0..9cce5c759 100644 --- 
a/external/macis/python/tests/test_pymacis.py +++ b/external/macis/python/tests/test_pymacis.py @@ -336,9 +336,9 @@ def test_write_fcidump_with_threshold(tmp_path): pymacis.write_fcidump(str(output_path2), header, T, V, core_energy, 1e-13) # Read file contents - with open(output_path1, "r") as f: + with open(output_path1, "r", encoding="utf-8") as f: content_tight = f.read() - with open(output_path2, "r") as f: + with open(output_path2, "r", encoding="utf-8") as f: content_loose = f.read() # With tight threshold, small integrals should be present @@ -465,7 +465,7 @@ def test_write_fcidump_file_format(tmp_path): pymacis.write_fcidump(str(output_path), header, T, V, core_energy) # Read file content and verify format - with open(output_path, "r") as f: + with open(output_path, "r", encoding="utf-8") as f: content = f.read() # Check header format @@ -520,7 +520,7 @@ def test_fcidump_format_compatibility(tmp_path): # Manually create FCIDUMP file in indices first format fcidump_indices_first = tmp_path / "test_indices_first.fcidump" - with open(fcidump_indices_first, "w") as f: + with open(fcidump_indices_first, "w", encoding="utf-8") as f: # Write header f.write("&FCI NORB=2,NELEC=2,MS2=0,\n") f.write(" ISYM=1,\n") diff --git a/external/macis/src/lobpcgxx/CMakeLists.txt b/external/macis/src/lobpcgxx/CMakeLists.txt index bb25d79a7..dbb188c26 100644 --- a/external/macis/src/lobpcgxx/CMakeLists.txt +++ b/external/macis/src/lobpcgxx/CMakeLists.txt @@ -16,14 +16,18 @@ endif() set( gpu_backend "none" CACHE STRING "GPU backend to use" FORCE) message( WARNING "Turning off GPU Backend for BLAS++/LAPACK++") set(build_tests OFF CACHE BOOL "Build BLAS++/LAPACK++ tests" FORCE) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") +if(NOT MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") +endif() find_package(lapackpp CONFIG QUIET) if(NOT lapackpp_FOUND ) -include( FetchContent ) + include( FetchContent ) FetchContent_Declare( blaspp GIT_REPOSITORY 
https://github.com/icl-utk-edu/blaspp.git GIT_TAG v2025.05.28 + PATCH_COMMAND git apply ${CMAKE_CURRENT_SOURCE_DIR}/cmake/blaspp_cache_defs.patch + UPDATE_DISCONNECTED TRUE ) FetchContent_Declare( lapackpp GIT_REPOSITORY https://github.com/icl-utk-edu/lapackpp.git @@ -32,13 +36,23 @@ include( FetchContent ) FetchContent_MakeAvailable( blaspp ) FetchContent_MakeAvailable( lapackpp ) - target_compile_definitions( lapackpp PUBLIC LAPACK_COMPLEX_CPP ) + + if(MSVC) + # lapackpp sources use std::complex directly but on Windows _MSC_VER causes + # config.h to skip #include . Force-include it. + target_compile_options( lapackpp PRIVATE /FIcomplex ) + + # Suppress all warnings from third-party blaspp/lapackpp (clang-cl) + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + target_compile_options( blaspp PRIVATE -w ) + target_compile_options( lapackpp PRIVATE -w ) + endif() + endif() endif() add_library( lobpcgxx INTERFACE ) target_link_libraries( lobpcgxx INTERFACE blaspp lapackpp ) target_compile_features( lobpcgxx INTERFACE cxx_std_20 ) -target_compile_definitions( lobpcgxx INTERFACE LAPACK_COMPLEX_CPP ) target_include_directories( lobpcgxx INTERFACE $ diff --git a/external/macis/src/lobpcgxx/cmake/blaspp_cache_defs.patch b/external/macis/src/lobpcgxx/cmake/blaspp_cache_defs.patch new file mode 100644 index 000000000..352736a7c --- /dev/null +++ b/external/macis/src/lobpcgxx/cmake/blaspp_cache_defs.patch @@ -0,0 +1,33 @@ +# blaspp bug: blaspp_defs_ (e.g. -DBLAS_FORTRAN_ADD_) is never cached. +# On reconfigure the detection is skipped (run_=false), blaspp_defs_ is empty, +# and blaspp_defines gets overwritten with empty (CACHE INTERNAL implies FORCE), +# causing defines.h to lose all BLAS mangling definitions. +# Fix: cache blaspp_defs_ and restore it in the two cached-settings paths. 
+diff --git a/cmake/BLASFinder.cmake b/cmake/BLASFinder.cmake +index 2994f85..fa494c9 100644 +--- a/cmake/BLASFinder.cmake ++++ b/cmake/BLASFinder.cmake +@@ -26,6 +26,7 @@ if (BLAS_LIBRARIES) + # Already checked this BLAS_LIBRARIES; load cached results. + message( STATUS "Using cached BLAS_LIBRARIES settings" ) + set( BLAS_FOUND "${blas_found_cached}" ) ++ set( blaspp_defs_ "${blaspp_defs_cached}" ) + set( run_ false ) + else() + # Need to check BLAS_LIBRARIES. +@@ -42,6 +43,7 @@ elseif ( "${blas}" STREQUAL "${blas_cached}" + message( STATUS "Using cached blas settings" ) + set( BLAS_LIBRARIES "${blas_libraries_cached}" ) + set( BLAS_FOUND "${blas_found_cached}" ) ++ set( blaspp_defs_ "${blaspp_defs_cached}" ) + set( run_ false ) + else() + # Search blas, blas_int, etc. +@@ -549,6 +551,7 @@ set( blas_fortran_cached "${blas_fortran}" CACHE INTERNAL "" ) + set( blas_int_cached "${blas_int}" CACHE INTERNAL "" ) + set( blas_threaded_cached "${blas_threaded}" CACHE INTERNAL "" ) + ++set( blaspp_defs_cached "${blaspp_defs_}" CACHE INTERNAL "" ) + endif() # run_ + #=============================================================================== + diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 76e01f925..92ec0d457 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -85,6 +85,19 @@ find_package(Python 3.10 REQUIRED COMPONENTS Interpreter Development.Module) # Find pybind11 - tell it to use the FindPython we already called set(PYBIND11_FINDPYTHON ON) +# When the vcpkg toolchain is active its find_package wrapper may not search the isolated build environment where pip +# installed pybind11. Ask pybind11 itself for its CMake config directory so the wrapper can find it. 
+if(NOT pybind11_DIR) + execute_process( + COMMAND "${Python_EXECUTABLE}" -m pybind11 --cmakedir + OUTPUT_VARIABLE _pybind11_cmakedir + OUTPUT_STRIP_TRAILING_WHITESPACE + ERROR_QUIET + RESULT_VARIABLE _pybind11_result) + if(_pybind11_result EQUAL 0 AND _pybind11_cmakedir) + set(pybind11_DIR "${_pybind11_cmakedir}" CACHE PATH "" FORCE) + endif() +endif() find_package(pybind11 REQUIRED) # Create the Python extension module @@ -137,8 +150,12 @@ target_compile_features(_core PUBLIC cxx_std_20) if(QDK_CHEMISTRY_ENABLE_COVERAGE) if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") message(STATUS "Enabling coverage build") - target_compile_options(_core PRIVATE --coverage -fprofile-arcs -ftest-coverage) - target_link_libraries(_core PRIVATE --coverage) + if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang" AND NOT MSVC) + target_compile_options(_core PRIVATE --coverage -fprofile-arcs -ftest-coverage) + target_link_libraries(_core PRIVATE --coverage) + else() + message(WARNING "Coverage build is only supported with GCC or Clang compilers") + endif() else() message(FATAL_ERROR "Coverage build is only supported in CMAKE_BUILD_TYPE=Debug or RelWithDebInfo mode") endif() @@ -172,13 +189,50 @@ target_compile_definitions(_core PRIVATE VERSION_INFO=${PROJECT_VERSION}) set_target_properties(_core PROPERTIES CXX_VISIBILITY_PRESET "hidden" VISIBILITY_INLINES_HIDDEN YES - BUILD_RPATH "$ORIGIN:$ORIGIN/.." - INSTALL_RPATH "$ORIGIN:$ORIGIN/.." - BUILD_RPATH_USE_ORIGIN ON ) +if(NOT WIN32) + set_target_properties(_core PROPERTIES + BUILD_RPATH "$ORIGIN:$ORIGIN/.." + INSTALL_RPATH "$ORIGIN:$ORIGIN/.." + BUILD_RPATH_USE_ORIGIN ON + ) +endif() # Install the Python extension module install(TARGETS _core LIBRARY DESTINATION . COMPONENT _core ) + +# Windows (Python 3.8+): DLL dependencies of .pyd files are only found if they +# reside in the same directory as the .pyd or are registered via os.add_dll_directory(). +# PATH is NOT searched. 
When vcpkg provides dynamic libraries (x64-windows triplet), +# copy the runtime DLLs next to _core.pyd so the package works out of the box. +# With static triplets (x64-windows-static-md) all dependency code is linked directly +# into _core.pyd, so no bundling is needed. +if(VCPKG_LIBRARY_LINKAGE STREQUAL "static") + set(_bundle_dlls_default OFF) +else() + set(_bundle_dlls_default ON) +endif() +option(QDK_BUNDLE_RUNTIME_DLLS "Bundle vcpkg and OpenMP DLLs into the install tree (Windows only)" ${_bundle_dlls_default}) + +if(WIN32 AND QDK_BUNDLE_RUNTIME_DLLS) + message(STATUS "Bundling runtime DLLs into install tree") + # -- vcpkg dependencies (openblas, hdf5, fmt, spdlog and their transitive deps) -- + if(VCPKG_INSTALLED_DIR AND VCPKG_TARGET_TRIPLET) + set(_vcpkg_bin "${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin") + set(_vcpkg_runtime_dlls) + foreach(_dll fmt hdf5 hdf5_cpp openblas spdlog szip zlib1) + if(EXISTS "${_vcpkg_bin}/${_dll}.dll") + list(APPEND _vcpkg_runtime_dlls "${_vcpkg_bin}/${_dll}.dll") + endif() + endforeach() + if(_vcpkg_runtime_dlls) + install(FILES ${_vcpkg_runtime_dlls} + DESTINATION . + COMPONENT _core + ) + endif() + endif() +endif() diff --git a/python/pyproject.toml b/python/pyproject.toml index 9f0268dc5..24b4bd1d8 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -60,6 +60,7 @@ dev = [ "mypy", "nbclient", "nbformat", + "pre-commit", "pytest", "pytest-env", "ruff" @@ -243,7 +244,7 @@ cmake.build-type = "Release" # Build in current directory (python/) cmake.source-dir = "." 
# Enable verbose output for debugging -cmake.verbose = true +build.verbose = true install.components = ["_core", "qdk_chemistry_resources"] metadata.version.provider = "scikit_build_core.metadata.regex" metadata.version.input = "../VERSION" diff --git a/python/src/qdk_chemistry/__init__.py b/python/src/qdk_chemistry/__init__.py index f5b7d9c7e..364aa76b8 100644 --- a/python/src/qdk_chemistry/__init__.py +++ b/python/src/qdk_chemistry/__init__.py @@ -5,16 +5,40 @@ # Licensed under the MIT License. See LICENSE.txt in the project root for license information. # -------------------------------------------------------------------------------------------- +# On Windows, native DLL dependencies (OpenBLAS, HDF5, etc.) may not be on the default DLL search path. +# Register additional directories *before* any import of the C++ extension module (_core). +import os as _os +import sys as _sys from importlib.metadata import PackageNotFoundError as _PackageNotFoundError from importlib.metadata import version as _get_version from pathlib import Path +# Ensure UTF-8 encoding for stdout/stderr on all platforms (especially Windows). +# Q# circuit diagrams contain special characters that cannot be encoded in Windows' default cp1252 encoding. +if hasattr(_sys.stdout, "reconfigure"): + _sys.stdout.reconfigure(encoding="utf-8") +if hasattr(_sys.stderr, "reconfigure"): + _sys.stderr.reconfigure(encoding="utf-8") + +if _sys.platform == "win32": + # Allow users / CI to point to extra DLL directories via a semicolon- + # separated environment variable (e.g. the vcpkg bin directory). + # Note: DLLs bundled next to _core.pyd are found automatically by the + # Windows loader, so no add_dll_directory() is needed for those. 
+ _dll_dirs = _os.environ.get("QDK_DLL_DIR", "") + _dll_dir_handles = [] + for _d in _dll_dirs.split(";"): + _d = _d.strip() + if _d and _os.path.isdir(_d): + _dll_dir_handles.append(_os.add_dll_directory(_d)) # type: ignore[attr-defined] + del _dll_dirs + try: __version__ = _get_version("qdk-chemistry") except _PackageNotFoundError: # Fallback for development/uninstalled use - read from VERSION file try: - __version__ = (Path(__file__).parent.parent.parent.parent / "VERSION").read_text().strip() + __version__ = (Path(__file__).parent.parent.parent.parent / "VERSION").read_text(encoding="utf-8").strip() except (OSError, UnicodeDecodeError): # VERSION file not reachable or unreadable (e.g. vendored copy without repo root) __version__ = "0.0.0.dev0" @@ -133,7 +157,7 @@ def _is_placeholder_stub(stub_file: Path) -> bool: if not stub_file.exists(): return True try: - content = stub_file.read_text() + content = stub_file.read_text(encoding="utf-8") return "placeholder" in content.lower() except (OSError, PermissionError): return False @@ -149,7 +173,7 @@ def _update_stub_references(stub_file: Path) -> None: Also adds necessary imports if they don't exist. 
""" try: - content = stub_file.read_text() + content = stub_file.read_text(encoding="utf-8") original_content = content needs_data_import = False needs_algorithms_import = False @@ -197,7 +221,7 @@ def _update_stub_references(stub_file: Path) -> None: lines[import_section_end:import_section_end] = new_imports content = "\n".join(lines) - stub_file.write_text(content) + stub_file.write_text(content, encoding="utf-8") except (OSError, PermissionError): pass # Skip files that can't be read/written @@ -241,6 +265,7 @@ def _generate_stubs_on_first_import() -> None: check=False, capture_output=True, text=True, + encoding="utf-8", cwd=str(chemistry_dir), ) @@ -361,7 +386,7 @@ def _generate_registry_stubs() -> None: ) overload_code = "\n".join(overloads) - stub_file.write_text(overload_code) + stub_file.write_text(overload_code, encoding="utf-8") except (ImportError, AttributeError, RuntimeError, OSError) as e: # Log but don't fail - type stubs are optional diff --git a/python/src/qdk_chemistry/data/base.py b/python/src/qdk_chemistry/data/base.py index 86f038616..b4ef43595 100644 --- a/python/src/qdk_chemistry/data/base.py +++ b/python/src/qdk_chemistry/data/base.py @@ -312,7 +312,7 @@ def to_json_file(self, filename: str | Path) -> None: """ if self._data_type_name: _validate_filename_suffix(filename, self._data_type_name, "write") - with Path(filename).open("w") as f: + with Path(filename).open("w", encoding="utf-8") as f: json.dump(self.to_json(), f, indent=2) def to_hdf5_file(self, filename: str | Path) -> None: @@ -403,7 +403,7 @@ def from_json_file(cls, filename: str | Path) -> "DataClass": """ if cls._data_type_name: _validate_filename_suffix(filename, cls._data_type_name, "read") - with Path(filename).open("r") as f: + with Path(filename).open("r", encoding="utf-8") as f: json_data = json.load(f) return cls.from_json(json_data) diff --git a/python/src/qdk_chemistry/data/noise_models.py b/python/src/qdk_chemistry/data/noise_models.py index aed94dea5..64c0ea73d 
100644 --- a/python/src/qdk_chemistry/data/noise_models.py +++ b/python/src/qdk_chemistry/data/noise_models.py @@ -228,7 +228,7 @@ def to_yaml_file(self, yaml_file: str | Path) -> None: # Convert to serializable dict data = self.to_json() - with Path(yaml_file).open("w") as f: + with Path(yaml_file).open("w", encoding="utf-8") as f: yaml.dump(data, f) @classmethod @@ -246,7 +246,7 @@ def from_yaml_file(cls, yaml_file: str | Path) -> "QuantumErrorProfile": if not Path(yaml_file).exists(): raise FileNotFoundError(f"File {yaml_file} not found") - with Path(yaml_file).open("r") as f: + with Path(yaml_file).open("r", encoding="utf-8") as f: data = yaml.load(f) if not isinstance(data, dict): diff --git a/python/src/qdk_chemistry/utils/cubegen.py b/python/src/qdk_chemistry/utils/cubegen.py index 6335cc7b9..410286b20 100644 --- a/python/src/qdk_chemistry/utils/cubegen.py +++ b/python/src/qdk_chemistry/utils/cubegen.py @@ -73,7 +73,7 @@ def _generate_cube(coeff, label): cubegen.orbital(mol, outfile=outfile_name, coeff=coeff, nx=nx, ny=ny, nz=nz, margin=margin) if output_folder is None: - with open(outfile_name) as f: + with open(outfile_name, encoding="utf-8") as f: assert isinstance(cubefile_paths, dict) cubefile_paths[label.replace(".cube", "")] = f.read() os.remove(outfile_name) diff --git a/python/src/qdk_chemistry/utils/qsharp/__init__.py b/python/src/qdk_chemistry/utils/qsharp/__init__.py index 3cbe37545..3caad9025 100644 --- a/python/src/qdk_chemistry/utils/qsharp/__init__.py +++ b/python/src/qdk_chemistry/utils/qsharp/__init__.py @@ -24,7 +24,7 @@ def get_qsharp_utils(): try: return qdk.code.QDKChemistry.Utils except AttributeError: - code = "\n".join(f.read_text() for f in _QS_FILES) + code = "\n".join(f.read_text(encoding="utf-8") for f in _QS_FILES) qsharp.eval(code) return qdk.code.QDKChemistry.Utils diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 910845905..a937a6f63 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ 
-6,13 +6,25 @@ # -------------------------------------------------------------------------------------------- import os +import sys # Disable telemetry before any qdk_chemistry imports. # Uses setdefault so an explicit env override is still respected. os.environ.setdefault("QSHARP_PYTHON_TELEMETRY", "false") +# Ensure UTF-8 output on all platforms, especially Windows (for circuit diagrams with special characters) +if hasattr(sys.stdout, "reconfigure"): + sys.stdout.reconfigure(encoding="utf-8") # type: ignore +if hasattr(sys.stderr, "reconfigure"): + sys.stderr.reconfigure(encoding="utf-8") # type: ignore + +# Use non-interactive matplotlib backend to avoid Tk/display dependencies in tests. +# Must be set before any matplotlib.pyplot import. +import matplotlib + +matplotlib.use("Agg") + import platform as plt -import sys import tempfile from pathlib import Path diff --git a/python/tests/test_basis_set.py b/python/tests/test_basis_set.py index d5111a19a..e1df52440 100644 --- a/python/tests/test_basis_set.py +++ b/python/tests/test_basis_set.py @@ -343,7 +343,11 @@ def test_json_serialization(): assert basis_in.get_num_shells() == 2 assert basis_in.get_num_atomic_orbitals() == 4 - # Test file-based serialization + # Test file-based serialization. + # NOTE: Use delete=False so the file handle is closed when the with-block exits, allowing C++ code to open it. + # On Windows the default (delete=True) keeps an exclusive lock on the file. We delete the file manually in `finally` + # via Path.unlink(). This pattern is used throughout the test suite. + # NOTE: Python 3.12+ supports `delete=False, delete_on_close=True` which would avoid the manual unlink() at the end. 
with tempfile.NamedTemporaryFile(suffix=".basis_set.json", mode="w", delete=False) as tmp: filename = tmp.name @@ -357,7 +361,7 @@ def test_json_serialization(): assert basis_file.get_num_shells() == 2 assert basis_file.get_num_atomic_orbitals() == 4 finally: - Path(filename).unlink() + Path(filename).unlink(missing_ok=True) def test_hdf5_serialization(): @@ -540,9 +544,10 @@ def test_basis_set_file_io_generic(): basis = BasisSet("STO-3G", shells) # Test JSON file I/O - with tempfile.NamedTemporaryFile(suffix=".basis_set.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".basis_set.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save using generic method basis.to_file(json_filename, "json") @@ -554,11 +559,14 @@ def test_basis_set_file_io_generic(): assert basis2.get_num_shells() == basis.get_num_shells() assert basis2.get_num_atomic_orbitals() == basis.get_num_atomic_orbitals() assert basis2.get_num_atoms() == basis.get_num_atoms() + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 file I/O - with tempfile.NamedTemporaryFile(suffix=".basis_set.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".basis_set.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save using generic method basis.to_file(hdf5_filename, "hdf5") @@ -570,6 +578,8 @@ def test_basis_set_file_io_generic(): assert basis3.get_num_shells() == basis.get_num_shells() assert basis3.get_num_atomic_orbitals() == basis.get_num_atomic_orbitals() assert basis3.get_num_atoms() == basis.get_num_atoms() + finally: + Path(hdf5_filename).unlink(missing_ok=True) # Test unsupported file type with pytest.raises(RuntimeError, match="Unsupported file type"): @@ -598,9 +608,10 @@ def test_basis_set_hdf5_specific(): basis = BasisSet("6-31G", shells) # Test new to_hdf5_file method - with tempfile.NamedTemporaryFile(suffix=".basis_set.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".basis_set.h5", delete=False) as tmp_hdf5: 
hdf5_filename = tmp_hdf5.name + try: # Save using new method basis.to_hdf5_file(hdf5_filename) @@ -633,6 +644,8 @@ def test_basis_set_hdf5_specific(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) def test_basis_set_json_specific(): @@ -654,9 +667,10 @@ def test_basis_set_json_specific(): basis = BasisSet("cc-pVDZ", shells, AOType.Cartesian) # Test updated JSON file I/O methods - with tempfile.NamedTemporaryFile(suffix=".basis_set.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".basis_set.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save using to_json_file method basis.to_json_file(json_filename) @@ -690,6 +704,8 @@ def test_basis_set_json_specific(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) def test_basis_set_file_io_validation(): @@ -755,9 +771,10 @@ def test_basis_set_file_io_round_trip(): basis = BasisSet("complex-basis", shells, AOType.Spherical) # Test JSON round-trip - with tempfile.NamedTemporaryFile(suffix=".basis_set.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".basis_set.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save and reload basis.to_json_file(json_filename) basis_json = BasisSet.from_json_file(json_filename) @@ -789,11 +806,14 @@ def test_basis_set_file_io_round_trip(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 round-trip - with tempfile.NamedTemporaryFile(suffix=".basis_set.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".basis_set.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save and reload basis.to_hdf5_file(hdf5_filename) basis_hdf5 = BasisSet.from_hdf5_file(hdf5_filename) @@ -823,6 +843,8 @@ def 
test_basis_set_file_io_round_trip(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) def test_basis_set_consistency_between_methods(): diff --git a/python/tests/test_circuit.py b/python/tests/test_circuit.py index 58637a038..4ab868faa 100644 --- a/python/tests/test_circuit.py +++ b/python/tests/test_circuit.py @@ -283,7 +283,7 @@ def test_to_json_file(self, simple_qasm, simple_qir): try: circuit.to_json_file(tmp_path) - with open(tmp_path) as f: + with open(tmp_path, encoding="utf-8") as f: loaded_data = json.load(f) assert "qasm" in loaded_data diff --git a/python/tests/test_docs_examples.py b/python/tests/test_docs_examples.py index 65ba1bcea..838391dd2 100644 --- a/python/tests/test_docs_examples.py +++ b/python/tests/test_docs_examples.py @@ -11,6 +11,7 @@ import sys import unittest from pathlib import Path +from tempfile import TemporaryDirectory from typing import ClassVar from qdk_chemistry.plugins.qiskit import ( @@ -39,7 +40,7 @@ def check_example_requirements(example_file: Path) -> tuple[bool, bool, bool, bo requires_openfermion, is_slow) """ - content = example_file.read_text() + content = example_file.read_text(encoding="utf-8") requires_pyscf = False requires_qiskit = False @@ -141,18 +142,22 @@ def setUpClass(cls): def _run_python_example(self, example_file: Path): """Helper method to run a Python example file.""" + tmpdir = TemporaryDirectory(dir=example_file.parent.parent) result = subprocess.run( [sys.executable, str(example_file)], check=False, capture_output=True, text=True, + encoding="utf-8", timeout=360, - cwd=example_file.parent, + cwd=tmpdir.name, + env={**os.environ, "PYTHONIOENCODING": "utf-8"}, ) - assert result.returncode == 0, ( - f"Example {example_file.name} failed:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" - ) + if result.returncode == 0: + tmpdir.cleanup() + else: + self.fail(f"Example {example_file.name} 
failed:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}") # Dynamically create test methods for each example file diff --git a/python/tests/test_energy_estimator.py b/python/tests/test_energy_estimator.py index 8fac6059f..233f96b16 100644 --- a/python/tests/test_energy_estimator.py +++ b/python/tests/test_energy_estimator.py @@ -227,7 +227,7 @@ def test_measurement_data_to_json(): # Verify file was created and contains expected structure assert Path(temp_path).exists() - with open(temp_path) as f: + with open(temp_path, encoding="utf-8") as f: data = json.load(f) assert isinstance(data, dict) # Should have one entry for the pauli group plus version field diff --git a/python/tests/test_energy_expectation_result.py b/python/tests/test_energy_expectation_result.py index aa37d48ed..f7ac3d2d7 100644 --- a/python/tests/test_energy_expectation_result.py +++ b/python/tests/test_energy_expectation_result.py @@ -69,7 +69,7 @@ def test_energy_expectation_result_json_file_io(): assert Path(filename).exists() # Verify file contents - with open(filename) as f: + with open(filename, encoding="utf-8") as f: data = json.load(f) assert data["energy_expectation_value"] == -1.0 diff --git a/python/tests/test_estimator_data.py b/python/tests/test_estimator_data.py index ee8927646..b24450f7f 100644 --- a/python/tests/test_estimator_data.py +++ b/python/tests/test_estimator_data.py @@ -49,7 +49,7 @@ def test_measurement_data_serialization(): tmpfile_path = tmpfile.name # Load from json file and verify contents - with open(tmpfile_path) as f: + with open(tmpfile_path, encoding="utf-8") as f: data = json.load(f) assert data == measurement_data_dict diff --git a/python/tests/test_mc.py b/python/tests/test_mc.py index 97fb01035..e23d58aa0 100644 --- a/python/tests/test_mc.py +++ b/python/tests/test_mc.py @@ -18,6 +18,13 @@ float_comparison_relative_tolerance, ) +try: + import pyscf # noqa: F401 + + PYSCF_AVAILABLE = True +except ImportError: + PYSCF_AVAILABLE = False + def 
create_water_structure(): """Create a water molecule structure. @@ -86,6 +93,7 @@ def test_mc_calculator_water_fci(self): ) assert wfn_fci.size() == 441 + @pytest.mark.skipif(not PYSCF_AVAILABLE, reason="PySCF not available") def test_mc_cas_entropies_doublet(self): """Test MACIS CAS entropy evaluation on NO doublet with full active space.""" # Create NO molecule @@ -166,7 +174,7 @@ def test_mc_cas_entropies_singlet(self): """Test MACIS CAS entropy evaluation on H2O singlet with full active space.""" h2o = create_water_structure() - # use pyscf for ROHF + # use default SCF solver for RHF scf_solver = algorithms.create("scf_solver") scf_solver.settings().set("scf_type", "restricted") mc_calculator = algorithms.create("multi_configuration_calculator", "macis_cas") diff --git a/python/tests/test_measurement_data.py b/python/tests/test_measurement_data.py index f0a18b3e9..fa7839ab8 100644 --- a/python/tests/test_measurement_data.py +++ b/python/tests/test_measurement_data.py @@ -89,7 +89,7 @@ def test_measurement_data_json_file_io(): assert Path(filename).exists() # Verify file contents - with open(filename) as f: + with open(filename, encoding="utf-8") as f: loaded = json.load(f) assert "0" in loaded diff --git a/python/tests/test_noise_models.py b/python/tests/test_noise_models.py index 3ee187829..80ae6bb96 100644 --- a/python/tests/test_noise_models.py +++ b/python/tests/test_noise_models.py @@ -5,6 +5,9 @@ # Licensed under the MIT License. See LICENSE.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- +import os +import subprocess +import sys import tempfile from pathlib import Path @@ -24,16 +27,67 @@ def test_profile_dumping(simple_error_profile): """Test dumping quantum error profile to YAML.""" - with tempfile.NamedTemporaryFile() as tmp_file: - simple_error_profile.to_yaml_file(tmp_file.name) + # NOTE: Use delete=False so the file handle is closed when the with-block exits, allowing C++ code to open it. + # On Windows the default (delete=True) keeps an exclusive lock on the file. We delete the file manually in `finally` + # via Path.unlink(). This pattern is used throughout the test suite. + # NOTE: Python 3.12+ supports `delete=False, delete_on_close=True` which would avoid the manual unlink() at the end. + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + filename = tmp_file.name + try: + simple_error_profile.to_yaml_file(filename) + finally: + Path(filename).unlink(missing_ok=True) def test_yaml_save_and_load_equivalence(simple_error_profile): """Test that a saved error profile gives the same values on loading.""" - with tempfile.NamedTemporaryFile() as tmp_file: - simple_error_profile.to_yaml_file(tmp_file.name) - loaded_profile = QuantumErrorProfile.from_yaml_file(tmp_file.name) + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + filename = tmp_file.name + try: + simple_error_profile.to_yaml_file(filename) + loaded_profile = QuantumErrorProfile.from_yaml_file(filename) assert simple_error_profile == loaded_profile + finally: + Path(filename).unlink(missing_ok=True) + + +def test_yaml_unicode_round_trip_and_stdout(tmp_path): + """Test Unicode YAML round-trip and subprocess stdout decoding.""" + script_path = tmp_path / "utf8_roundtrip.py" + yaml_path = tmp_path / "unicode.quantum_error_profile.yaml" + script_path.write_text( + "\n".join( + [ + "from pathlib import Path", + "import sys", + "from qdk_chemistry.data.noise_models import 
QuantumErrorProfile", + "", + "yaml_path = Path(sys.argv[1])", + "profile = QuantumErrorProfile(", + " name='φ-profile Å',", + " description='ΔE αβγ 你好',", + " errors={'h': {'depolarizing_error': 0.01}},", + ")", + "profile.to_yaml_file(yaml_path)", + "loaded = QuantumErrorProfile.from_yaml_file(yaml_path)", + "assert loaded.name == 'φ-profile Å'", + "assert loaded.description == 'ΔE αβγ 你好'", + "print('UTF-8 ok: φ Å ΔE αβγ 你好')", + ] + ), + encoding="utf-8", + ) + + result = subprocess.run( + [sys.executable, str(script_path), str(yaml_path)], + capture_output=True, + text=True, + encoding="utf-8", + check=True, + env={**os.environ, "PYTHONIOENCODING": "utf-8"}, + ) + + assert "UTF-8 ok: φ Å ΔE αβγ 你好" in result.stdout def test_basis_gates(simple_error_profile): diff --git a/python/tests/test_orbitals.py b/python/tests/test_orbitals.py index 81c4d4fed..990451319 100644 --- a/python/tests/test_orbitals.py +++ b/python/tests/test_orbitals.py @@ -9,6 +9,7 @@ import pickle import re import tempfile +from pathlib import Path import numpy as np import pytest @@ -202,8 +203,14 @@ def test_json_serialization(): ) # Test file-based serialization - with tempfile.NamedTemporaryFile(suffix=".orbitals.json") as tmp: + # NOTE: Use delete=False so the file handle is closed when the with-block exits, allowing C++ code to open it. + # On Windows the default (delete=True) keeps an exclusive lock on the file. We delete the file manually in `finally` + # via Path.unlink(). This pattern is used throughout the test suite. + # NOTE: Python 3.12+ supports `delete=False, delete_on_close=True` which would avoid the manual unlink() at the end. 
+ with tempfile.NamedTemporaryFile(suffix=".orbitals.json", delete=False) as tmp: filename = tmp.name + + try: orb_out.to_json_file(filename) orb_file = Orbitals.from_json_file(filename) @@ -221,6 +228,8 @@ def test_json_serialization(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(filename).unlink(missing_ok=True) def test_hdf5_serialization(): @@ -231,29 +240,32 @@ def test_hdf5_serialization(): basis_set = create_test_basis_set(3, "test-hdf5-serialization") orb_out = Orbitals(coeffs, energies, overlap, basis_set) + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp: + filename = tmp.name + try: - with tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp: - filename = tmp.name - orb_out.to_hdf5_file(filename) - - orb_in = Orbitals.from_hdf5_file(filename) - - coeffs_out_a, coeffs_out_b = orb_out.get_coefficients() - coeffs_in_a, coeffs_in_b = orb_in.get_coefficients() - assert np.allclose( - coeffs_out_a, - coeffs_in_a, - rtol=float_comparison_relative_tolerance, - atol=float_comparison_absolute_tolerance, - ) - assert np.allclose( - coeffs_out_b, - coeffs_in_b, - rtol=float_comparison_relative_tolerance, - atol=float_comparison_absolute_tolerance, - ) + orb_out.to_hdf5_file(filename) + + orb_in = Orbitals.from_hdf5_file(filename) + + coeffs_out_a, coeffs_out_b = orb_out.get_coefficients() + coeffs_in_a, coeffs_in_b = orb_in.get_coefficients() + assert np.allclose( + coeffs_out_a, + coeffs_in_a, + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) + assert np.allclose( + coeffs_out_b, + coeffs_in_b, + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) except RuntimeError as e: pytest.skip(f"HDF5 test skipped - {e!s}") + finally: + Path(filename).unlink(missing_ok=True) def test_complete_orbitals_workflow(): @@ -268,8 +280,10 @@ def test_complete_orbitals_workflow(): assert 
orb.get_num_molecular_orbitals() == 2 assert orb.is_restricted() - with tempfile.NamedTemporaryFile(suffix=".orbitals.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".orbitals.json", delete=False) as tmp_json: json_filename = tmp_json.name + + try: orb.to_json_file(json_filename) orb2 = Orbitals.from_json_file(json_filename) assert orb2.get_num_atomic_orbitals() == orb.get_num_atomic_orbitals() @@ -288,6 +302,8 @@ def test_complete_orbitals_workflow(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) def test_orbitals_file_io_generic(): @@ -299,9 +315,10 @@ def test_orbitals_file_io_generic(): orb = Orbitals(coeffs, energies, overlap, basis_set) # Test JSON file I/O - with tempfile.NamedTemporaryFile(suffix=".orbitals.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".orbitals.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save using generic method orb.to_file(json_filename, "json") @@ -327,11 +344,14 @@ def test_orbitals_file_io_generic(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 file I/O - with tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save using generic method orb.to_file(hdf5_filename, "hdf5") @@ -357,6 +377,8 @@ def test_orbitals_file_io_generic(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) # Test unsupported file type with pytest.raises(RuntimeError, match="Unsupported file type"): @@ -375,9 +397,10 @@ def test_orbitals_hdf5_specific(): orb = Orbitals(coeffs, energies, overlap, basis_set) # Test HDF5 file I/O methods - with 
tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save using new method orb.to_hdf5_file(hdf5_filename) @@ -427,11 +450,14 @@ def test_orbitals_hdf5_specific(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) # Test HDF5 file I/O methods work correctly - with tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save using method orb.to_hdf5_file(hdf5_filename) @@ -441,6 +467,8 @@ def test_orbitals_hdf5_specific(): # Check equality assert orb3.get_num_atomic_orbitals() == orb.get_num_atomic_orbitals() assert orb3.get_num_molecular_orbitals() == orb.get_num_molecular_orbitals() + finally: + Path(hdf5_filename).unlink(missing_ok=True) def test_orbitals_file_io_validation(): @@ -491,9 +519,10 @@ def test_orbitals_file_io_round_trip(): ) # Test JSON round-trip - with tempfile.NamedTemporaryFile(suffix=".orbitals.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".orbitals.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save and reload orb.to_json_file(json_filename) orb_json = Orbitals.from_json_file(json_filename) @@ -541,11 +570,14 @@ def test_orbitals_file_io_round_trip(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 round-trip - with tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save and reload orb.to_hdf5_file(hdf5_filename) orb_hdf5 = Orbitals.from_hdf5_file(hdf5_filename) @@ -593,6 +625,8 @@ def 
test_orbitals_file_io_round_trip(): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) def test_active_space_management(): @@ -664,8 +698,10 @@ def test_active_space_serialization(): orb = Orbitals(coeffs, None, None, basis_set, [active_indices, []]) # Test JSON serialization - with tempfile.NamedTemporaryFile(suffix=".orbitals.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".orbitals.json", delete=False) as tmp_json: json_filename = tmp_json.name + + try: orb.to_json_file(json_filename) # Load into a new object @@ -678,26 +714,31 @@ def test_active_space_serialization(): json_alpha, json_beta = orb_json.get_active_space_indices() assert np.array_equal(json_alpha, active_indices) assert np.array_equal(json_beta, active_indices) + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 serialization + with tempfile.NamedTemporaryFile(suffix=".orbitals.h5", delete=False) as tmp_hdf5: + hdf5_filename = tmp_hdf5.name + try: - with tempfile.NamedTemporaryFile(suffix=".orbitals.h5") as tmp_hdf5: - hdf5_filename = tmp_hdf5.name - orb.to_hdf5_file(hdf5_filename) + orb.to_hdf5_file(hdf5_filename) - # Load into a new object - orb_hdf5 = Orbitals.from_hdf5_file(hdf5_filename) + # Load into a new object + orb_hdf5 = Orbitals.from_hdf5_file(hdf5_filename) - # Check that active space was preserved - assert orb_hdf5.has_active_space() + # Check that active space was preserved + assert orb_hdf5.has_active_space() - # Verify active space indices - hdf5_alpha, hdf5_beta = orb_hdf5.get_active_space_indices() - assert np.array_equal(hdf5_alpha, active_indices) - assert np.array_equal(hdf5_beta, active_indices) + # Verify active space indices + hdf5_alpha, hdf5_beta = orb_hdf5.get_active_space_indices() + assert np.array_equal(hdf5_alpha, active_indices) + assert np.array_equal(hdf5_beta, active_indices) except RuntimeError as e: pytest.skip(f"HDF5 test skipped - 
{e!s}") + finally: + Path(hdf5_filename).unlink(missing_ok=True) def test_active_space_copy_assign(): diff --git a/python/tests/test_qubit_hamiltonian.py b/python/tests/test_qubit_hamiltonian.py index 0f462d8c7..25e12ec6e 100644 --- a/python/tests/test_qubit_hamiltonian.py +++ b/python/tests/test_qubit_hamiltonian.py @@ -453,7 +453,7 @@ def test_json_to_json_file_no_complex_error(self, tmp_path): qubit_hamiltonian.to_json_file(str(filename)) # Verify the file can be read - with open(filename) as f: + with open(filename, encoding="utf-8") as f: data = json.load(f) assert "pauli_strings" in data diff --git a/python/tests/test_readme_snippets.py b/python/tests/test_readme_snippets.py index e0df4e04e..4bda1f10e 100644 --- a/python/tests/test_readme_snippets.py +++ b/python/tests/test_readme_snippets.py @@ -12,6 +12,7 @@ # -------------------------------------------------------------------------------------------- import importlib.util +import os import re as _re import subprocess import sys @@ -79,25 +80,34 @@ def run_snippet(code: str, snippet_index: int, readme_path: Path, log_dir: Path """ with tempfile.TemporaryDirectory() as td: fn = Path(td) / f"snippet_{snippet_index}.py" + out_path: Path | None = None + err_path: Path | None = None fn.write_text(code, encoding="utf-8") if log_dir: log_dir.mkdir(parents=True, exist_ok=True) out_path = log_dir / f"snippet_{snippet_index}.stdout.txt" err_path = log_dir / f"snippet_{snippet_index}.stderr.txt" meta_path = log_dir / f"snippet_{snippet_index}.meta.txt" - meta_path.write_text(f"readme: {readme_path}\nsnippet_index: {snippet_index}\n") + meta_path.write_text(f"readme: {readme_path}\nsnippet_index: {snippet_index}\n", encoding="utf-8") try: proc = subprocess.run( - [sys.executable, "-B", str(fn)], cwd=td, capture_output=True, text=True, check=True, timeout=60 + [sys.executable, "-B", str(fn)], + cwd=td, + capture_output=True, + text=True, + encoding="utf-8", + check=True, + timeout=60, + env={**os.environ, 
"PYTHONIOENCODING": "utf-8"}, ) except (subprocess.CalledProcessError, OSError) as exc: - if log_dir: - out_path.write_text(getattr(exc, "stdout", "") or "") - err_path.write_text(getattr(exc, "stderr", "") or "") + if out_path is not None and err_path is not None: + out_path.write_text(getattr(exc, "stdout", "") or "", encoding="utf-8") + err_path.write_text(getattr(exc, "stderr", "") or "", encoding="utf-8") return False - if log_dir: - out_path.write_text(proc.stdout or "") - err_path.write_text(proc.stderr or "") + if out_path is not None and err_path is not None: + out_path.write_text(proc.stdout or "", encoding="utf-8") + err_path.write_text(proc.stderr or "", encoding="utf-8") return True diff --git a/python/tests/test_sample_workflow.py b/python/tests/test_sample_workflow.py index 790535231..5e119d724 100644 --- a/python/tests/test_sample_workflow.py +++ b/python/tests/test_sample_workflow.py @@ -45,6 +45,13 @@ except ImportError: _HAS_JUPYTER_CLIENT = False +try: + import pyscf # noqa: F401 + + PYSCF_AVAILABLE = True +except ImportError: + PYSCF_AVAILABLE = False + # Environment variable to enable slow tests (including notebook e2e tests) _RUN_SLOW_TESTS = os.getenv("QDK_CHEMISTRY_RUN_SLOW_TESTS", "").lower() in {"1", "true", "yes"} @@ -226,6 +233,7 @@ def test_factory_list(): not _HAS_JUPYTER_KERNEL, reason="Jupyter kernel 'python3' not available. Install ipykernel and register the kernel.", ) +@pytest.mark.skipif(not PYSCF_AVAILABLE, reason="PySCF not available") def test_state_prep_energy(): """Test the examples/state_prep_energy.ipynb notebook executes without errors.""" notebook_path = EXAMPLES_DIR / "state_prep_energy.ipynb" @@ -243,6 +251,7 @@ def test_state_prep_energy(): not _HAS_JUPYTER_KERNEL, reason="Jupyter kernel 'python3' not available. 
Install ipykernel and register the kernel.", ) +@pytest.mark.skipif(not PYSCF_AVAILABLE, reason="PySCF not available") def test_qpe_stretched_n2(): """Test the examples/qpe_stretched_n2.ipynb notebook executes without errors.""" notebook_path = EXAMPLES_DIR / "qpe_stretched_n2.ipynb" diff --git a/python/tests/test_sample_workflow_utils.py b/python/tests/test_sample_workflow_utils.py index 1b43bfec0..03d18a59f 100644 --- a/python/tests/test_sample_workflow_utils.py +++ b/python/tests/test_sample_workflow_utils.py @@ -21,8 +21,9 @@ def _run_workflow(cmd, cwd: Path) -> subprocess.CompletedProcess[str]: cwd=cwd, capture_output=True, text=True, + encoding="utf-8", check=False, - env=os.environ.copy(), + env={**os.environ, "PYTHONIOENCODING": "utf-8"}, ) diff --git a/python/tests/test_settings.py b/python/tests/test_settings.py index cc2796d36..334fcf6bd 100644 --- a/python/tests/test_settings.py +++ b/python/tests/test_settings.py @@ -569,7 +569,7 @@ def test_json_file_operations(self): assert Path(json_file).exists() # Verify file content - with open(json_file) as f: + with open(json_file, encoding="utf-8") as f: data = json.load(f) assert data["method"] == "hf" assert data["max_iterations"] == 100 @@ -649,7 +649,7 @@ def test_generic_file_operations(self): assert Path(json_file).exists() # Verify content - with open(json_file) as f: + with open(json_file, encoding="utf-8") as f: data = json.load(f) assert data["method"] == "hf" assert data["max_iterations"] == 100 @@ -824,7 +824,7 @@ def test_consistency_between_generic_and_specific_methods(self): settings.to_json_file(json_file2) # Files should be identical - with open(json_file1) as f1, open(json_file2) as f2: + with open(json_file1, encoding="utf-8") as f1, open(json_file2, encoding="utf-8") as f2: assert f1.read() == f2.read() # Load with both methods and verify consistency diff --git a/python/tests/test_stability.py b/python/tests/test_stability.py index 6deff5fcd..c50e627d3 100644 --- 
a/python/tests/test_stability.py +++ b/python/tests/test_stability.py @@ -8,6 +8,7 @@ import pickle import tempfile import warnings +from pathlib import Path import numpy as np import pytest @@ -104,8 +105,14 @@ def test_stability_result_json_serialization(self): ) # Test file-based serialization - with tempfile.NamedTemporaryFile(suffix=".stability_result.json") as tmp: + # NOTE: Use delete=False so the file handle is closed when the with-block exits, allowing C++ code to open it. + # On Windows the default (delete=True) keeps an exclusive lock on the file. We delete the file manually in + # `finally` via Path.unlink(). This pattern is used throughout the test suite. + # NOTE: Python 3.12+ supports `delete=False, delete_on_close=True` which would avoid the manual unlink(). + with tempfile.NamedTemporaryFile(suffix=".stability_result.json", delete=False) as tmp: filename = tmp.name + + try: result_out.to_json_file(filename) result_file = StabilityResult.from_json_file(filename) @@ -125,56 +132,62 @@ def test_stability_result_json_serialization(self): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(filename).unlink(missing_ok=True) def test_stability_result_hdf5_serialization(self): """Test HDF5 serialization and deserialization.""" result_out = self.create_test_stability_result() + with tempfile.NamedTemporaryFile(suffix=".stability_result.h5", delete=False) as tmp: + filename = tmp.name + try: - with tempfile.NamedTemporaryFile(suffix=".stability_result.h5") as tmp: - filename = tmp.name - result_out.to_hdf5_file(filename) - - result_in = StabilityResult.from_hdf5_file(filename) - - # Verify data preservation - assert result_in.is_internal_stable() == result_out.is_internal_stable() - assert result_in.is_external_stable() == result_out.is_external_stable() - assert np.allclose( - result_out.get_internal_eigenvalues(), - result_in.get_internal_eigenvalues(), - rtol=float_comparison_relative_tolerance, - 
atol=float_comparison_absolute_tolerance, - ) - assert np.allclose( - result_out.get_external_eigenvalues(), - result_in.get_external_eigenvalues(), - rtol=float_comparison_relative_tolerance, - atol=float_comparison_absolute_tolerance, - ) - assert np.allclose( - result_out.get_internal_eigenvectors(), - result_in.get_internal_eigenvectors(), - rtol=float_comparison_relative_tolerance, - atol=float_comparison_absolute_tolerance, - ) - assert np.allclose( - result_out.get_external_eigenvectors(), - result_in.get_external_eigenvectors(), - rtol=float_comparison_relative_tolerance, - atol=float_comparison_absolute_tolerance, - ) + result_out.to_hdf5_file(filename) + + result_in = StabilityResult.from_hdf5_file(filename) + + # Verify data preservation + assert result_in.is_internal_stable() == result_out.is_internal_stable() + assert result_in.is_external_stable() == result_out.is_external_stable() + assert np.allclose( + result_out.get_internal_eigenvalues(), + result_in.get_internal_eigenvalues(), + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) + assert np.allclose( + result_out.get_external_eigenvalues(), + result_in.get_external_eigenvalues(), + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) + assert np.allclose( + result_out.get_internal_eigenvectors(), + result_in.get_internal_eigenvectors(), + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) + assert np.allclose( + result_out.get_external_eigenvectors(), + result_in.get_external_eigenvectors(), + rtol=float_comparison_relative_tolerance, + atol=float_comparison_absolute_tolerance, + ) except RuntimeError as e: pytest.skip(f"HDF5 test skipped - {e!s}") + finally: + Path(filename).unlink(missing_ok=True) def test_stability_result_file_io_generic(self): """Test generic file I/O methods for StabilityResult.""" result = self.create_test_stability_result() # Test JSON file I/O - with 
tempfile.NamedTemporaryFile(suffix=".stability_result.json") as tmp_json: + with tempfile.NamedTemporaryFile(suffix=".stability_result.json", delete=False) as tmp_json: json_filename = tmp_json.name + try: # Save using generic method result.to_file(json_filename, "json") @@ -198,11 +211,14 @@ def test_stability_result_file_io_generic(self): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(json_filename).unlink(missing_ok=True) # Test HDF5 file I/O - with tempfile.NamedTemporaryFile(suffix=".stability_result.h5") as tmp_hdf5: + with tempfile.NamedTemporaryFile(suffix=".stability_result.h5", delete=False) as tmp_hdf5: hdf5_filename = tmp_hdf5.name + try: # Save using generic method result.to_file(hdf5_filename, "hdf5") @@ -226,6 +242,8 @@ def test_stability_result_file_io_generic(self): rtol=float_comparison_relative_tolerance, atol=float_comparison_absolute_tolerance, ) + finally: + Path(hdf5_filename).unlink(missing_ok=True) # Test unsupported file type with pytest.raises(ValueError, match="Unsupported file type"): @@ -273,12 +291,16 @@ def test_stability_result_empty_data_io(self): assert from_json.external_size() == empty_result.external_size() # Test file I/O with empty data - with tempfile.NamedTemporaryFile(suffix=".stability_result.json") as tmp: + with tempfile.NamedTemporaryFile(suffix=".stability_result.json", delete=False) as tmp: filename = tmp.name + + try: empty_result.to_json_file(filename) empty_from_file = StabilityResult.from_json_file(filename) assert empty_from_file.is_stable() assert empty_from_file.empty() # Method call, not property + finally: + Path(filename).unlink(missing_ok=True) def test_stability_result_pickle_serialization_and_repr(self): """Test pickle serialization support and string representation for StabilityResult.""" diff --git a/vcpkg-configuration.json b/vcpkg-configuration.json new file mode 100644 index 000000000..f9dacd5c2 --- /dev/null +++ b/vcpkg-configuration.json @@ 
-0,0 +1,9 @@ +{ + "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg-configuration.schema.json", + "default-registry": { + "kind": "git", + "repository": "https://github.com/microsoft/vcpkg", + "baseline": "52f5569a8e867859fac967c1e3f10ca0a8a3cc39" + }, + "overlay-ports": ["./vcpkg-overlay/ports"] +} diff --git a/vcpkg-overlay/ports/openblas/cmake-project-include.cmake b/vcpkg-overlay/ports/openblas/cmake-project-include.cmake new file mode 100644 index 000000000..574073bb4 --- /dev/null +++ b/vcpkg-overlay/ports/openblas/cmake-project-include.cmake @@ -0,0 +1,66 @@ +# TARGET: The target architecture +# +# Originally, OpenBLAS tries to optimize for the host CPU unless +# - being given an explicit TARGET, and +# - CMAKE_CROSSCOMPILING, and +# - not building for uwp (aka WINDOWSSTORE) +# For this optimization, it runs 'getarch' and 'getarch_2nd' which it builds +# from source. The getarch executables are not built when not optimizing. +# +# Consequences: +# - The port must ensure that TARGET is set when cross compiling for a different CPU or OS. +# - The port must install getarch executables when possible. +# +# DYNAMIC_ARCH enables support "for multiple targets with runtime detection". +# (But not for MSVC, https://github.com/OpenMathLib/OpenBLAS/wiki/How-to-use-OpenBLAS-in-Microsoft-Visual-Studio#cmake-and-visual-studio.) +# The OpenBLAS README.md suggests that this shall be used with TARGET being +# set "to the oldest model you expect to encounter". This affects "all the +# common code in the library". 
+ +set(need_target 0) +if(NOT "${TARGET}" STREQUAL "") + message(STATUS "TARGET: ${TARGET} (user-defined)") +elseif(DYNAMIC_ARCH) + message(STATUS "DYNAMIC_ARCH: ${DYNAMIC_ARCH}") + set(need_target 1) # for C +elseif(CMAKE_CROSSCOMPILING AND NOT GETARCH_BINARY_DIR) + set(need_target 1) # for C and for optimized kernel +else() + message(STATUS "TARGET: (OpenBLAS getarch/getarch_2nd)") +endif() + +if(need_target) + set(target_default "GENERIC") + if(MSVC) + # "does not support the dialect of assembly used in the cpu-specific optimized files" + # https://github.com/OpenMathLib/OpenBLAS/wiki/How-to-use-OpenBLAS-in-Microsoft-Visual-Studio#cmake-and-visual-studio + elseif(VCPKG_TARGET_ARCHITECTURE MATCHES "^x64|^x86") + set(target_default "ATOM") + elseif(VCPKG_TARGET_ARCHITECTURE MATCHES "^arm64") + set(target_default "ARMV8") + elseif(VCPKG_TARGET_ARCHITECTURE MATCHES "^arm") + set(target_default "ARMV7") + endif() + set(TARGET "${target_default}" CACHE STRING "") + message(STATUS "TARGET: ${TARGET}") +endif() + +# NUM_THREADS: The number of threads expected to be used. +# +# This setting affects both the configuration with USE_THREAD enabled +# (multithreaded OpenBLAS) and disabled (multithreaded access to OpenBLAS). +# This shouldn't be set too low for generic packages. But it comes with a +# memory footprint. 
+ +if(DEFINED NUM_THREADS) + message(STATUS "NUM_THREADS: ${NUM_THREADS} (user-defined)") +elseif(EMSCRIPTEN) + message(STATUS "NUM_THREADS: (for EMSCRIPTEN)") +elseif(need_target) + set(num_threads_default 24) + if(ANDROID OR IOS) + set(num_threads_default 8) + endif() + set(NUM_THREADS "${num_threads_default}" CACHE STRING "") + message(STATUS "NUM_THREADS: ${NUM_THREADS}") +endif() diff --git a/vcpkg-overlay/ports/openblas/disable-testing.diff b/vcpkg-overlay/ports/openblas/disable-testing.diff new file mode 100644 index 000000000..5c01a519b --- /dev/null +++ b/vcpkg-overlay/ports/openblas/disable-testing.diff @@ -0,0 +1,20 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 2006604..c9fedb9 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -323,6 +323,7 @@ if (USE_THREAD) + endif() + endif() + ++if(BUILD_TESTING) + #if (MSVC OR NOT NOFORTRAN) + if (NOT NO_CBLAS) + if (NOT ONLY_CBLAS) +@@ -348,6 +349,7 @@ endif() + if (CPP_THREAD_SAFETY_TEST OR CPP_THREAD_SAFETY_GEMV) + add_subdirectory(cpp_thread_test) + endif() ++endif() + + if (NOT FIXED_LIBNAME) + set_target_properties(${OpenBLAS_LIBS} PROPERTIES diff --git a/vcpkg-overlay/ports/openblas/getarch.diff b/vcpkg-overlay/ports/openblas/getarch.diff new file mode 100644 index 000000000..d91e5e106 --- /dev/null +++ b/vcpkg-overlay/ports/openblas/getarch.diff @@ -0,0 +1,73 @@ +diff --git a/cmake/prebuild.cmake b/cmake/prebuild.cmake +index 609fbe2..daeb25c 100644 +--- a/cmake/prebuild.cmake ++++ b/cmake/prebuild.cmake +@@ -95,7 +95,7 @@ else () + endif () + + # Cannot run getarch on target if we are cross-compiling +-if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSSTORE")) ++if(CMAKE_CROSSCOMPILING AND NOT DEFINED GETARCH_BINARY_DIR) + # Write to config as getarch would + if (DEFINED TARGET_CORE) + set(TCORE ${TARGET_CORE}) +@@ -1373,7 +1373,11 @@ endif () + file(MAKE_DIRECTORY ${TARGET_CONF_DIR}) + file(RENAME ${TARGET_CONF_TEMP} "${TARGET_CONF_DIR}/${TARGET_CONF}") + 
+-else(NOT CMAKE_CROSSCOMPILING) ++else() ++ if(NOT CMAKE_CROSSCOMPILING) ++ set(GETARCH_BINARY_DIR "${PROJECT_BINARY_DIR}") ++ endif() ++ + # compile getarch + set(GETARCH_SRC + ${PROJECT_SOURCE_DIR}/getarch.c +@@ -1420,6 +1424,7 @@ else(NOT CMAKE_CROSSCOMPILING) + if (NOT ${GETARCH_RESULT}) + MESSAGE(FATAL_ERROR "Compiling getarch failed ${GETARCH_LOG}") + endif () ++ install(PROGRAMS "${PROJECT_BINARY_DIR}/${GETARCH_BIN}" DESTINATION bin) + endif () + unset (HAVE_AVX2) + unset (HAVE_AVX) +@@ -1439,8 +1444,8 @@ else(NOT CMAKE_CROSSCOMPILING) + message(STATUS "Running getarch") + + # use the cmake binary w/ the -E param to run a shell command in a cross-platform way +-execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH_BIN}" 0 OUTPUT_VARIABLE GETARCH_MAKE_OUT) +-execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH_BIN}" 1 OUTPUT_VARIABLE GETARCH_CONF_OUT) ++execute_process(COMMAND "${GETARCH_BINARY_DIR}/${GETARCH_BIN}" 0 OUTPUT_VARIABLE GETARCH_MAKE_OUT) ++execute_process(COMMAND "${GETARCH_BINARY_DIR}/${GETARCH_BIN}" 1 OUTPUT_VARIABLE GETARCH_CONF_OUT) + + message(STATUS "GETARCH results:\n${GETARCH_MAKE_OUT}") + +@@ -1463,11 +1468,12 @@ execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH_BIN}" 1 OUTPUT_VARIABLE + if (NOT ${GETARCH2_RESULT}) + MESSAGE(FATAL_ERROR "Compiling getarch_2nd failed ${GETARCH2_LOG}") + endif () ++ install(PROGRAMS "${PROJECT_BINARY_DIR}/${GETARCH2_BIN}" DESTINATION bin) + endif () + + # use the cmake binary w/ the -E param to run a shell command in a cross-platform way +-execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH2_BIN}" 0 OUTPUT_VARIABLE GETARCH2_MAKE_OUT) +-execute_process(COMMAND "${PROJECT_BINARY_DIR}/${GETARCH2_BIN}" 1 OUTPUT_VARIABLE GETARCH2_CONF_OUT) ++execute_process(COMMAND "${GETARCH_BINARY_DIR}/${GETARCH2_BIN}" 0 OUTPUT_VARIABLE GETARCH2_MAKE_OUT) ++execute_process(COMMAND "${GETARCH_BINARY_DIR}/${GETARCH2_BIN}" 1 OUTPUT_VARIABLE GETARCH2_CONF_OUT) + + # append config data from getarch_2nd to the 
TARGET file and read in CMake vars + file(APPEND "${TARGET_CONF_TEMP}" ${GETARCH2_CONF_OUT}) +diff --git a/cmake/system.cmake b/cmake/system.cmake +index eae7436..b2a6da7 100644 +--- a/cmake/system.cmake ++++ b/cmake/system.cmake +@@ -13,7 +13,7 @@ if(CMAKE_CROSSCOMPILING AND NOT DEFINED TARGET) + set(TARGET "ARMV8") + elseif(ARM) + set(TARGET "ARMV7") # TODO: Ask compiler which arch this is +- else() ++ elseif(NOT DEFINED GETARCH_BINARY_DIR) + message(FATAL_ERROR "When cross compiling, a TARGET is required.") + endif() + endif() diff --git a/vcpkg-overlay/ports/openblas/openblas_common.h b/vcpkg-overlay/ports/openblas/openblas_common.h new file mode 100644 index 000000000..a36a84c8a --- /dev/null +++ b/vcpkg-overlay/ports/openblas/openblas_common.h @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See LICENSE.txt in the project root for +// license information. + +#pragma once +#include "openblas/openblas_config.h" + +#if defined(OPENBLAS_OS_WINNT) || defined(OPENBLAS_OS_CYGWIN_NT) || \ + defined(OPENBLAS_OS_INTERIX) +#define OPENBLAS_WINDOWS_ABI +#define OPENBLAS_OS_WINDOWS + +#ifdef DOUBLE +#define DOUBLE_DEFINED DOUBLE +#undef DOUBLE +#endif +#endif + +#ifdef NEEDBUNDERSCORE +#define BLASFUNC(FUNC) FUNC##_ + +#else +#define BLASFUNC(FUNC) FUNC +#endif + +#ifdef OPENBLAS_QUAD_PRECISION +typedef struct { + unsigned long x[2]; +} xdouble; +#elif defined OPENBLAS_EXPRECISION +#define xdouble long double +#else +#define xdouble double +#endif + +#if defined(OS_WINNT) && defined(__64BIT__) +typedef long long BLASLONG; +typedef unsigned long long BLASULONG; +#else +typedef long BLASLONG; +typedef unsigned long BLASULONG; +#endif + +#ifdef OPENBLAS_USE64BITINT +typedef BLASLONG blasint; +#else +typedef int blasint; +#endif + +#if defined(XDOUBLE) || defined(DOUBLE) +#define FLOATRET FLOAT +#else +#ifdef NEED_F2CCONV +#define FLOATRET double +#else +#define FLOATRET float +#endif +#endif + +/* Inclusion of 
a standard header file is needed for definition of __STDC_* + predefined macros with some compilers (e.g. GCC 4.7 on Linux). This occurs + as a side effect of including either <stdio.h> or <stdlib.h>. */ +#include <stdio.h> diff --git a/vcpkg-overlay/ports/openblas/portfile.cmake b/vcpkg-overlay/ports/openblas/portfile.cmake new file mode 100644 index 000000000..19b49e4db --- /dev/null +++ b/vcpkg-overlay/ports/openblas/portfile.cmake @@ -0,0 +1,78 @@ +vcpkg_from_github( + OUT_SOURCE_PATH SOURCE_PATH + REPO OpenMathLib/OpenBLAS + REF "v${VERSION}" + SHA512 046316b4297460bffca09c890ecad17ea39d8b3db92ff445d03b547dd551663d37e40f38bce8ae11e2994374ff01e622b408da27aa8e40f4140185ee8f001a60 + HEAD_REF develop + PATCHES + disable-testing.diff + getarch.diff + system-check-msvc.diff + win32-uwp.diff +) + +vcpkg_check_features(OUT_FEATURE_OPTIONS OPTIONS + FEATURES + threads USE_THREAD + simplethread USE_SIMPLE_THREADED_LEVEL3 + dynamic-arch DYNAMIC_ARCH +) + +# If not explicitly configured for a cross build, OpenBLAS wants to run +# getarch executables in order to optimize for the target. +# Adapting this to vcpkg triplets: +# - getarch.diff introduces and uses GETARCH_BINARY_DIR, +# - architecture and system name are required to match for GETARCH_BINARY_DIR, but +# - uwp (aka WindowsStore) may run windows getarch. +string(REPLACE "WindowsStore_" "_" SYSTEM_KEY "${VCPKG_CMAKE_SYSTEM_NAME}_${VCPKG_TARGET_ARCHITECTURE}") +set(GETARCH_BINARY_DIR "${CURRENT_HOST_INSTALLED_DIR}/manual-tools/${PORT}/${SYSTEM_KEY}") +if(EXISTS "${GETARCH_BINARY_DIR}") + message(STATUS "OpenBLAS cross build, but may use ${PORT}:${HOST_TRIPLET} getarch") + list(APPEND OPTIONS "-DGETARCH_BINARY_DIR=${GETARCH_BINARY_DIR}") +elseif(VCPKG_CROSSCOMPILING) + message(STATUS "OpenBLAS cross build, may not be able to use getarch") +else() + message(STATUS "OpenBLAS native build") +endif() + +if(VCPKG_TARGET_IS_EMSCRIPTEN) + # Only the riscv64 kernel with riscv64_generic target is supported. + # Cf. 
https://github.com/OpenMathLib/OpenBLAS/issues/3640#issuecomment-1144029630 et al. + list(APPEND OPTIONS + -DEMSCRIPTEN_SYSTEM_PROCESSOR=riscv64 + -DTARGET=RISCV64_GENERIC + ) +endif() + +vcpkg_cmake_configure( + SOURCE_PATH "${SOURCE_PATH}" + OPTIONS + ${OPTIONS} + "-DCMAKE_PROJECT_INCLUDE=${CURRENT_PORT_DIR}/cmake-project-include.cmake" + -DBUILD_TESTING=OFF + # QDK overlay change: BUILD_WITHOUT_LAPACK=OFF to include LAPACK routines. + # C_LAPACK=ON uses OpenBLAS's embedded C-translated LAPACK (no Fortran needed). + -DBUILD_WITHOUT_LAPACK=OFF + -DNOFORTRAN=ON + -DC_LAPACK=ON + MAYBE_UNUSED_VARIABLES + GETARCH_BINARY_DIR +) + +vcpkg_cmake_install() +vcpkg_copy_pdbs() +vcpkg_cmake_config_fixup(CONFIG_PATH lib/cmake/OpenBLAS) +vcpkg_fixup_pkgconfig() + +# Required from native builds, optional from cross builds. +if(NOT VCPKG_CROSSCOMPILING OR EXISTS "${CURRENT_PACKAGES_DIR}/bin/getarch${VCPKG_TARGET_EXECUTABLE_SUFFIX}") + vcpkg_copy_tools( + TOOL_NAMES getarch getarch_2nd + DESTINATION "${CURRENT_PACKAGES_DIR}/manual-tools/${PORT}/${SYSTEM_KEY}" + AUTO_CLEAN + ) +endif() + +file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/include" "${CURRENT_PACKAGES_DIR}/debug/share") + +vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/LICENSE") diff --git a/vcpkg-overlay/ports/openblas/system-check-msvc.diff b/vcpkg-overlay/ports/openblas/system-check-msvc.diff new file mode 100644 index 000000000..f1c395e1a --- /dev/null +++ b/vcpkg-overlay/ports/openblas/system-check-msvc.diff @@ -0,0 +1,21 @@ +diff --git a/cmake/system_check.cmake b/cmake/system_check.cmake +index e94497a..d884727 100644 +--- a/cmake/system_check.cmake ++++ b/cmake/system_check.cmake +@@ -36,6 +36,16 @@ if(CMAKE_CL_64 OR MINGW64) + else() + set(X86_64 1) + endif() ++elseif(MSVC) ++ if(CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64") ++ set(X86_64 1) ++ elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "ARM") ++ set(ARM 1) ++ elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "ARM64") ++ set(ARM64 1) ++ else() ++ set(X86 1) ++ endif() + 
elseif(MINGW OR (MSVC AND NOT CMAKE_CROSSCOMPILING)) + set(X86 1) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc.*|power.*|Power.*" OR (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND CMAKE_OSX_ARCHITECTURES MATCHES "ppc.*")) diff --git a/vcpkg-overlay/ports/openblas/vcpkg.json b/vcpkg-overlay/ports/openblas/vcpkg.json new file mode 100644 index 000000000..9a22b2e47 --- /dev/null +++ b/vcpkg-overlay/ports/openblas/vcpkg.json @@ -0,0 +1,50 @@ +{ + "name": "openblas", + "version": "0.3.29", + "description": "OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.", + "homepage": "https://github.com/OpenMathLib/OpenBLAS", + "license": "BSD-3-Clause", + "dependencies": [ + { + "name": "openblas", + "host": true + }, + { + "name": "vcpkg-cmake", + "host": true + }, + { + "name": "vcpkg-cmake-config", + "host": true + } + ], + "features": { + "dynamic-arch": { + "description": "Support for multiple targets in a single library", + "supports": "!windows | mingw" + }, + "simplethread": { + "description": [ + "Use simple thread safety for level3 functions", + "Alternative to serialization of concurrent access to parallelized level3 functions." 
+ ], + "dependencies": [ + { + "name": "openblas", + "features": [ + "threads" + ] + } + ] + }, + "threads": { + "description": "Enable multi-threading", + "dependencies": [ + { + "name": "pthreads", + "platform": "!windows" + } + ] + } + } +} diff --git a/vcpkg-overlay/ports/openblas/win32-uwp.diff b/vcpkg-overlay/ports/openblas/win32-uwp.diff new file mode 100644 index 000000000..21e0bb7d2 --- /dev/null +++ b/vcpkg-overlay/ports/openblas/win32-uwp.diff @@ -0,0 +1,67 @@ +diff --git a/cmake/os.cmake b/cmake/os.cmake +index 2effbe0..538ede2 100644 +--- a/cmake/os.cmake ++++ b/cmake/os.cmake +@@ -18,7 +18,7 @@ if (${CMAKE_SYSTEM_NAME} STREQUAL "AIX") + endif () + + # TODO: this is probably meant for mingw, not other windows compilers +-if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (WIN32) + + set(NEED_PIC 0) + set(NO_EXPRECISION 1) +@@ -69,7 +69,7 @@ if (CYGWIN) + set(NO_EXPRECISION 1) + endif () + +-if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows" AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Interix" AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Android") ++if (NOT WIN32 AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Interix" AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Android") + if (USE_THREAD) + set(EXTRALIB "${EXTRALIB} -lpthread") + endif () +diff --git a/cmake/system.cmake b/cmake/system.cmake +index 683c318..eae7436 100644 +--- a/cmake/system.cmake ++++ b/cmake/system.cmake +@@ -507,7 +507,7 @@ if (USE_SIMPLE_THREADED_LEVEL3) + set(CCOMMON_OPT "${CCOMMON_OPT} -DUSE_SIMPLE_THREADED_LEVEL3") + endif () + +-if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (NOT WIN32) + if (DEFINED MAX_STACK_ALLOC) + if (NOT ${MAX_STACK_ALLOC} EQUAL 0) + set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_STACK_ALLOC=${MAX_STACK_ALLOC}") +@@ -516,7 +516,7 @@ else () + set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_STACK_ALLOC=2048") + endif () + endif () +-if (NOT ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (NOT WIN32) + if (DEFINED BLAS3_MEM_ALLOC_THRESHOLD) + if (NOT ${BLAS3_MEM_ALLOC_THRESHOLD} EQUAL 32) + 
set(CCOMMON_OPT "${CCOMMON_OPT} -DBLAS3_MEM_ALLOC_THRESHOLD=${BLAS3_MEM_ALLOC_THRESHOLD}") +@@ -633,7 +633,7 @@ endif() + set(LAPACK_FPFLAGS "${LAPACK_FPFLAGS} ${FPFLAGS}") + + #Disable -fopenmp for LAPACK Fortran codes on Windows. +-if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (WIN32) + set(FILTER_FLAGS "-fopenmp;-mp;-openmp;-xopenmp=parallel") + foreach (FILTER_FLAG ${FILTER_FLAGS}) + string(REPLACE ${FILTER_FLAG} "" LAPACK_FFLAGS ${LAPACK_FFLAGS}) +@@ -665,11 +665,11 @@ if (INTERFACE64) + set(LAPACK_CFLAGS "${LAPACK_CFLAGS} -DLAPACK_ILP64") + endif () + +-if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (WIN32) + set(LAPACK_CFLAGS "${LAPACK_CFLAGS} -DOPENBLAS_OS_WINDOWS") + endif () + +-if (${CMAKE_C_COMPILER} STREQUAL "LSB" OR ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") ++if (${CMAKE_C_COMPILER} STREQUAL "LSB" OR WIN32) + set(LAPACK_CFLAGS "${LAPACK_CFLAGS} -DLAPACK_COMPLEX_STRUCTURE") + endif () + if (${CMAKE_C_COMPILER_ID} MATCHES "IntelLLVM" AND ${CMAKE_SYSTEM_NAME} STREQUAL "Windows") diff --git a/vcpkg.json b/vcpkg.json new file mode 100644 index 000000000..32a08ef16 --- /dev/null +++ b/vcpkg.json @@ -0,0 +1,23 @@ +{ + "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg.schema.json", + "name": "qdk-chemistry", + "version-string": "1.1.0", + "description": "Quantum Development Kit - Chemistry Library (Windows build dependencies)", + "dependencies": [ + "eigen3", + { + "name": "hdf5", + "features": ["cpp"] + }, + "boost-headers", + "boost-container-hash", + "boost-dynamic-bitset", + "boost-sort", + "catch2", + "gtest", + "nlohmann-json", + "spdlog", + "fmt", + "openblas" + ] +}