Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/python_wheels.yml
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ jobs:
echo "::error file=python_wheel.yml::Python tests failed with status $pytest_status."
exit 1
fi
python${{ inputs.python_version }} -m pip install --user fastapi uvicorn llvmlite
python${{ inputs.python_version }} -m pip install --user fastapi uvicorn llvmlite openfermionpyscf==0.5
for backendTest in /tmp/tests/backends/*.py; do
python${{ inputs.python_version }} -m pytest $backendTest
pytest_status=$?
Expand Down
6 changes: 2 additions & 4 deletions Overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,10 +116,8 @@ diagnostics infrastructures for free.

Specifically, this library provides the `qudit`, `qubit`, `qreg`, and `qspan`
types, the intrinsic quantum operations, and the algorithmic primitives like
`cudaq::sample` and `cudaq::observe`.

This library defines the `cudaq::spin_op` for defining general Pauli tensor
product terms.
`cudaq::sample`, `cudaq::observe`, and `cudaq::evolve`, as well as their
asynchronous versions.

This library defines the `quantum_platform` architecture, enabling CUDA-Q to
target both simulated and physical quantum computing architectures.
Expand Down
2 changes: 1 addition & 1 deletion docs/sphinx/specification/cudaq/operations.rst
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ The default set of quantum intrinsic operations for the cudaq::qubit type is as

bool MEASURE_OP(qubit &q) noexcept;
std::vector<bool> MEASURE_OP(qvector &q) noexcept;
double measure(cudaq::spin_op & term) noexcept { ... }
double measure(const cudaq::spin_op & term) noexcept { ... }
}

**[1]** For the default implementation of the :code:`cudaq::qubit` intrinsic operations, we let
Expand Down
9 changes: 7 additions & 2 deletions lib/Optimizer/Transforms/ObserveAnsatz.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -225,11 +225,16 @@ struct AppendMeasurements : public OpRewritePattern<func::FuncOp> {
return funcOp.emitOpError("Errors encountered in pass analysis");
auto nQubits = iter->second.nQubits;

if (nQubits != termBSF.size() / 2)
if (nQubits < termBSF.size() / 2)
return funcOp.emitOpError("Invalid number of binary-symplectic elements "
"provided. Must provide 2 * NQubits = " +
"provided: " +
std::to_string(termBSF.size()) +
". Must provide at most 2 * NQubits = " +
std::to_string(2 * nQubits));

// Update nQubits so we only measure the requested qubits.
nQubits = termBSF.size() / 2;

// If the mapping pass was not run, we expect no pre-existing measurements.
if (!iter->second.mappingPassRan && !iter->second.measurements.empty())
return funcOp.emitOpError("Cannot observe kernel with measures in it.");
Expand Down
8 changes: 8 additions & 0 deletions python/cudaq/operator/definitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,14 @@ def __init__(self):
# This should never be called. We have `__new__` method instead.
raise ValueError("Not supported")

@staticmethod
def empty() -> OperatorSum:
    # An OperatorSum constructed with no terms: the additive identity
    # for operator arithmetic.
    return OperatorSum()

@staticmethod
def identity() -> OperatorSum:
    # The multiplicative identity: a product operator whose only factor
    # is the constant scalar 1.
    # NOTE(review): annotated -> OperatorSum but returns a
    # ProductOperator — presumably a subclass; confirm the hierarchy.
    return ProductOperator(ScalarOperator.const(1.))

# Convert from a Pauli word to an Operator
@staticmethod
def from_word(word: str) -> ProductOperator:
Expand Down
5 changes: 3 additions & 2 deletions python/cudaq/operator/expressions.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,6 @@ def __init__(self: OperatorSum,
evaluating the operator expression.
"""
self._terms = tuple(terms)
if len(self._terms) == 0:
self._terms = (ProductOperator((ScalarOperator.const(0),)),)
self._cache = {}
self._iter_idx = 0

Expand Down Expand Up @@ -168,6 +166,8 @@ def padded_term(term: ProductOperator) -> ProductOperator:
term *= ElementaryOperator.identity(degree)
return term

if len(self._terms) == 0:
return arithmetics.evaluate(ScalarOperator.const(0))
if pad_terms:
sum = padded_term(self._terms[0])._evaluate(arithmetics, pad_terms)
for term in self._terms[1:]:
Expand Down Expand Up @@ -345,6 +345,7 @@ def to_sparse_matrix(self: OperatorSum):
return self._to_spinop().to_sparse_matrix()

def __iter__(self: OperatorSum) -> OperatorSum:
    # Reset the iteration cursor so the operator sum can be iterated
    # more than once; __next__ reads and advances _iter_idx.
    self._iter_idx = 0
    return self

def __next__(self: OperatorSum) -> ProductOperator:
Expand Down
10 changes: 5 additions & 5 deletions python/runtime/cudaq/spin/py_matrix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
#include <pybind11/operators.h>
#include <pybind11/stl.h>

#include "cudaq/spin_op.h"
#include "py_spin_op.h"
#include "cudaq/utils/matrix.h"
#include "py_matrix.h"

#include <complex>

Expand Down Expand Up @@ -39,7 +39,7 @@ void bindComplexMatrix(py::module &mod) {
/// The following makes this fully compatible with NumPy
.def_buffer([](complex_matrix &op) -> py::buffer_info {
return py::buffer_info(
op.data(), sizeof(std::complex<double>),
op.data, sizeof(std::complex<double>),
py::format_descriptor<std::complex<double>>::format(), 2,
{op.rows(), op.cols()},
{sizeof(std::complex<double>) * op.cols(),
Expand All @@ -48,7 +48,7 @@ void bindComplexMatrix(py::module &mod) {
.def(py::init([](const py::buffer &b) {
py::buffer_info info = b.request();
complex_matrix m(info.shape[0], info.shape[1]);
extractMatrixData(info, m.data());
extractMatrixData(info, m.data);
return m;
}),
"Create a :class:`ComplexMatrix` from a buffer of data, such as a "
Expand Down Expand Up @@ -91,7 +91,7 @@ void bindComplexMatrix(py::module &mod) {
static_cast<ssize_t>(sizeof(std::complex<double>))};

// Return a numpy array without copying data
return py::array_t<std::complex<double>>(shape, strides, m.data());
return py::array_t<std::complex<double>>(shape, strides, m.data);
},
"Convert :class:`ComplexMatrix` to numpy.ndarray.");
}
Expand Down
26 changes: 26 additions & 0 deletions python/tests/backends/test_Quantinuum_LocalEmulation_kernel.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,15 @@
from typing import List


def requires_openfermion():
    """Return a pytest skip marker that skips the decorated test unless
    both `openfermion` and `openfermionpyscf` are importable."""
    open_fermion_found = True
    try:
        # Availability probe only; the imported modules are not used here.
        import openfermion, openfermionpyscf
    except ImportError:
        # Catch only ImportError so genuine failures (KeyboardInterrupt,
        # SystemExit, syntax errors in deps) are not silently swallowed
        # by a bare `except:`.
        open_fermion_found = False
    return pytest.mark.skipif(not open_fermion_found,
                              reason="openfermion is not installed")

def assert_close(want, got, tolerance=1.0e-1) -> bool:
    """Report whether `got` lies within `tolerance` of `want`."""
    difference = abs(want - got)
    return difference < tolerance

Expand Down Expand Up @@ -320,6 +329,23 @@ def test_toffoli():
cudaq.sample(test_toffoli)


@requires_openfermion()
def test_observe_chemistry():
    """Build the H2 molecular Hamiltonian (sto-3g) and observe it on a
    trivial (gate-free) ansatz, checking the expectation value against
    the expected reference of ~0.707."""
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
    molecule, data = cudaq.chemistry.create_molecular_hamiltonian(
        geometry, 'sto-3g', 1, 0)

    qubit_count = data.n_orbitals * 2

    @cudaq.kernel
    def kernel(thetas: list[float]):
        # No gates applied: the observe call runs on the |0...0> state.
        qubits = cudaq.qvector(qubit_count)

    result = cudaq.observe(kernel, molecule, [.0, .0, .0, .0],
                           shots_count=1000)

    expectation = result.expectation()
    # Bug fix: assert_close returns a bool; previously its result was
    # discarded, so this test could never fail on a wrong expectation.
    assert assert_close(expectation, 0.707)


# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
Expand Down
2 changes: 1 addition & 1 deletion python/tests/builder/test_kernel_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
import pytest
import random
import numpy as np
import sys
import os
from typing import List

import cudaq
Expand Down
2 changes: 2 additions & 0 deletions runtime/common/Trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

namespace cudaq {

struct QuditInfo;

/// @brief A trace is a circuit representation of the executed computation, as
/// seen by the execution manager. (Here, a circuit is represented as a list
/// of instructions on qudits). Since the execution manager cannot "see" control
Expand Down
2 changes: 1 addition & 1 deletion runtime/cudaq/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ add_library(${LIBRARY_NAME}
qis/remote_state.cpp
qis/state.cpp
utils/cudaq_utils.cpp
utils/tensor.cpp
utils/matrix.cpp
distributed/mpi_plugin.cpp)

set_property(GLOBAL APPEND PROPERTY CUDAQ_RUNTIME_LIBS ${LIBRARY_NAME})
Expand Down
64 changes: 30 additions & 34 deletions runtime/cudaq/algorithms/evolve.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,50 +27,49 @@ using async_evolve_result = std::future<evolve_result>;

namespace __internal__ {
template <typename OpTy>
cudaq::operator_sum<cudaq::matrix_operator> convertOp(const OpTy &op) {
cudaq::sum_op<cudaq::matrix_handler> convertOp(const OpTy &op) {
if constexpr (std::is_convertible_v<
OpTy, cudaq::product_operator<cudaq::matrix_operator>>) {
cudaq::operator_sum<cudaq::matrix_operator> convertedOp(op);
OpTy, cudaq::product_op<cudaq::matrix_handler>>) {
cudaq::sum_op<cudaq::matrix_handler> convertedOp(op);
return convertedOp;
} else if constexpr (std::is_convertible_v<
OpTy, cudaq::operator_sum<cudaq::matrix_operator>>) {
OpTy, cudaq::sum_op<cudaq::matrix_handler>>) {
return op;
} else {
throw std::invalid_argument("Invalid operator type: cannot convert type " +
std::string(typeid(op).name()) +
" to cudaq::product_operator or "
"cudaq::operator_sum");
" to cudaq::product_op or "
"cudaq::sum_op");
}
}

template <typename OpTy>
std::vector<cudaq::operator_sum<cudaq::matrix_operator>>
std::vector<cudaq::sum_op<cudaq::matrix_handler>>
convertOps(const std::vector<OpTy> &ops) {
std::vector<cudaq::operator_sum<cudaq::matrix_operator>> converted;
std::vector<cudaq::sum_op<cudaq::matrix_handler>> converted;
for (const auto &op : ops)
converted.emplace_back(convertOp(op));
return converted;
}

template <typename OpTy>
std::vector<cudaq::operator_sum<cudaq::matrix_operator>>
std::vector<cudaq::sum_op<cudaq::matrix_handler>>
convertOps(const std::initializer_list<OpTy> &ops) {
std::vector<cudaq::operator_sum<cudaq::matrix_operator>> converted;
std::vector<cudaq::sum_op<cudaq::matrix_handler>> converted;
for (const auto &op : ops)
converted.emplace_back(convertOp(op));
return converted;
}
} // namespace __internal__

#if CUDAQ_USE_STD20
template <
operator_type HamTy,
operator_type CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
operator_type ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>>
template <operator_type HamTy,
operator_type CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
operator_type ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>>
#else
template <typename HamTy,
typename CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename = std::enable_if_t<cudaq::operator_type<HamTy> &&
cudaq::operator_type<CollapseOpTy> &&
cudaq::operator_type<ObserveOpTy>>>
Expand Down Expand Up @@ -127,14 +126,13 @@ evolve_result evolve(const HamTy &hamiltonian,
}

#if CUDAQ_USE_STD20
template <
operator_type HamTy,
operator_type CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
operator_type ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>>
template <operator_type HamTy,
operator_type CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
operator_type ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>>
#else
template <typename HamTy,
typename CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename = std::enable_if_t<cudaq::operator_type<HamTy> &&
cudaq::operator_type<CollapseOpTy> &&
cudaq::operator_type<ObserveOpTy>>>
Expand Down Expand Up @@ -163,14 +161,13 @@ evolve(const HamTy &hamiltonian, const cudaq::dimension_map &dimensions,
}

#if CUDAQ_USE_STD20
template <
operator_type HamTy,
operator_type CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
operator_type ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>>
template <operator_type HamTy,
operator_type CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
operator_type ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>>
#else
template <typename HamTy,
typename CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename = std::enable_if_t<cudaq::operator_type<HamTy> &&
cudaq::operator_type<CollapseOpTy> &&
cudaq::operator_type<ObserveOpTy>>>
Expand Down Expand Up @@ -199,14 +196,13 @@ evolve(const HamTy &hamiltonian, const cudaq::dimension_map &dimensions,
}

#if CUDAQ_USE_STD20
template <
operator_type HamTy,
operator_type CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
operator_type ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>>
template <operator_type HamTy,
operator_type CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
operator_type ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>>
#else
template <typename HamTy,
typename CollapseOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename ObserveOpTy = cudaq::operator_sum<cudaq::matrix_operator>,
typename CollapseOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename ObserveOpTy = cudaq::sum_op<cudaq::matrix_handler>,
typename = std::enable_if_t<cudaq::operator_type<HamTy> &&
cudaq::operator_type<CollapseOpTy> &&
cudaq::operator_type<ObserveOpTy>>>
Expand Down
7 changes: 3 additions & 4 deletions runtime/cudaq/algorithms/evolve_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -156,12 +156,11 @@ evolve_async(std::function<evolve_result()> evolveFunctor,
}

evolve_result evolveSingle(
const operator_sum<cudaq::matrix_operator> &hamiltonian,
const sum_op<cudaq::matrix_handler> &hamiltonian,
const cudaq::dimension_map &dimensions, const schedule &schedule,
const state &initial_state, base_integrator &integrator,
const std::vector<operator_sum<cudaq::matrix_operator>>
&collapse_operators = {},
const std::vector<operator_sum<cudaq::matrix_operator>> &observables = {},
const std::vector<sum_op<cudaq::matrix_handler>> &collapse_operators = {},
const std::vector<sum_op<cudaq::matrix_handler>> &observables = {},
bool store_intermediate_results = false,
std::optional<int> shots_count = std::nullopt);

Expand Down
12 changes: 5 additions & 7 deletions runtime/cudaq/base_integrator.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,21 +18,19 @@ namespace cudaq {
// Struct captures the system dynamics needed by the integrator
struct SystemDynamics {
std::vector<int64_t> modeExtents;
operator_sum<cudaq::matrix_operator> hamiltonian;
std::vector<operator_sum<cudaq::matrix_operator>> collapseOps;
sum_op<cudaq::matrix_handler> hamiltonian;
std::vector<sum_op<cudaq::matrix_handler>> collapseOps;
std::unordered_map<std::string, std::complex<double>> parameters;

SystemDynamics(
const std::vector<int64_t> extents,
const operator_sum<cudaq::matrix_operator> &ham,
const std::vector<operator_sum<cudaq::matrix_operator>> &cOps = {},
const sum_op<cudaq::matrix_handler> &ham,
const std::vector<sum_op<cudaq::matrix_handler>> &cOps = {},
const std::unordered_map<std::string, std::complex<double>> &params = {})
: modeExtents(extents), hamiltonian(ham), collapseOps(cOps),
parameters(params) {}

SystemDynamics()
: hamiltonian(operator_sum<cudaq::matrix_operator>(
cudaq::matrix_operator::empty())){};
SystemDynamics() : hamiltonian(cudaq::matrix_op::empty()){};
};

class base_time_stepper;
Expand Down
Loading
Loading