Closed
Changes from 13 commits
Commits (18)
0865b7f
initial simplification of wildcard budget code in gst.py
rileyjmurray Sep 29, 2025
58eff07
remove unused code
rileyjmurray Sep 29, 2025
4a8b7fb
Clean up style in reportables.py. Move some leakage metric definition…
rileyjmurray Sep 30, 2025
f6f31a0
fix mathematical bug in definition of subspace-restricted Frobenius norm
rileyjmurray Oct 6, 2025
d6faeb6
apply bugfix to last commit (needed to include POVM effects, whereas …
rileyjmurray Oct 6, 2025
36e1e36
make note of possible changes to eigenvalue_entanglement_infidelity
rileyjmurray Oct 6, 2025
57fea37
make add_lago_models robust to estimates that are missing `stdgaugeop…
rileyjmurray Oct 6, 2025
1d5df52
seems to fix diamond-distance wildcard with leakage models. Still nee…
rileyjmurray Oct 7, 2025
b74c4a0
define proper constants (either module-level or class-level) that hol…
rileyjmurray Oct 7, 2025
e0a8fbe
add a second stage to leakage-aware gauge optimization
rileyjmurray Oct 8, 2025
d6953d7
make note of idea without making half-baked changes to code
rileyjmurray Oct 8, 2025
2de1162
changes to leakage.py
rileyjmurray Oct 8, 2025
e4098f7
implement fully-general gate_leakage_profile and gate_seepage_profile…
rileyjmurray Oct 10, 2025
9b90bae
test for matrix_tools.py:is_projector. Use np.linalg.matrix_rank in l…
rileyjmurray Oct 10, 2025
06c3c36
correct errors in recent changes to fidelity-based gauge optimization
rileyjmurray Oct 10, 2025
a768328
heavy refactor of gaugeopt_to_target as entry point for gauge optimiz…
rileyjmurray Oct 15, 2025
276c254
tweaks in last commit's refactor of gaugeopt_to_target
rileyjmurray Oct 15, 2025
fe9c4ec
extremely messy implementation of leakage metrics and gauge optimizat…
rileyjmurray Oct 17, 2025
87 changes: 65 additions & 22 deletions pygsti/algorithms/gaugeopt.py
@@ -372,6 +372,7 @@ def _create_objective_fn(model, target_model, item_weights: Optional[dict[str,fl
if item_weights is None: item_weights = {}
opWeight = item_weights.get('gates', 1.0)
spamWeight = item_weights.get('spam', 1.0)
prepWeight = item_weights.get('prep', spamWeight)
mxBasis = model.basis

#Use the target model's basis if model's is unknown
@@ -608,21 +609,51 @@ def _mock_objective_fn(v):

dim = int(_np.sqrt(mxBasis.dim))
if n_leak > 0:
B = _tools.leading_dxd_submatrix_basis_vectors(dim - n_leak, dim, mxBasis)
P = B @ B.T.conj()
if _np.linalg.norm(P.imag) > 1e-12:
msg = f"Attempting to run leakage-aware gauge optimization with basis {mxBasis}\n"
msg += "is resulting an orthogonal projector onto the computational subspace that\n"
msg += "is not real-valued. Try again with a different basis, like 'l2p1' or 'gm'."
raise ValueError(msg)
else:
P = P.real
P = _tools.superop_subspace_projector(dim - n_leak, dim, mxBasis)
transform_mx_arg = (P, _tools.matrixtools.IdentityOperator())
# ^ The semantics of this tuple are defined by the frobeniusdist function
# in the ExplicitOpModelCalc class.
else:
transform_mx_arg = None
# ^ It would be equivalent to set this to a pair of IdentityOperator objects.
from pygsti.report.reportables import eigenvalue_entanglement_infidelity, vec_fidelity


"""
We say that a (model, target) pair admits a _perfect gauge_ if "model" has no relational errors
when compared to "target." If "model" is expressed in the perfect gauge, then

(1) its fidelities with the target gates match the eigenvalue fidelities with the target gates;

(2) the fidelities between rho_e and {E_t : E_t in M_target} will match those between
rho_e and {E_e : E_e in M_estimate}; and

(3) the fidelities between rho_t and {E_e : E_e in M_estimate} will match those between
rho_t and {E_t : E_t in M_target}.

In view of this, taking fidelity as the gauge-optimization objective minimizes an
aggregation of the mismatches between the various fidelities above.
"""

gate_fidelity_targets : dict[ Union[str, _baseobjs.Label], Union[float, _np.floating] ] = dict()
if gates_metric == 'fidelity':
for lbl in target_model.operations:
G_target = target_model.operations[lbl]
G_curest = model.operations[lbl]
t = 1 - eigenvalue_entanglement_infidelity(G_curest.to_dense(), G_target.to_dense(), model.basis)
gate_fidelity_targets[lbl] = min(t, 1.0)

spam_fidelity_targets : dict[
tuple[Union[str, _baseobjs.Label], Union[str, _baseobjs.Label]],
dict[Union[str, _baseobjs.Label], Union[float, _np.floating]]
] = dict()
if spam_metric == 'fidelity':
for preplbl in target_model.preps:
for povmlbl in target_model.povms:
rho_curest = model.preps[preplbl].to_dense()
M_curest = model.povms[povmlbl]
t = {elbl: vec_fidelity(rho_curest, e.to_dense(), mxBasis).item() for (elbl, e) in M_curest.items() }
spam_fidelity_targets[(preplbl, povmlbl)] = t

def _objective_fn(gauge_group_el, oob_check):
mdl = _transform_with_oob_check(model, gauge_group_el, oob_check)
@@ -656,12 +687,15 @@ def _objective_fn(gauge_group_el, oob_check):
ret += val

elif gates_metric == "fidelity":
# If n_leak==0, then subspace_entanglement_fidelity is just entanglement_fidelity
# Leakage-aware metrics NOT available
for opLbl in mdl.operations:
wt = item_weights.get(opLbl, opWeight)
top = target_model.operations[opLbl].to_dense()
mop = mdl.operations[opLbl].to_dense()
ret += wt * (1.0 - _tools.subspace_entanglement_fidelity(top, mop, mxBasis, n_leak))**2
t = gate_fidelity_targets[opLbl]
v = _tools.entanglement_fidelity(top, mop, mxBasis)
z = _np.abs(t - v)
ret += wt * z

elif gates_metric == "tracedist":
# If n_leak==0, then subspace_jtracedist is just jtracedist.
@@ -691,17 +725,26 @@

elif spam_metric == "fidelity":
# Leakage-aware metrics NOT available
for preplabel, m_prep in mdl.preps.items():
wt = item_weights.get(preplabel, spamWeight)
rhoMx1 = _tools.vec_to_stdmx(m_prep.to_dense(), mxBasis)
t_prep = target_model.preps[preplabel]
rhoMx2 = _tools.vec_to_stdmx(t_prep.to_dense(), mxBasis)
ret += wt * (1.0 - _tools.fidelity(rhoMx1, rhoMx2))**2

for povmlabel in mdl.povms.keys():
wt = item_weights.get(povmlabel, spamWeight)
fidelity = _tools.povm_fidelity(mdl, target_model, povmlabel)
ret += wt * (1.0 - fidelity)**2
val = 0.0
for preplbl in target_model.preps:
wt_prep = item_weights.get(preplbl, prepWeight)
for povmlbl in target_model.povms:
wt_povm = item_weights.get(povmlbl, spamWeight)
rho_curest = model.preps[preplbl].to_dense()
rho_target = target_model.preps[preplbl].to_dense()
M_curest = model.povms[povmlbl]
M_target = target_model.povms[povmlbl]
vs_prep = {elbl: vec_fidelity(rho_curest, e.to_dense(), mxBasis) for (elbl, e) in M_target.items() }
vs_povm = {elbl: vec_fidelity(rho_target, e.to_dense(), mxBasis) for (elbl, e) in M_curest.items() }
t_dict = spam_fidelity_targets[(preplbl, povmlbl)]
val1 = 0.0
for lbl, f in vs_prep.items():
val1 += _np.abs(t_dict[lbl] - f)
val2 = 0.0
for lbl, f in vs_povm.items():
val2 += _np.abs(t_dict[lbl] - f)
val += (wt_prep * val1 + wt_povm * val2)
ret += val

elif spam_metric == "tracedist":
# Leakage-aware metrics NOT available.
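The "perfect gauge" reasoning in the docstring above can be exercised outside the optimizer. The sketch below is a minimal illustration, not pyGSTi API: the wrapper fidelity_mismatch and its arguments are hypothetical, while entanglement_fidelity and eigenvalue_entanglement_infidelity are used with the same signatures as in the diff. It precomputes the gauge-invariant per-gate fidelity targets once, then scores a candidate gauge matrix S by the summed mismatch, mirroring the gates_metric == 'fidelity' branch.

import numpy as np
from pygsti.tools import entanglement_fidelity
from pygsti.report.reportables import eigenvalue_entanglement_infidelity

def fidelity_mismatch(est_ops, target_ops, basis, gauge_mx):
    # est_ops / target_ops: dicts of dense superoperator matrices keyed by gate label.
    # Returns sum_i |t_i - F(S^{-1} G_i S, G_i^target)|, where each target t_i is the
    # gauge-invariant eigenvalue entanglement fidelity, capped at 1 as in the diff.
    S = gauge_mx
    S_inv = np.linalg.inv(S)
    total = 0.0
    for lbl, G_tgt in target_ops.items():
        G_est = est_ops[lbl]
        t = min(1.0 - eigenvalue_entanglement_infidelity(G_est, G_tgt, basis), 1.0)
        f = entanglement_fidelity(S_inv @ G_est @ S, G_tgt, basis)
        total += abs(t - f)
    return total

At a perfect gauge every summand vanishes, so the optimizer drives this aggregate mismatch toward zero rather than pushing raw fidelities toward 1.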
4 changes: 3 additions & 1 deletion pygsti/baseobjs/basisconstructors.py
@@ -10,6 +10,7 @@
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import itertools as _itertools
import re as _re
import numbers as _numbers
import numpy as _np
import scipy.sparse as _sps
@@ -697,6 +698,7 @@ def gm_labels(matrix_dim):
lblList.extend(["Z_{%d}" % (k) for k in range(1, d)])
return lblList


def lf_labels(matrix_dim: int) -> tuple[str,...]:
if matrix_dim != 3:
raise NotImplementedError()
@@ -718,6 +720,7 @@ def lf_labels(matrix_dim: int) -> tuple[str,...]:
)
return lbls


def lf_matrices(matrix_dim: int) -> list[_np.ndarray]:
"""
This basis is used to isolate the parts of Hilbert-Schmidt space that act on
Expand All @@ -743,7 +746,6 @@ def lf_matrices(matrix_dim: int) -> list[_np.ndarray]:
return leakage_basis_mxs



def qsim_matrices(matrix_dim):
"""
Get the elements of the QuantumSim basis with matrix dimension `matrix_dim`.
7 changes: 5 additions & 2 deletions pygsti/modelmembers/operations/linearop.py
@@ -14,7 +14,7 @@

from pygsti.baseobjs.opcalc import bulk_eval_compact_polynomials_complex as _bulk_eval_compact_polynomials_complex
from pygsti.modelmembers import modelmember as _modelmember
from pygsti.tools import optools as _ot
from pygsti.tools import optools as _ot, matrixtools as _mt
from pygsti import SpaceT

from typing import Any
@@ -416,11 +416,14 @@ def frobeniusdist_squared(self, other_op, transform=None, inv_transform=None) ->
float
"""
self_mx = self.to_dense("minimal")
other_mx = other_op.to_dense("minimal")
if transform is not None:
self_mx = self_mx @ transform
if isinstance(inv_transform, _mt.IdentityOperator):
other_mx = other_mx @ transform
if inv_transform is not None:
self_mx = inv_transform @ self_mx
return _ot.frobeniusdist_squared(self_mx, other_op.to_dense("minimal"))
return _ot.frobeniusdist_squared(self_mx, other_mx)


def frobeniusdist(self, other_op, transform=None, inv_transform=None):
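With the (P, IdentityOperator()) pair built in gaugeopt.py, the updated frobeniusdist_squared reduces to a subspace-restricted Frobenius distance: both the estimate and the target are right-multiplied by the projector P, so only action on the computational subspace is compared. Below is a minimal plain-NumPy sketch of that reduction; the diagonal projector and the function name are illustrative stand-ins (the real projector comes from _tools.superop_subspace_projector). The POVM-effect version in effect.py (next file) is analogous, with transform.T applied to the dense vectors instead.

import numpy as np

def subspace_frobeniusdist_squared(G_est, G_tgt, P):
    # What frobeniusdist_squared computes when transform=P and inv_transform is an
    # IdentityOperator:  || (G_est - G_tgt) @ P ||_F^2
    diff = (G_est - G_tgt) @ P
    return float(np.linalg.norm(diff, 'fro') ** 2)

# Toy example: a 9x9 (qutrit) superoperator, projecting onto the first four
# superoperator basis directions (the "computational" block).
rng = np.random.default_rng(0)
P = np.diag([1.0] * 4 + [0.0] * 5)
G_tgt = np.eye(9)
G_est = G_tgt + 0.01 * rng.standard_normal((9, 9))
print(subspace_frobeniusdist_squared(G_est, G_tgt, P))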
6 changes: 5 additions & 1 deletion pygsti/modelmembers/povms/effect.py
@@ -14,6 +14,7 @@

from pygsti.modelmembers import modelmember as _modelmember
from pygsti.tools import optools as _ot
from pygsti.tools import matrixtools as _mt
from pygsti.baseobjs.opcalc import bulk_eval_compact_polynomials_complex as _bulk_eval_compact_polynomials_complex

from typing import Any
@@ -142,9 +143,12 @@ def frobeniusdist_squared(self, other_spam_vec, transform=None, inv_transform=No
float
"""
vec = self.to_dense()
other_vec = other_spam_vec.to_dense()
if transform is not None:
vec = transform.T @ vec
return _ot.frobeniusdist_squared(vec, other_spam_vec.to_dense())
if isinstance(inv_transform, _mt.IdentityOperator):
other_vec = transform.T @ other_vec
return _ot.frobeniusdist_squared(vec, other_vec)

def residuals(self, other_spam_vec, transform=None, inv_transform=None):
"""
3 changes: 2 additions & 1 deletion pygsti/models/explicitcalc.py
@@ -172,14 +172,15 @@ def frobeniusdist(self, other_calc, transform_mx: Union[None, _np.ndarray, Trans
if item_weights is None: item_weights = {}
opWeight = item_weights.get('gates', 1.0)
spamWeight = item_weights.get('spam', 1.0)
prepWeight = item_weights.get('prep', spamWeight)

for opLabel, gate in self.operations.items():
wt = item_weights.get(opLabel, opWeight)
d += wt * gate.frobeniusdist_squared(other_calc.operations[opLabel], P, invP)
nSummands += wt * (gate.dim)**2

for lbl, rhoV in self.preps.items():
wt = item_weights.get(lbl, spamWeight)
wt = item_weights.get(lbl, prepWeight)
d += wt * rhoV.frobeniusdist_squared(other_calc.preps[lbl], P, invP)
nSummands += wt * rhoV.dim

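The new 'prep' entry in item_weights falls back to the 'spam' weight when it is not supplied, so existing weight dictionaries behave as before. A small sketch of that lookup (the helper name resolve_weights is illustrative, not pyGSTi API):

def resolve_weights(item_weights=None):
    # Mirrors the weight resolution in _create_objective_fn and
    # ExplicitOpModelCalc.frobeniusdist: preps get their own weight,
    # defaulting to the SPAM weight.
    if item_weights is None:
        item_weights = {}
    op_weight = item_weights.get('gates', 1.0)
    spam_weight = item_weights.get('spam', 1.0)
    prep_weight = item_weights.get('prep', spam_weight)
    return op_weight, spam_weight, prep_weight

print(resolve_weights({'spam': 0.25}))               # (1.0, 0.25, 0.25)
print(resolve_weights({'spam': 0.25, 'prep': 2.0}))  # (1.0, 0.25, 2.0)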