From d3b5fe96839fcb8176aec218a9c2685fd16966f1 Mon Sep 17 00:00:00 2001 From: Ashe Miller Date: Thu, 29 Feb 2024 15:49:24 -0700 Subject: [PATCH 001/102] Added Propagation Code for EOC Error Generators --- pygsti/propErrorGens/ErrorPropagator.py | 228 ++++++++++++ pygsti/propErrorGens/propagatableerrorgen.py | 361 +++++++++++++++++++ pygsti/propErrorGens/pyGSTiStimTranslator.py | 65 ++++ 3 files changed, 654 insertions(+) create mode 100644 pygsti/propErrorGens/ErrorPropagator.py create mode 100644 pygsti/propErrorGens/propagatableerrorgen.py create mode 100644 pygsti/propErrorGens/pyGSTiStimTranslator.py diff --git a/pygsti/propErrorGens/ErrorPropagator.py b/pygsti/propErrorGens/ErrorPropagator.py new file mode 100644 index 000000000..0ea24cdca --- /dev/null +++ b/pygsti/propErrorGens/ErrorPropagator.py @@ -0,0 +1,228 @@ +import stim +from pygsti.propErrorGens.propagatableerrorgen import * +from pygsti.propErrorGens.pyGSTiStimTranslator import * +from numpy import abs +from numpy.linalg import multi_dot +from scipy.linalg import expm + + +''' +takes a pygsti circuit where each gate has a defined error model and returns the errorgenerators necessary to create an +end of circuit error generator under a variety of scenarios + +circ: pygsti circuit +errorModel: Dictionary defined the small markovian error generators and their rates for each gate +BCHOrder: in cases where the BCH approximation is used, carries it out to the desired order (can currently only handle order 1 or 2) +BCHLayerWise: If true will push the errors through one layer of gatesand then combines them using the bch approximation at each layer +If false will simply push all errors to the end +NonMarkovian: Pushes the error generators to the end and then formats them to work with the cumulant expansion code +MultiGateDict: Containts the translation between a numbered gate Gxpi22 and the PyGSTi standard gate used when a singular gate has +multiple error iterations +MultiGate: lets the code know +returns: 
list of propagatableerrorgens +''' +def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False): + qubits=len(circ.line_labels) + stim_layers=[] + for j in range(circ.depth): + layer = circ.layer(j) + stim_layer=pyGSTiLayer_to_stimLayer(layer,qubits,MultiGateDict,MultiGate) + stim_layers.append(stim_layer) + stim_layers.pop(0) #Immeditielty toss the first layer because it is not important, + + propagation_layers=[] + if not BCHLayerwise or NonMarkovian: + while len(stim_layers) != 0: + top_layer=stim_layers.pop(0) + for layer in stim_layers: + top_layer = layer*top_layer + propagation_layers.append(top_layer) + else: + propagation_layers = stim_layers + + errorLayers=buildErrorlayers(circ,errorModel,qubits) + + num_error_layers=len(errorLayers) + fully_propagated_layers=[] + for _ in range(0,num_error_layers-1): + err_layer=errorLayers.pop(0) + layer=propagation_layers.pop(0) + for err_order in err_layer: + for errorGenerator in err_order: + errorGenerator.propagate_error_gen_inplace_tableau(layer) + if BCHLayerwise and not NonMarkovian: + following_layer = errorLayers.pop(0) + new_errors=BCH_Handler(err_layer,following_layer,BCHOrder) + errorLayers.insert(new_errors,0) + else: + fully_propagated_layers.append(err_layer) + + fully_propagated_layers.append(errorLayers.pop(0)) + if BCHLayerwise and not NonMarkovian: + for order in errorLayers: + for error in order: + if len(fully_propagated_layers)==0: + fully_propagated_layers.append(error) + elif error in fully_propagated_layers: + idy=fully_propagated_layers.index(error) + new_error=error+fully_propagated_layers[idy] + fully_propagated_layers.pop(idy) + fully_propagated_layers.append(new_error) + else: + fully_propagated_layers.append(error) + return fully_propagated_layers + + elif not BCHLayerwise and not NonMarkovian: + simplified_EOC_errors=[] + if BCHOrder == 1: + for layer in fully_propagated_layers: + for order in layer: + for error in order: + if 
len(simplified_EOC_errors)==0: + simplified_EOC_errors.append(error) + elif error in simplified_EOC_errors: + idy=simplified_EOC_errors.index(error) + new_error=error+simplified_EOC_errors[idy] + simplified_EOC_errors.pop(idy) + if not (abs(new_error.get_Error_Rate()) <.000001): + simplified_EOC_errors.append(new_error) + else: + if not (abs(error.get_Error_Rate())<.000001): + simplified_EOC_errors.append(error) + else: + Exception("Higher propagated through Errors are not Implemented Yet") + return simplified_EOC_errors + + else: + return fully_propagated_layers + + +''' +takes two error layers (list of propagatableerrorgens) and find the bch combination of the two +err_layer: list lists of propagatableerrorgens +following_layer: list of propagatableerrorgens +BCHOrder: Order to carry the bch expansion out to, can currently be set to one or two +returns list of lists of propagatableerrorgens. The outer list contains each of them individual list denote order +''' +def BCH_Handler(err_layer,following_layer,BCHOrder): + new_errors=[] + for curr_order in range(0,BCHOrder): + working_order=[] + if curr_order == 0: + used_indexes=[] + for error in err_layer[curr_order]: + try: + idy=following_layer[curr_order].index(error) + working_order.append(error+following_layer[curr_order][idy]) + used_indexes.append(idy) + except: + working_order.append(error) + for idy,error in enumerate(following_layer[curr_order]): + if idy in used_indexes: + continue + else: + working_order.append(error) + + new_errors.append(working_order) + elif curr_order ==1: + working_order=[] + for error1 in err_layer[curr_order-1]: + for error2 in following_layer[curr_order-1]: + errorlist = commute_errors(error1,error2,BCHweight=1/2) + for error3 in errorlist: + if len(working_order)==0: + working_order.append(error3) + elif error3 in working_order: + idy=working_order.index(error3) + new_error=error3+working_order[idy] + working_order.pop(idy) + working_order.append(new_error) + else: + 
working_order.append(error3) + if len(err_layer)==2: + for error3 in err_layer[1]: + if len(working_order)==0: + working_order.append(error3) + elif error3 in working_order: + idy=working_order.index(error3) + new_error=error3+working_order[idy] + working_order.pop(idy) + working_order.append(new_error) + else: + working_order.append(error3) + if len(following_layer)==2: + for error3 in following_layer[1]: + if len(working_order)==0: + working_order.append(error3) + elif error3 in working_order: + idy=working_order.index(error3) + new_error=error3+working_order[idy] + working_order.pop(idy) + if new_error.get_Error_Rate() != 0j: + working_order.append(new_error) + else: + working_order.append(error3) + new_errors.append(working_order) + + else: + Exception("Higher Orders are not Implemented Yet") + + +''' +takes a pygst circuit object and error Dictionary and creates error layers + +inputs +circ: pygsti circuit +errorDict: Dictionary defined the small markovian error generators and their rates for each gate +qubits: number of qubits in the circuit + +output +ErrorGens, a list of error gen layers (which are list of propagatable errorgens) + +''' +def buildErrorlayers(circ,errorDict,qubits): + ErrorGens=[] + #For the jth layer of each circuit + for j in range(circ.depth): + l = circ.layer(j) # get the layer + errorLayer=[] + for _, g in enumerate(l): # for gate in layer l + gErrorDict = errorDict[g.name] #get the errors for the gate + p1=qubits*'I' # make some paulis why? 
+ p2=qubits*'I' + for errs in gErrorDict: #for an error in the accompanying error dictionary + errType=errs[0] + paulis=[] + for ind,el in enumerate(g): #enumerate the gate ind =0 is name ind = 1 is first qubit ind = 2 is second qubit + if ind !=0: #if the gate element of concern is not the name + p1=p1[:el] + errs[1][ind-1] +p1[(el+1):] + + paulis.append(p1) + if errType in "CA": + for ind,el in enumerate(g): + if ind !=0: + p2=p2[:el] + errs[2][ind-1] +p2[(el+1):] + paulis.append(p2) + errorLayer.append(propagatableerrorgen(errType,paulis,gErrorDict[errs])) + ErrorGens.append([errorLayer]) + return ErrorGens + + + +# There's a factor of a half missing in here. +def nm_propagators(corr, Elist): + Kms = [] + for idm in range(len(Elist)): + Am = Elist[idm].toWeightedErrorBasisMatrix + # This assumes that Elist is in reverse chronological order + partials = [] + for idn in range(idm, len(Elist)): + An = Elist[idn].toWeightedErrorBasisMatrix() + partials += [corr[idm,idn] * Am @ An] + partials[0] = partials[0]/2 + Kms += [sum(partials,0)] + return Kms + +def averaged_evolution(corr, Elist): + Kms = nm_propagators(corr, Elist) + return multi_dot([expm(Km) for Km in Kms]) \ No newline at end of file diff --git a/pygsti/propErrorGens/propagatableerrorgen.py b/pygsti/propErrorGens/propagatableerrorgen.py new file mode 100644 index 000000000..1fa6febe2 --- /dev/null +++ b/pygsti/propErrorGens/propagatableerrorgen.py @@ -0,0 +1,361 @@ +from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel +from pygsti.propErrorGens.pyGSTiStimTranslator import * +import stim +from numpy import array,kron +from pygsti.tools import change_basis +from pygsti.tools.lindbladtools import create_elementary_errorgen +''' +Similar to errorgenlabel but has an errorrate included as well as additional classes +''' +class propagatableerrorgen(ElementaryErrorgenLabel): + ''' + Labels an elementary errorgen by a type, pauli and error rate + ''' + + @classmethod + def cast(cls, obj, sslbls=None, 
identity_label='I'): + raise NotImplementedError("TODO: Implement casts for this method") + + + ''' + Initiates the errorgen object + Inputs + errorgen_type: charecture can be set to 'H' Hamiltonian, 'S' Stochastic, 'C' Correlated or 'A' active following the conventions + of the taxonomy of small markovian errorgens paper + + Outputs: + propagatableerrorgen object + ''' + def __init__(self,errorgen_type,basis_element_labels,error_rate): + self.errorgen_type=str(errorgen_type) + self.basis_element_labels=tuple(basis_element_labels) + self.error_rate=error_rate + + ''' + hashes the error gen object + ''' + def __hash__(self): + return hash((self.errorgen_type,self.basis_element_labels)) + + ''' + checks and if two error gens have the same type and labels + ''' + def __eq__(self, other): + return (self.errorgen_type == other.errorgen_type + and self.basis_element_labels == other.basis_element_labels) + + ''' + displays the errorgens as strings + ''' + def __str__(self): + return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")" + ": " + self.error_rate + + + def __repr__(self): + return str((self.errorgen_type, self.basis_element_labels, self.error_rate)) + + ''' + adds the error rates together oftwo error generators of the same type and label + ''' + def __add__(self,other): + if self.errorgen_type == other.errorgen_type and self.basis_element_labels == other.basis_element_labels: + return propagatableerrorgen(self.errorgen_type,self.basis_element_labels,self.error_rate + other.error_rate) + else: + raise Exception("ErrorGens are not equal") + + ''' + returns the dictionary representation of the error generator inline with pygsti notation + ''' + def to_dict(self): + return {self: self.error_rate} + + + ''' + returns the error rate + ''' + def get_Error_Rate(self): + return self.error_rate + + ''' + returns the string representation of the first pauli label + ''' + def getP1(self): + return self.basis_element_labels[0] + + ''' + returns 
the string representation of the second pauli label + ''' + def getP2(self): + return self.basis_element_labels[1] + + ''' + propagates a propagatableerrorgen object through a clifford layer, returns the created error gen + ''' + def propagate_error_gen_inplace(self, player): + slayer = pyGSTiLayer_to_stimLayer(player) + new_basis_labels = [] + weightmod = 1 + for pauli in self.basis_element_labels: + temp=pyGSTiPauli_2_stimPauli(pauli) + temp = slayer(temp) + weightmod=weightmod*temp.sign + new_basis_labels.append(stimPauli_2_pyGSTiPauli(temp)) + + if self.errorgen_type in 'HCA': + self.error_rate=self.error_rate*weightmod + self.basis_element_labels =tuple(new_basis_labels) + + ''' + using stim propagates the associated pauli labels through a stim tableu object, the object is modified inplace + ''' + def propagate_error_gen_inplace_tableau(self, slayer): + new_basis_labels = [] + weightmod = 1 + for pauli in self.basis_element_labels: + temp=pyGSTiPauli_2_stimPauli(pauli) + temp = slayer(temp) + weightmod=weightmod*temp.sign + new_basis_labels.append(stimPauli_2_pyGSTiPauli(temp)) + + if self.errorgen_type in 'HCA': + self.error_rate=self.error_rate*weightmod + self.basis_element_labels =tuple(new_basis_labels) + + ''' + returns the strings representing the pauli labels in the pygsti representation of paulis as stim PauliStrings + ''' + def returnStimPaulis(self): + paulis_string=[] + for pauli in self.basis_element_labels: + paulis_string.append(stim.PauliString(pauli)) + return tuple(paulis_string) + + ''' + Returns the errorbasis matrix for the associated errorgenerator mulitplied by its error rate + + input: A pygsti defined matrix basis by default can be pauli-product, gellmann 'gm' or then pygsti standard basis 'std' + functions defaults to pauli product if not specified + ''' + def toWeightedErrorBasisMatrix(self,matrix_basis='pp'): + PauliDict={ + 'I' : array([[1.0,0.0],[0.0,1.0]]), + 'X' : array([[0.0j, 1.0+0.0j], [1.0+0.0j, 0.0j]]), + 'Y' : array([[0.0, 
-1.0j], [1.0j, 0.0]]), + 'Z' : array([[1.0, 0.0j], [0.0j, -1.0]]) + } + paulis=[] + for paulistring in self.basis_element_labels: + for idx,pauli in enumerate(paulistring): + if idx == 0: + pauliMat = PauliDict[pauli] + else: + pauliMat=kron(pauliMat,PauliDict[pauli]) + paulis.append(pauliMat) + if self.errorgen_type in 'HS': + return self.error_rate*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0]),'std',matrix_basis) + else: + return self.error_rate*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0],paulis[1]),'std',matrix_basis) + + + + + +''' +Returns the Commutator of two errors +''' +def commute_errors(ErG1,ErG2, weightFlip=1.0, BCHweight=1.0): + def com(p1,p2): + P1 = pyGSTiPauli_2_stimPauli(p1) + P2=pyGSTiPauli_2_stimPauli(p2) + P3=P1*P2-P2*P1 + return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) + + def acom(p1,p2): + P1 = pyGSTiPauli_2_stimPauli(p1) + P2=pyGSTiPauli_2_stimPauli(p2) + P3=P1*P2+P2*P1 + return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) + + def labelMultiply(p1,p2): + P1 = pyGSTiPauli_2_stimPauli(p1) + P2=pyGSTiPauli_2_stimPauli(p2) + P3=P1*P2 + return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) + + errorGens=[] + + wT=ErG1.getWeight()*ErG2.getWeight()*weightFlip*BCHweight + + if ErG1.getType()=='H' and ErG2.getType()=='H': + pVec=com(ErG1.getP1() , ErG2.getP2()) + errorGens.append( propagatableerrorgen( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) + + elif ErG1.getType()=='H' and ErG2.getType()=='S': + pVec=com(ErG2.getP1() , ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'C' , [ErG2.getP1() , pVec[1]] , 1j*wT*pVec[0] ) ) + + elif ErG1.getType()=='S' and ErG2.getType()=='H': + pVec=com(ErG2.getP1() , ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'C' , [ErG2.getP1() , pVec[1]] , -1j*wT *pVec[0] ) ) + + elif ErG1.getType()=='H' and ErG2.getType()=='C': + pVec1=com(ErG2.getP1() , ErG1.getP1()) + errorGens.append( propagatableerrorgen('C' , [pVec1[1], ErG2.getP2()] , 1j*wT*pVec1[0] ) ) + 
pVec2=com(ErG2.getP2() , ErG1.getP1()) + errorGens.append( propagatableerrorgen('C' , [pVec2[1] , ErG2.getP1()] , 1j*wT*pVec2[0] ) ) + + elif ErG1.getType()=='C' and ErG2.getType()=='H': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType()=='H' and ErG2.getType()=='A': + pVec1 = com(ErG1.getP1() , ErG2.getP1()) + errorGens.append( propagatableerrorgen('A' , [pVec1[1] , ErG2.getP2()] , -1j*wT*pVec1[0]) ) + pVec2 = com(ErG1.getP1() , ErG2.getP2()) + errorGens.append( propagatableerrorgen('A' , [ErG2.getP1(), pVec2[1]] , -1j*wT*pVec2[0] ) ) + + elif ErG1.getType()=='A' and ErG2.getType()=='H': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType()=='S' and ErG2.getType()=='S': + errorGens.append( propagatableerrorgen('H', ErG1.getP1(),0 )) + + elif ErG1.getType()=='S' and ErG2.getType()=='C': + pVec1=labelMultiply(ErG1.getP1() , ErG2.getP1()) + pVec2=labelMultiply(ErG2.getP2() , ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(ErG1.getP1() , ErG2.getP2()) + pVec2 = labelMultiply(ErG2.getP1() , ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 =acom(ErG2.getP1(), ErG2.getP2()) + pVec2 = labelMultiply(pVec1[1],ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'A' ,[pVec2[1], ErG1.getP1()] , -1j*.5*wT*pVec1[0]*pVec2[0])) + pVec1=acom(ErG2.getP1(), ErG2.getP2()) + pVec2=labelMultiply(ErG1.getP1(),pVec1[1]) + errorGens.append( propagatableerrorgen( 'A', [ErG1.getP1() ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'S': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'S' and ErG2.getType() == 'A': + pVec1 =labelMultiply(ErG1.getP1() , ErG2.getP1()) + pVec2=labelMultiply(ErG2.getP2() , ErG1.getP1()) + errorGens.append( 
propagatableerrorgen( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) + pVec1=labelMultiply(ErG1.getP1() , ErG2.getP2()) + pVec2=labelMultiply(ErG2.getP1() , ErG1.getP1()) + errorGens.append( propagatableerrorgen( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = com(ErG2.getP1() , ErG2.getP2()) + pVec2 = com(ErG1.getP1(),pVec1[1]) + errorGens.append( propagatableerrorgen( 'A', [ErG1.getP1(), pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) + + elif ErG1.getType() == 'A' and ErG1.getType() == 'S': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'C': + A=ErG1.getP1() + B=ErG1.getP2() + P=ErG2.getP1() + Q=ErG2.getP2() + pVec1 = labelMultiply(A,P) + pVec2 =labelMultiply(Q,B) + errorGens.append( propagatableerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(A,Q) + pVec2 =labelMultiply(P,B) + errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(B,P) + pVec2 =labelMultiply(Q,A) + errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(B,Q) + pVec2 =labelMultiply(P,A) + errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=com(P,pVec1[1]) + errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=com(Q,pVec1[1]) + errorGens.append( propagatableerrorgen( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(P,Q) + pVec2=com(pVec1[1],A) + errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(P,Q) + pVec2=com(pVec1[1],B) + errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=acom(P,Q) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( propagatableerrorgen( 
'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'A': + A=ErG1.getP1() + B=ErG1.getP2() + P=ErG2.getP1() + Q=ErG2.getP2() + pVec1 = labelMultiply(A,P) + pVec2 =labelMultiply(Q,B) + errorGens.append( propagatableerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(A,Q) + pVec2 =labelMultiply(P,B) + errorGens.append( propagatableerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(B,P) + pVec2 =labelMultiply(Q,A) + errorGens.append( propagatableerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(P,A) + pVec2 =labelMultiply(B,Q) + errorGens.append( propagatableerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = com(P,Q) + pVec2 =com(A,pVec1[1]) + errorGens.append( propagatableerrorgen('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) + pVec1 = com(P,Q) + pVec2 =com(B,pVec1[1]) + errorGens.append( propagatableerrorgen('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) + pVec1 = acom(A,B) + pVec2 =com(P,pVec1[1]) + errorGens.append( propagatableerrorgen('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = acom(A,B) + pVec2 =com(Q,pVec1[1]) + errorGens.append( propagatableerrorgen('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = com(P,Q) + pVec2 =acom(A,B) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( propagatableerrorgen('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + elif ErG1.getType() == 'A' and ErG2.getType() == 'C': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'A' and ErG2.getType() == 'A': + A=ErG1.getP1() + B=ErG1.getP2() + P=ErG2.getP1() + Q=ErG2.getP2() + pVec1=labelMultiply(Q,B) + pVec2=labelMultiply(A,P) + errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(P,A) + pVec2=labelMultiply(B,Q) + 
errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(B,P) + pVec2=labelMultiply(Q,A) + errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(A,Q) + pVec2=labelMultiply(P,B) + errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(B,pVec1[1]) + errorGens.append(propagatableerrorgen('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(A,pVec1[1]) + errorGens.append(propagatableerrorgen('C',[pVec2[1],B] ,-.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(A,B) + pVec2=com(P,pVec1[1]) + errorGens.append(propagatableerrorgen('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(A,B) + pVec2=com(Q,pVec1[1]) + errorGens.append(propagatableerrorgen('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(A,B) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( propagatableerrorgen('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + + return errorGens + + diff --git a/pygsti/propErrorGens/pyGSTiStimTranslator.py b/pygsti/propErrorGens/pyGSTiStimTranslator.py new file mode 100644 index 000000000..039ada10c --- /dev/null +++ b/pygsti/propErrorGens/pyGSTiStimTranslator.py @@ -0,0 +1,65 @@ +import stim + + + +''' +returns a dictionary capable of translating pygsti standard gate labels to stim tablue representations of gates +''' +def Gate_Translate_Dict_p_2_s(): + pyGSTi_to_stim_GateDict={ + 'Gi' : stim.Tableau.from_named_gate('I'), + 'Gxpi' : stim.Tableau.from_named_gate('X'), + 'Gypi' : stim.Tableau.from_named_gate('Y'), + 'Gzpi' : stim.Tableau.from_named_gate('Z'), + 'Gxpi2' : stim.Tableau.from_named_gate('SQRT_X'), + 'Gypi2' : stim.Tableau.from_named_gate('SQRT_Y'), + 'Gzpi2' : stim.Tableau.from_named_gate('SQRT_Z'), + 'Gxmpi2': stim.Tableau.from_named_gate('SQRT_X_DAG'), + 'Gympi2': stim.Tableau.from_named_gate('SQRT_Y_DAG'), + 'Gzmpi2': 
stim.Tableau.from_named_gate('SQRT_Z_DAG'), + 'Gh' : stim.Tableau.from_named_gate('H'), + 'Gxx' : stim.Tableau.from_named_gate('SQRT_XX'), + 'Gzz' : stim.Tableau.from_named_gate('SQRT_ZZ'), + 'Gcnot' : stim.Tableau.from_named_gate('CNOT'), + 'Gswap' : stim.Tableau.from_named_gate('SWAP') + } + return pyGSTi_to_stim_GateDict + + +''' +returns a dict translating the stim tableu (gate) key to pyGSTi gate keys +TODO: change the stim tablues to tablues keys +''' +def Gate_Translate_Dict_s_2_p(): + dict = Gate_Translate_Dict_p_2_s() + return {v: k for k, v in dict.items()} + +''' +Takes a layer of pyGSTi gates and composes them into a single stim Tableu +''' +def pyGSTiLayer_to_stimLayer(player,qubits,MultiGateDict={},MultiGate=False): + slayer=stim.Tableau(qubits) + started = False + stimDict=Gate_Translate_Dict_p_2_s() + for sub_lbl in player: + if not MultiGate: + temp = stimDict[sub_lbl.name] + else: + temp = stimDict[MultiGateDict[sub_lbl.name]] + slayer.append(temp,sub_lbl.qubits) + return slayer + +''' +Takes the typical pygsti label for paulis and returns a stim PauliString object +''' +def pyGSTiPauli_2_stimPauli(pauli): + return stim.PauliString(pauli) + + +''' +Converts a stim paulistring to the string typically used in pysti to label paulis +warning: stim ofter stores a pauli phase in the string (i.e +1,-1,+i,-i) this is assumed positive +in this function, if the weight is needed please store paulistring::weight prior to applying this function +''' +def stimPauli_2_pyGSTiPauli(pauliString): + return str(pauliString)[1:].replace('_',"I") \ No newline at end of file From fdd689d36c2261884b4cbca09ffe206cef5a4d58 Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Mon, 18 Mar 2024 15:17:37 -0600 Subject: [PATCH 002/102] stim integration to pygsti Moved some files around, integrated stim translations into pyGSTi, Added a tutorial notebook to examples --- .../Propagatable error gens tutorial.ipynb | 167 ++++++++++++++++++ 
pygsti/circuits/circuit.py | 53 ++++++ .../errorgenpropagation/errorpropagator.py} | 18 +- .../propagatableerrorgen.py | 4 +- .../utilspygstistimtranslator.py} | 1 - pygsti/tools/internalgates.py | 30 ++++ 6 files changed, 262 insertions(+), 11 deletions(-) create mode 100644 jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb rename pygsti/{propErrorGens/ErrorPropagator.py => extras/errorgenpropagation/errorpropagator.py} (94%) rename pygsti/{propErrorGens => extras/errorgenpropagation}/propagatableerrorgen.py (98%) rename pygsti/{propErrorGens/pyGSTiStimTranslator.py => extras/errorgenpropagation/utilspygstistimtranslator.py} (99%) diff --git a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb new file mode 100644 index 000000000..7489126d9 --- /dev/null +++ b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb @@ -0,0 +1,167 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from pygsti.extras.errorgenpropagation.propagatableerrorgen import *\n", + "from pygsti.extras.errorgenpropagation.errorpropagator import *\n", + "from pygsti.circuits import Circuit\n", + "import numpy as np\n", + "import pygsti.processors\n", + "import pygsti\n", + "import pygsti.tools.lindbladtools as _lt\n", + "import scipy\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to the Propagatable Error Generators Code" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining a circuit and error generators" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Currently Error Propgagation works for any model that meets three criteria\n", + "\n", + " 1. The circuit is clifford\n", + " 2. 
The errors on each gate can be defined at a time t of interest in the small markovian errors basis\n", + " 3. The error error model is defined such that a gate G has some linear combination of error generators following it\n", + "\n", + "We can therefore, start a code by defining a circuit and an error model by simply following the common pyGSTi notation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "errorModel={\n", + " 'Gxpi2' : {('H','Y'):.01}\n", + "\n", + "}\n", + "c=Circuit(10*[('Gxpi2',0)])" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can take the above definitions and plug them into the errorpropagator function, to get out a list of post-circuit error generators out." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "errors=ErrorPropagator(c,errorModel,BCHOrder=1,BCHLayerwise=False,NonMarkovian=False)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here BCH order determines the to what order the BCH order will be taken to (if applicable). BCHLayerwise will if false, propagatate all errors to the end before taking the BCH expansion, otherwise it will push the errorgens through a layer and combine with the the error generators for that layer by the rules given by the BCHOrder. Non-markovian prevents any simplification or BCH expansions being taken, instead allowing the output to be a list a lists, where the each sublist denotes the errorgenerators that were occuring at time t in the circuit." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Additionally, if you want to describe a gate with multiple associated error definitions you can define it as follows." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[('H', ('X',), (0.09999999999999999+0j))]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "MultiGateDict={'Gxpi22' : 'Gxpi2'}\n", + "errorModel={\n", + " 'Gxpi2' : {('H','Y'):.01},\n", + " 'Gxpi22' : {('H','X'):.01}\n", + "\n", + "}\n", + "c=Circuit(10*[('Gxpi2',0),('Gxpi22',0)])\n", + "\n", + "ErrorPropagator(c,errorModel,MultiGateDict=MultiGateDict, MultiGate=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the errors are propagated to the process matrix given by the end of circuit error generators is given by" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "expMat=np.zeros([4**len(c.line_labels),4**len(c.line_labels)],dtype=np.complex128)\n", + "for error in errors:\n", + " expMat +=error.toWeightedErrorBasisMatrix()\n", + "processMatrix = scipy.linalg.expm(expMat)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "PyGSTi_EOC", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 84d1e0a27..bdb5a88d3 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3644,6 +3644,59 @@ def _write_q_circuit_tex(self, filename): # TODO f.write("\\end{document}") f.close() + + def convert_to_stim_tableau_layers(self,gate_name_conversions=None): + """ + Converts this circuit to a list of stim tableau layers + + Parameters + ---------- + 
gate_name_conversions : Dict + A map from pygsti gatenames to standard stim tableaus. If set to None a standard set of gate names is used + + Returns + ------- + A layer by layer list of stim tabluaes + """ + try: + import stim + except ImportError: + raise ImportError("Stim is required for this operation, and it does not appear to be installed.") + if gate_name_conversions is None: + gate_name_conversions = _itgs.standard_gatenames_stim_conversions() + + qubits=len(self.line_labels) + stim_layers=[] + for j in range(self.depth): + layer = self.layer(j) + stim_layer=stim.Tableau(qubits) + for sub_lbl in layer: + temp = gate_name_conversions[sub_lbl.name] + stim_layer.append(temp,sub_lbl.qubits) + stim_layers.append(stim_layer) + return stim_layers + + def convert_to_stim_tableau(self,gate_name_conversions=None): + """ + Converts this circuit to a stim tableu + + Parameters + ---------- + gate_name_conversions : Dict + A map from pygsti gatenames to standard stim tableaus. If set to None a standard set of gate names is used + + Returns + ------- + A single stim tableu representing the entire circuit + """ + layers=self.convert_to_stim_tableau_layers(gate_name_conversions) + tableu=layers.pop(0) + for layer in layers: + tableu=tableu*layer + return tableu + + + def convert_to_cirq(self, qubit_conversion, wait_duration=None, diff --git a/pygsti/propErrorGens/ErrorPropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py similarity index 94% rename from pygsti/propErrorGens/ErrorPropagator.py rename to pygsti/extras/errorgenpropagation/errorpropagator.py index 0ea24cdca..97a4f0f98 100644 --- a/pygsti/propErrorGens/ErrorPropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -1,9 +1,10 @@ import stim -from pygsti.propErrorGens.propagatableerrorgen import * -from pygsti.propErrorGens.pyGSTiStimTranslator import * +from pygsti.extras.errorgenpropagation.propagatableerrorgen import * +from 
pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * from numpy import abs from numpy.linalg import multi_dot from scipy.linalg import expm +from pygsti.tools.internalgates import standard_gatenames_stim_conversions ''' @@ -22,12 +23,11 @@ returns: list of propagatableerrorgens ''' def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False): - qubits=len(circ.line_labels) - stim_layers=[] - for j in range(circ.depth): - layer = circ.layer(j) - stim_layer=pyGSTiLayer_to_stimLayer(layer,qubits,MultiGateDict,MultiGate) - stim_layers.append(stim_layer) + stim_dict=standard_gatenames_stim_conversions() + if MultiGate: + for key in MultiGateDict: + stim_dict[key]=stim_dict[MultiGateDict[key]] + stim_layers=circ.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) stim_layers.pop(0) #Immeditielty toss the first layer because it is not important, propagation_layers=[] @@ -40,7 +40,7 @@ def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=Fal else: propagation_layers = stim_layers - errorLayers=buildErrorlayers(circ,errorModel,qubits) + errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) num_error_layers=len(errorLayers) fully_propagated_layers=[] diff --git a/pygsti/propErrorGens/propagatableerrorgen.py b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py similarity index 98% rename from pygsti/propErrorGens/propagatableerrorgen.py rename to pygsti/extras/errorgenpropagation/propagatableerrorgen.py index 1fa6febe2..7976b0f50 100644 --- a/pygsti/propErrorGens/propagatableerrorgen.py +++ b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py @@ -1,5 +1,5 @@ from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel -from pygsti.propErrorGens.pyGSTiStimTranslator import * +from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * import stim from numpy import array,kron from pygsti.tools import change_basis @@ -7,6 
+7,8 @@ ''' Similar to errorgenlabel but has an errorrate included as well as additional classes ''' +# Create a new pygsti-ish method where we use a modified dictionary and a modified local error generator where the keys are +# stim PauliStrings class propagatableerrorgen(ElementaryErrorgenLabel): ''' Labels an elementary errorgen by a type, pauli and error rate diff --git a/pygsti/propErrorGens/pyGSTiStimTranslator.py b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py similarity index 99% rename from pygsti/propErrorGens/pyGSTiStimTranslator.py rename to pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py index 039ada10c..6e33c1e99 100644 --- a/pygsti/propErrorGens/pyGSTiStimTranslator.py +++ b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py @@ -39,7 +39,6 @@ def Gate_Translate_Dict_s_2_p(): ''' def pyGSTiLayer_to_stimLayer(player,qubits,MultiGateDict={},MultiGate=False): slayer=stim.Tableau(qubits) - started = False stimDict=Gate_Translate_Dict_p_2_s() for sub_lbl in player: if not MultiGate: diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index e3664f79c..91ac69567 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -315,7 +315,37 @@ def unitary_to_standard_gatename(unitary): if not callable(U) and not callable(unitary) and U.shape == unitary.shape and _np.allclose(unitary, U): return std_name return None +def standard_gatenames_stim_conversions(): + """ + A dictionary converting the gates with standard names to stim tableus for these gates. 
Currently is only capable of converting + clifford gates, no capability for T gates + Returns + ------- + A dict mapping string to tableu + """ + try: + import stim + except ImportError: + raise ImportError("Stim is required for this operation, and it does not appear to be installed.") + pyGSTi_to_stim_GateDict={ + 'Gi' : stim.Tableau.from_named_gate('I'), + 'Gxpi' : stim.Tableau.from_named_gate('X'), + 'Gypi' : stim.Tableau.from_named_gate('Y'), + 'Gzpi' : stim.Tableau.from_named_gate('Z'), + 'Gxpi2' : stim.Tableau.from_named_gate('SQRT_X'), + 'Gypi2' : stim.Tableau.from_named_gate('SQRT_Y'), + 'Gzpi2' : stim.Tableau.from_named_gate('SQRT_Z'), + 'Gxmpi2': stim.Tableau.from_named_gate('SQRT_X_DAG'), + 'Gympi2': stim.Tableau.from_named_gate('SQRT_Y_DAG'), + 'Gzmpi2': stim.Tableau.from_named_gate('SQRT_Z_DAG'), + 'Gh' : stim.Tableau.from_named_gate('H'), + 'Gxx' : stim.Tableau.from_named_gate('SQRT_XX'), + 'Gzz' : stim.Tableau.from_named_gate('SQRT_ZZ'), + 'Gcnot' : stim.Tableau.from_named_gate('CNOT'), + 'Gswap' : stim.Tableau.from_named_gate('SWAP') + } + return pyGSTi_to_stim_GateDict def standard_gatenames_cirq_conversions(): """ From 9faef8aad145898a14404f4c795dc23aea2012ac Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Wed, 20 Mar 2024 10:30:24 -0600 Subject: [PATCH 003/102] Added Single Error Per Gate non Markovianity --- .../errorgenpropagation/errorpropagator.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py index 97a4f0f98..56788b0d9 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -213,14 +213,16 @@ def buildErrorlayers(circ,errorDict,qubits): def nm_propagators(corr, Elist): Kms = [] for idm in range(len(Elist)): - Am = Elist[idm].toWeightedErrorBasisMatrix - # This assumes that Elist is 
in reverse chronological order - partials = [] - for idn in range(idm, len(Elist)): - An = Elist[idn].toWeightedErrorBasisMatrix() - partials += [corr[idm,idn] * Am @ An] - partials[0] = partials[0]/2 - Kms += [sum(partials,0)] + for idmm in range(len(Elist[idm][0])): + Am = Elist[idm][0][idmm].toWeightedErrorBasisMatrix() + # This assumes that Elist is in reverse chronological order + partials = [] + for idn in range(idm, len(Elist)): + for idnn in range(len(Elist[idn][0])): + An = Elist[idn][0][idnn].toWeightedErrorBasisMatrix() + partials += [corr[idm,idn] * Am @ An] + partials[0] = partials[0]/2 + Kms += [sum(partials,0)] return Kms def averaged_evolution(corr, Elist): From 3f5d569b84b57758034579845394e42ef1782f67 Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:32:03 -0600 Subject: [PATCH 004/102] Fixed a bug in the non-markovianity code, added tutorial --- .../Propagatable error gens tutorial.ipynb | 35 +++++++++++++++---- .../errorgenpropagation/errorpropagator.py | 27 +++++++------- 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb index 7489126d9..bd8342532 100644 --- a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb +++ b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -49,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -70,7 +70,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -95,7 +95,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -104,7 +104,7 
@@ "[('H', ('X',), (0.09999999999999999+0j))]" ] }, - "execution_count": 4, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -131,7 +131,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -140,6 +140,29 @@ " expMat +=error.toWeightedErrorBasisMatrix()\n", "processMatrix = scipy.linalg.expm(expMat)" ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Non-Markovianity" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to use the non markovianity function you need to define an n x n correlation where n is the number of layers." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py index 56788b0d9..9b41ba89f 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -1,7 +1,8 @@ import stim from pygsti.extras.errorgenpropagation.propagatableerrorgen import * from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * -from numpy import abs +from numpy import abs,zeros, complex128 + from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions @@ -210,21 +211,23 @@ def buildErrorlayers(circ,errorDict,qubits): # There's a factor of a half missing in here. 
-def nm_propagators(corr, Elist): +def nm_propagators(corr, Elist,qubits): Kms = [] for idm in range(len(Elist)): + Am=zeros([4**qubits,4**qubits],dtype=complex128) for idmm in range(len(Elist[idm][0])): - Am = Elist[idm][0][idmm].toWeightedErrorBasisMatrix() + Am += Elist[idm][0][idmm].toWeightedErrorBasisMatrix() # This assumes that Elist is in reverse chronological order - partials = [] - for idn in range(idm, len(Elist)): - for idnn in range(len(Elist[idn][0])): - An = Elist[idn][0][idnn].toWeightedErrorBasisMatrix() - partials += [corr[idm,idn] * Am @ An] - partials[0] = partials[0]/2 - Kms += [sum(partials,0)] + partials = [] + for idn in range(idm, len(Elist)): + An=zeros([4**qubits,4**qubits],dtype=complex128) + for idnn in range(len(Elist[idn][0])): + An = Elist[idn][0][idnn].toWeightedErrorBasisMatrix() + partials += [corr[idm,idn] * Am @ An] + partials[0] = partials[0]/2 + Kms += [sum(partials,0)] return Kms -def averaged_evolution(corr, Elist): - Kms = nm_propagators(corr, Elist) +def averaged_evolution(corr, Elist,qubits): + Kms = nm_propagators(corr, Elist,qubits) return multi_dot([expm(Km) for Km in Kms]) \ No newline at end of file From 582787d4cfb10343b4e591b8ab159af940d1eb5e Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:45:56 -0600 Subject: [PATCH 005/102] added layer defined errors --- pygsti/extras/errorgenpropagation/errorpropagator.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py index 9b41ba89f..d8134540c 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -23,7 +23,7 @@ MultiGate: lets the code know returns: list of propagatableerrorgens ''' -def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False): +def 
ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False,ErrorLayerDef=False): stim_dict=standard_gatenames_stim_conversions() if MultiGate: for key in MultiGateDict: @@ -41,7 +41,10 @@ def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=Fal else: propagation_layers = stim_layers - errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) + if not ErrorLayerDef: + errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) + else: + errorLayers=[[errorModel]]*circ.depth num_error_layers=len(errorLayers) fully_propagated_layers=[] @@ -206,6 +209,7 @@ def buildErrorlayers(circ,errorDict,qubits): paulis.append(p2) errorLayer.append(propagatableerrorgen(errType,paulis,gErrorDict[errs])) ErrorGens.append([errorLayer]) + print(ErrorGens) return ErrorGens From d19a57df5a21675307453093a7dcea18f4b42dba Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Jun 2024 22:57:26 -0600 Subject: [PATCH 006/102] Initial commit porting over LFH code This is the initial commit for the addition of functionality in pygsti for modeling systems with LFH noise present. This adds a new lfh module to extras (for now) which includes a special type of LindbladErrorgen that can have gaussianly fluctuating hamiltonian parameters. Additionally this adds a special LFH aware explicit model class, and a number of specialized forward simulators. 
--- pygsti/extras/lfh/lfherrorgen.py | 211 +++++++ pygsti/extras/lfh/lfhforwardsims.py | 861 ++++++++++++++++++++++++++++ pygsti/extras/lfh/lfhmodel.py | 78 +++ 3 files changed, 1150 insertions(+) create mode 100644 pygsti/extras/lfh/lfherrorgen.py create mode 100644 pygsti/extras/lfh/lfhforwardsims.py create mode 100644 pygsti/extras/lfh/lfhmodel.py diff --git a/pygsti/extras/lfh/lfherrorgen.py b/pygsti/extras/lfh/lfherrorgen.py new file mode 100644 index 000000000..bb1d19441 --- /dev/null +++ b/pygsti/extras/lfh/lfherrorgen.py @@ -0,0 +1,211 @@ +""" +Defines the LFHLindbladErrorgen class, an extension of LindbladErrorgen with +support for fluctuating Hamiltonian parameters. +""" +#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + +import numpy as _np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + + +#--------- New LindbladErrorgen ------------# +#Pattern match a bit off of the parameterized lindblad error generator Jordan cooked up +class LFHLindbladErrorgen(_LindbladErrorgen): + """ + A Lindblad error generator with parameters that are combined + to get the target error generator based on some function params_to_coeffs of the parameter vector + params_to_coeffs should return a numpy array + """ + def coeff_dict_from_vector(self): + basis = _BuiltinBasis('pp', 4) + v = self.current_rates + #print(len(v)) + error_rates_dict = {} + for i in range(3): + 
error_rates_dict[('H',basis.labels[i+1])] = v[i] + labels = [('S', 'X'), ('A','X','Y'),('A','X','Z'),('C','X','Z'),('S','Y'),('A','Y','Z'),('C','X','Y'),('C','Y','Z'),('S','Z')] + for i in range(3,12): + error_rates_dict[(labels[i-3])] = v[i] + return error_rates_dict + + def __init__(self, h_means, otherlindbladparams, h_devs, lindblad_basis='auto', elementary_errorgen_basis='pp', + evotype="default", state_space=1, parameterization='CPTPLND', truncate=True, rng= None): + #Pass in a vector of standard lindblad parameters as well as a vector of standard deviations + #for each of the hamiltonian parameters + + #Store the values of the mean hamiltonian rates. + self.means= h_means + self.otherlindbladparams = otherlindbladparams + + self.paramvals = _np.array([param for param in self.means] + [param for param in self.otherlindbladparams]) #the parameters + self.current_rates = self.paramvals.copy() + + #let's make the h deviations a dictionary instead, so we can control which of the hamiltonian rates are fluctuating + #to make the marginalization more efficient (avoiding duplicated calculations when std. devs are 0. + #We'll make the keys of the dictionary the index in h_means that the deviation corresponds to. + + self.dev_dict = h_devs + self.devs= _np.fromiter(h_devs.values()) + + #set the random number generator used for sampling from a normal distribution. 
+ if rng is not None: + if isinstance(rng, int): + self.rng= _np.random.default_rng(rng) + else: + self.rng = rng + else: + self.rng= _np.random.default_rng() + + #Get the coefficient dictionary for this parameter vector + self.coefficients = self.coeff_dict_from_vector() + #super().from_elementary_errorgens(coeff_dict, state_space = 1) + + state_space = _statespace.StateSpace.cast(state_space) + dim = state_space.dim # Store superop dimension + basis = _Basis.cast(elementary_errorgen_basis, dim) + + #convert elementary errorgen labels to *local* labels (ok to specify w/global labels) + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = state_space.tensor_product_block_labels(0) # just take first TPB labels as all labels + elementary_errorgens = _collections.OrderedDict( + [(_LocalElementaryErrorgenLabel.cast(lbl, sslbls, identity_label_1Q), val) + for lbl, val in self.coefficients.items()]) + + parameterization = LindbladParameterization.minimal_from_elementary_errorgens(elementary_errorgens) \ + if parameterization == "auto" else LindbladParameterization.cast(parameterization) + + eegs_by_typ = { + 'ham': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type == 'H'}, + 'other_diagonal': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type == 'S'}, + 'other': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type != 'H'} + } + + blocks = [] + for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): + relevant_eegs = eegs_by_typ[blk_type] # KeyError => unrecognized block type! 
+ bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) + blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) + blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) + blocks.append(blk) + #print(blk) + + evotype= _Evotype.cast(evotype) + evotype.prefer_dense_reps = True + + super().__init__(blocks, evotype=evotype, state_space=1) + + @property + def num_params(self): + """ + Get the number of independent parameters which specify this operation. + + Returns + ------- + int + the number of independent parameters. + """ + return len(self.paramvals) + len(self.devs) + + def to_vector(self): + ret_vec= [param for param in self.paramvals] + [dev for dev in self.devs] + + return _np.array(ret_vec) + + def from_vector(self,v, close=False, dirty_value=True): + """ + Initialize the operation using a vector of parameters. + + Parameters + ---------- + v : numpy array + The 1D vector of operation parameters. Length + must == num_params() + + close : bool, optional + Whether `v` is close to this operation's current + set of parameters. Under some circumstances, when this + is true this call can be completed more quickly. + + dirty_value : bool, optional + The value to set this object's "dirty flag" to before exiting this + call. This is passed as an argument so it can be updated *recursively*. + Leave this set to `True` unless you know what you're doing. 
+ + Returns + ------- + None + """ + assert(len(v) == self.num_params) + + #split off the terms that go into paramvals and devs + v = _np.array(v) + new_paramvals= v[:len(self.paramvals)] + new_otherlindblad_params = v[3:len(self.paramvals)] + new_devs= v[len(self.paramvals):] + new_means= v[0:3] + + self.paramvals = new_paramvals + self.means= new_means + self.devs= new_devs + self.dev_dict = {key:val for key,val in zip(self.dev_dict.keys(), new_devs)} + self.otherlindbladparams = new_otherlindblad_params + + self.coefficients = self.coeff_dict_from_vector() + + #coefficient blocks and current rates get reset to the new mean values passed in + #resampling can cause the values of the coefficient blocks and the rates to become + #different though. + self.current_rates= self.paramvals.copy() + off = 0 + u = self.paramvals + for blk in self.coefficient_blocks: + blk.from_vector(u[off: off + blk.num_params]) + off += blk.num_params + self._update_rep() + self.dirty = dirty_value + + #Now the special ingredient we need is functionality for resampling + #What we want to be able to do is use the current hamiltonian means + #and std deviations to get a new set of hamiltonian weights. 
+ + def sample_hamiltonian_rates(self):#, dirty_value=True): + + new_h_rates = [self.rng.normal(loc=mean, scale=self.dev_dict[i]) if i in self.dev_dict else mean + for i, mean in enumerate(self.means)] + + #now we want to update the coefficent blocks and current rates: + self.current_rates = _np.array(new_h_rates + [other_lindblad for other_lindblad in self.otherlindbladparams]) + off = 0 + u = self.current_rates + for blk in self.coefficient_blocks: + blk.from_vector(u[off: off + blk.num_params]) + off += blk.num_params + self._update_rep() + #self.dirty = dirty_value \ No newline at end of file diff --git a/pygsti/extras/lfh/lfhforwardsims.py b/pygsti/extras/lfh/lfhforwardsims.py new file mode 100644 index 000000000..c89bd8b06 --- /dev/null +++ b/pygsti/extras/lfh/lfhforwardsims.py @@ -0,0 +1,861 @@ +""" +Defines the various forward simulators for use with models containing operations with +fluctuating Hamiltonian parameters. +""" +#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + + +import numpy as _np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype +from pygsti.extras.lfh.lfherrorgen import LFHLindbladErrorgen as _LFHLindbladErrorgen + + + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + +#Next we need to define a new custom weak forward simulator +class LFHWeakForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + """ + + def __init__(self, shots, model=None, base_seed=None): + """ + Construct a new WeakForwardSimulator object. 
+ + Parameters + ---------- + shots: int + Number of times to run each circuit to obtain an approximate probability + model : Model + Optional parent Model to be stored with the Simulator + """ + self.shots = shots + super().__init__(model) + + def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probabilities for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + clip_to : 2-tuple, optional + (min,max) to clip return value if not None. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + probs : dictionary + A dictionary such that `probs[circuit]` is an ordered dictionary of + outcome probabilities whose keys are outcome labels. + """ + + #We want to loop through each of the circuits in a "rasterization pass" collecting one + #one shot each. At the start of each loop we want to resample the randomly fluctuating + #hamiltonian parameters. + #We should be able to farm out the probability calculation to another forward simulator + #though. + probs_for_shot = [] + for i in range(self.shots): + #Have the model resample the hamiltonian rates: + self.model.sample_hamiltonian_rates() + helper_sim = _MapForwardSimulator(model=self.model) + + #Now that we've sampled the hamiltonian rates calculate the probabilities for + #all of the circuits. + #import pdb + #pdb.set_trace() + probs_for_shot.append(helper_sim.bulk_probs(circuits)) + #Now loop through and perform an averaging over the output probabilities. + #Initialize a dictionary for storing the final results. 
+ #print(probs_for_shot) + outcome_labels= probs_for_shot[0][circuits[0]].keys() + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in probs_for_shot: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl]/self.shots + + #return the averaged probabilities: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! + #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? 
+# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs = self.bulk_probs(circuits) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits) + + #need to parse this and construct the corresponding entries of the dprobs dict. + + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec, close=True) + + return dprobs + + + #Try out a different "weak" forward simulator that doesn't use sampling to do the integration +#over the gaussian, but rather approximates the expectation values using gauss-hermite quadrature +class LFHIntegratingForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + """ + + def __init__(self, order, model=None, base_seed=None): + """ + Construct a new WeakForwardSimulator object. + + Parameters + ---------- + order: int + order of the gauss-hermite approximation for the integral. + model : Model + Optional parent Model to be stored with the Simulator + """ + self.order = order + self.helper_sim = None + super().__init__(model) + + def build_sampling_grid(self): + #build the grid of sample points and weights + #for the simulators model. 
+ + #Need to identify how many deviation parameters there are. + num_deviances= 0 + dev_values= [] + mean_values = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + dev_values.extend(subop.errorgen.devs) + mean_values.extend(subop.errorgen.means) + num_deviances += len(subop.errorgen.devs) + + #Once we know the number of deviances and their values we can start building + #out the grid of sampling points and weights. + base_one_d_points , base_one_d_weights= roots_hermite(self.order) + + #print(base_one_d_points) + #print(base_one_d_weights) + + #print(mean_values) + #print(dev_values) + + #The weights remain the same, but I need to modify the sampling points + #Now I need to get updates + gaussian_one_d_points = [[] for _ in range(len(dev_values))] + + for i,(dev, mean) in enumerate(zip(dev_values, mean_values)): + for point in base_one_d_points: + gaussian_one_d_points[i].append(mean+sqrt(2)*dev*point) + + #print(gaussian_one_d_points[0]) + + return gaussian_one_d_points, (1/sqrt(pi))*base_one_d_weights + + + def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None, return_layout= False, cached_layout= None): + """ + Construct a dictionary containing the probabilities for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + clip_to : 2-tuple, optional + (min,max) to clip return value if not None. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. 
+ + Returns + ------- + probs : dictionary + A dictionary such that `probs[circuit]` is an ordered dictionary of + outcome probabilities whose keys are outcome labels. + """ + + sample_points_lists , weights = self.build_sampling_grid() + + #The grid of points is the cartesian product of the sample point lists. + + sample_point_grid = _itertools.product(*sample_points_lists) + + #do this for convienience + weight_grid = _itertools.product(*([weights]*len(sample_points_lists))) + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. + hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + if self.helper_sim is None: + self.add_helper_sim() + + #create a circuit layout that we can reuse to speed things up + #(We'll be using the same circuit list at every evaluation point) + if cached_layout is None: + ckt_layout = self.helper_sim.create_layout(circuits) + else: + ckt_layout = cached_layout + + weighted_probs_for_point = [] + + for sample_grid_point, weight_grid_point in zip(sample_point_grid, weight_grid): + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = _np.array(sample_grid_point) + + #despite storing it as a grid, we just need the scalar product of the weights + weight_value = _np.prod(weight_grid_point) + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + #We can pass in a COPAlayout for this instead of a list of circuits, which speeds things up. 
+ probs_for_point = self.helper_sim.bulk_probs(ckt_layout) + #probs_for_point = helper_sim.bulk_probs(circuits) + + #print(probs_for_point) + + #Iterate through and add weight terms. + outcome_labels= probs_for_point[circuits[0]].keys() + weighted_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for ckt in circuits: + for lbl in outcome_labels: + weighted_probs[ckt][lbl] = probs_for_point[ckt][lbl] * weight_value + + weighted_probs_for_point.append(weighted_probs) + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #print(len(weighted_probs_for_point)) + + #Aggregate all of the probability values into a final_result + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in weighted_probs_for_point: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl] + + #return the averaged probabilities: + if return_layout: + return averaged_probs, ckt_layout + else: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! 
+ #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? +# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs, ckt_layout = self.bulk_probs(circuits, return_layout= True) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits, cached_layout= ckt_layout) + + #need to parse this and construct the corresponding entries of the dprobs dict. 
+ + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + return dprobs + + def add_helper_sim(self): + if self.model is not None: + self.helper_sim = _MatrixForwardSimulator(model=self.model) + + def create_layout(self, bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=1): + + if self.helper_sim is None: + self.add_helper_sim() + + return self.helper_sim.create_layout(bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=verbosity) + + #Add a bulk_fill_probs method that does something similar to bulk_probs but returns + #an array filled according to a layout instead of an outcome dictionary + def bulk_fill_probs(self, array_to_fill, layout): + + sample_points_lists , weights = self.build_sampling_grid() + + #The grid of points is the cartesian product of the sample point lists. + + sample_point_grid = list(_itertools.product(*sample_points_lists)) + + #do this for convienience + weight_grid = list(_itertools.product(*([weights]*len(sample_points_lists)))) + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. 
+ hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + #If I have a layout then I should have a helper sim by this point + #if self.helper_sim is None: + # self.add_helper_sim() + + #create copies of the array being filled + temp_arrays = [array_to_fill.copy() for _ in sample_point_grid] + + for i, (sample_grid_point, weight_grid_point) in enumerate(zip(sample_point_grid, weight_grid)): + + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = _np.array(sample_grid_point) + + #despite storing it as a grid, we just need the scalar product of the weights + weight_value = _np.prod(weight_grid_point) + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + self.helper_sim.bulk_fill_probs(temp_arrays[i], layout) + + #Iterate through and add weight terms. + temp_arrays[i] = weight_value*temp_arrays[i] + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #Aggregate all of the probability values into a final_result + averaged_array = temp_arrays[0] + for temp_array in temp_arrays[1:]: + averaged_array += temp_array + + #print('averaged: ', averaged_array) + + array_to_fill[:]= averaged_array + #return averaged_array + + + #Next I need a version of bulk_fill_dprobs: + + def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): + + eps = 1e-7 # hardcoded? 
+ + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + probs = pr_array_to_fill.copy() + + else: + probs = layout.allocate_local_array('e', 'd') + self.bulk_fill_probs(probs, layout) + + orig_vec = self.model.to_vector().copy() + + for i in range(self.model.num_params): + probs2 = probs.copy() + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_probs(probs2,layout) + + #now put this result into the array to be filled. + array_to_fill[: , i] =(probs2 - probs) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + #print('dprobs: ', array_to_fill) + #return dprobs + +class LFHSigmaForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + This version uses sigma point methods (unscented transform) to approximate the requisite + integrals. + """ + + def __init__(self, model=None): + """ + Construct a new WeakForwardSimulator object. + + Parameters + ---------- + order: int + order of the gauss-hermite approximation for the integral. + model : Model + Optional parent Model to be stored with the Simulator + """ + self.helper_sim = None + super().__init__(model) + + def sigma_points(self): + #build the grid of sample points and weights + #for the simulators model. + + #Need to identify how many deviation parameters there are. 
+ num_deviances= 0 + dev_values= [] + mean_values = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + dev_values.extend(subop.errorgen.devs) + mean_values.extend(subop.errorgen.means) + num_deviances += len(subop.errorgen.devs) + + #Now construct the set of points and weights: + mean_vec = _np.array(mean_values).reshape((num_deviances,1)) + std_vec = _np.array(dev_values) + + #Currently only have _LFHLindbladErrorgen objects that are configured for + #diagonal covariances, so we can simplify the sigma point construction logic + #a bit. Use a heuristic from Julier and Uhlmann. + #The first sigma point is just the mean. + #columns of this matrix will become sigma vectors. + sigma_vec_array = _np.repeat(mean_vec, repeats=2*num_deviances+1, axis=1) + + #calculate a special scaling factor used in the Unscented transform. + #This scale factor is n + kappa in Julier and Uhlmann, and they claim + #a value of n+kappa =3 is a good heuristic for gaussian distributions. + scale_factor = 3 + #columns of offsets correspond to the offset vectors + offsets = _np.diag(_np.sqrt(scale_factor)*std_vec) + #Note: the application of these shifts can be done much more efficiently + #by appropriately using slicing and broadcasting, but this is easy for now. + shifts = _np.concatenate([_np.zeros_like(mean_vec), offsets, -offsets], axis=1) + #Add these offsets to columns 1 to L and subtract from + #columns L+1 to 2L+1 + sigma_vec_array += shifts + + #next we need the weights + kappa = scale_factor - num_deviances + weights = _np.array([kappa/scale_factor, 1/(2*scale_factor)]) + + return sigma_vec_array, weights + + + + def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None, return_layout= False, cached_layout= None): + """ + Construct a dictionary containing the probabilities for an entire list of circuits. 
+ + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + clip_to : 2-tuple, optional + (min,max) to clip return value if not None. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + probs : dictionary + A dictionary such that `probs[circuit]` is an ordered dictionary of + outcome probabilities whose keys are outcome labels. + """ + + sigma_points , weights = self.sigma_points() + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. I should probably make this something that gets cached, as it usually + #won't need recomputation. + hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + if self.helper_sim is None: + self.add_helper_sim() + + #create a circuit layout that we can reuse to speed things up + #(We'll be using the same circuit list at every evaluation point) + if cached_layout is None: + ckt_layout = self.helper_sim.create_layout(circuits) + else: + ckt_layout = cached_layout + + weighted_probs_for_point = [] + weight_iter = _itertools.chain([0] ,_itertools.repeat(1, sigma_points.shape[1]-1)) + for i, j in zip(range(sigma_points.shape[1]), weight_iter): + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = sigma_points[:,i] + + #set the model to this current vec value + 
self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + #We can pass in a COPAlayout for this instead of a list of circuits, which speeds things up. + probs_for_point = self.helper_sim.bulk_probs(ckt_layout) + #probs_for_point = helper_sim.bulk_probs(circuits) + + #print(probs_for_point) + + #Iterate through and add weight terms. + outcome_labels= probs_for_point[circuits[0]].keys() + weighted_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for ckt in circuits: + for lbl in outcome_labels: + weighted_probs[ckt][lbl] = probs_for_point[ckt][lbl] * weights[j] + + weighted_probs_for_point.append(weighted_probs) + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #print(len(weighted_probs_for_point)) + + #Aggregate all of the probability values into a final_result + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in weighted_probs_for_point: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl] + + #return the averaged probabilities: + if return_layout: + return averaged_probs, ckt_layout + else: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. 
+ + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! + #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? +# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs, ckt_layout = self.bulk_probs(circuits, return_layout= True) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits, cached_layout= ckt_layout) + + #need to parse this and construct the corresponding entries of the dprobs dict. 
+ + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + return dprobs + + def add_helper_sim(self): + if self.model is not None: + self.helper_sim = _MatrixForwardSimulator(model=self.model) + + def create_layout(self, bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=1): + + if self.helper_sim is None: + self.add_helper_sim() + + return self.helper_sim.create_layout(bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=verbosity) + + #Add a bulk_fill_probs method that does something similar to bulk_probs but returns + #an array filled according to a layout instead of an outcome dictionary + def bulk_fill_probs(self, array_to_fill, layout): + + sigma_points , weights = self.sigma_points() + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. + hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + #If I have a layout then I should have a helper sim by this point + #if self.helper_sim is None: + # self.add_helper_sim() + + #create copies of the array being filled + temp_arrays = [array_to_fill.copy() for _ in range(sigma_points.shape[1])] + + weight_iter = _itertools.chain([0] ,_itertools.repeat(1, sigma_points.shape[1]-1)) + + for i, j in zip(range(sigma_points.shape[1]), weight_iter): + + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = sigma_points[:,i] + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + 
self.helper_sim.bulk_fill_probs(temp_arrays[i], layout) + + #Iterate through and add weight terms. + temp_arrays[i] = weights[j]*temp_arrays[i] + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #Aggregate all of the probability values into a final_result + averaged_array = temp_arrays[0] + for temp_array in temp_arrays[1:]: + averaged_array += temp_array + + #print('averaged: ', averaged_array) + + array_to_fill[:]= averaged_array + #return averaged_array + + + #Next I need a version of bulk_fill_dprobs: + + def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): + + eps = 1e-7 # hardcoded? + + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + probs = pr_array_to_fill.copy() + + else: + probs = layout.allocate_local_array('e', 'd') + self.bulk_fill_probs(probs, layout) + + orig_vec = self.model.to_vector().copy() + + for i in range(self.model.num_params): + probs2 = probs.copy() + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_probs(probs2,layout) + + #now put this result into the array to be filled. + array_to_fill[: , i] =(probs2 - probs) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + #print('dprobs: ', array_to_fill) + #return dprobs \ No newline at end of file diff --git a/pygsti/extras/lfh/lfhmodel.py b/pygsti/extras/lfh/lfhmodel.py new file mode 100644 index 000000000..d4b38c8fd --- /dev/null +++ b/pygsti/extras/lfh/lfhmodel.py @@ -0,0 +1,78 @@ +""" +Defines the LFHExplicitOpModel class, an extension of ExplicitOpModel with +support for fluctuating Hamiltonian parameters. +""" +#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 
Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** + + +import numpy as np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype +from pygsti.extras.lfh.lfherrorgen import LFHLindbladErrorgen as _LFHLindbladErrorgen + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + +#I think the last thing I need is a model which can propagate through the resampling to any +#underlying LFHLindbladErrorgen objects +class 
LFHExplicitOpModel(_ExplicitOpModel): + + #Use the same init as explicit op model: + def __init__(self, state_space, basis="pp", default_gate_type="full", + default_prep_type="auto", default_povm_type="auto", + default_instrument_type="auto", prep_prefix="rho", effect_prefix="E", + gate_prefix="G", povm_prefix="M", instrument_prefix="I", + simulator="auto", evotype="default"): + + super().__init__(state_space, basis, default_gate_type, + default_prep_type, default_povm_type, + default_instrument_type, prep_prefix, effect_prefix, + gate_prefix, povm_prefix, instrument_prefix, + simulator, evotype) + + #Add a method that resamples the hamiltonian rates when requested. + def sample_hamiltonian_rates(self): + #loop through the elements of the operations dictionary + for member in self.operations.values(): + if isinstance(member, _ComposedOp): + #next check is any of the factor ops are exponentiated error generators + for factor in member.factorops: + if isinstance(factor, _ExpErrorgenOp): + #check to see if the error generator is a LFHLindbladErrorgen + if isinstance(factor.errorgen, _LFHLindbladErrorgen): + #then propagate the resampling through. + factor.errorgen.sample_hamiltonian_rates() + #update the representation of the exponentiated error generator + factor._update_rep() + + #Once I have updated the reps of the factors I need to reinitalize the rep of + #the composed op. 
+ #print([op._rep for op in member.factorops]) + member._update_denserep() + #.reinit_factor_op_reps([op._rep for op in member.factorops]) + + #need a version of the circuit_layer_operator method which doesn't call clean_paramvec + #since I think this is what is causing the value of the \ No newline at end of file From b4a1b6633940533f07c827bb1ddee8c3d322776c Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Fri, 7 Jun 2024 14:23:01 -0600 Subject: [PATCH 007/102] Started refactor to use dictionaries --- .../Propagatable error gens tutorial.ipynb | 109 ++++++++-- .../errordict_deprecated.py | 10 + .../errorgenpropagation/errorpropagator.py | 4 +- .../errorpropagator_dev.py | 172 +++++++++++++++ .../errorgenpropagation/localstimerrorgen.py | 96 +++++++++ .../propagatableerrorgen.py | 15 ++ .../utilserrorgenpropagation.py | 195 ++++++++++++++++++ .../utilspygstistimtranslator.py | 2 + pygsti/tools/internalgates.py | 2 + 9 files changed, 591 insertions(+), 14 deletions(-) create mode 100644 pygsti/extras/errorgenpropagation/errordict_deprecated.py create mode 100644 pygsti/extras/errorgenpropagation/errorpropagator_dev.py create mode 100644 pygsti/extras/errorgenpropagation/localstimerrorgen.py create mode 100644 pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py diff --git a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb index bd8342532..47f95861c 100644 --- a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb +++ b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb @@ -2,10 +2,21 @@ "cells": [ { "cell_type": "code", - "execution_count": 7, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. 
To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", "from pygsti.extras.errorgenpropagation.propagatableerrorgen import *\n", "from pygsti.extras.errorgenpropagation.errorpropagator import *\n", "from pygsti.circuits import Circuit\n", @@ -49,7 +60,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -70,9 +81,26 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", + "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 
0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n" + ] + } + ], "source": [ "errors=ErrorPropagator(c,errorModel,BCHOrder=1,BCHLayerwise=False,NonMarkovian=False)" ] @@ -95,7 +123,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -104,7 +132,7 @@ "[('H', ('X',), (0.09999999999999999+0j))]" ] }, - "execution_count": 10, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -131,7 +159,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -154,15 +182,72 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you want to use the non markovianity function you need to define an n x n correlation where n is the number of layers." + "If you want to use the non markovianity function you need to define an n x n correlation where n is the number of layers. Currently, we are capable of describing each layer to be governed by some stochastic process, that is correlated to the other layers. 
To using the code is relatively simple, see the below example" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, - "outputs": [], - "source": [] + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0.5, 1.0, 'White noise dephasing')" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABWQUlEQVR4nO3dd3gU5d7G8e8mIQklCT20QCjSewuEKgQjIAcQERAhYEUBKTZQEUWKNOUoCIKKqCDtUFSKxtCLgPQOUgSBEBBDgECAZN4/5mVhTYAsKZNs7s917eXuzOzObwdlb595is0wDAMRERERF+ZmdQEiIiIiaU2BR0RERFyeAo+IiIi4PAUeERERcXkKPCIiIuLyFHhERETE5SnwiIiIiMtT4BERERGXp8AjIiIiLk+BRySDs9ls9OnT577Hff3119hsNo4fP572RaWy48ePY7PZ+Prrr60uJV2u463vO27cuDQ7R1J69OhBYGBgup5TJKNQ4BFJI3PnzsVms7Fw4cJE+6pVq4bNZmPlypWJ9hUvXpzg4OBUqeGzzz7LECFCRMRqCjwiaaRhw4YArFu3zmF7TEwMe/bswcPDg/Xr1zvsO3nyJCdPnrS/1xndunXj6tWrlChRwr4tswSeEiVKcPXqVbp162Z1KS5t2rRpHDx40OoyRCzhYXUBIq6qSJEilCxZMlHg2bhxI4Zh0LFjx0T7br1+kMDj7u6Ou7v7gxdsIZvNhre3t9VluLxs2bJZXYKIZdTCI5KGGjZsyPbt27l69ap92/r166lUqRItW7bkt99+IyEhwWGfzWajQYMGiT5r0aJFVK5cGS8vLypVqsTy5csd9v+770lgYCB79+5l9erV2Gw2bDYbTZs2tR8fHR1N//79CQgIwMvLizJlyjB69GiHeu4mMDCQxx57jHXr1lG3bl28vb0pVaoU33zzTaJjjx49SseOHcmbNy85cuSgXr16LFmyxOGYpPrwREZG0rNnT4oVK4aXlxeFCxembdu2ifrWLFu2jEaNGpEzZ058fHxo3bo1e/fuve93ANi7dy/NmjUje/bsFCtWjOHDh9/1+yfnPD169CBXrlwcPXqU0NBQcubMSZEiRRg2bBiGYST5uVOnTqV06dJ4eXlRp04dtmzZ4rB/165d9OjRg1KlSuHt7U2hQoV45pln+Pvvvx2Ou3TpEv379ycwMBAvLy8KFixIixYt2LZtm0N9d/bhubMv0f3qAJg3bx4VK1bE29ubypUrs3DhQvULkkxDLTwiaahhw4Z8++23bNq0yR421q9fT3BwMMHBwVy8eJE9e/ZQtWpV+77y5cuTL18+h89Zt24dCxYs4OWXX8bHx4dPPvmEDh06cOLEiUTH3jJhwgT69u1Lrly5ePvttwHw9/cHIDY2liZNmnDq1ClefPFFihcvzoYNGxg8eDBnzpxhwoQJ9/1uf/zxB0888QTPPvssYWFhfPXVV/To0YNatWpRqVIlAM6ePUtwcDCxsbG88sor5MuXjxkzZvCf//yH+fPn0759+7t+focOHdi7dy99+/YlMDCQqKgowsPDOXHihP0H9ttvvyUsLIzQ0FBGjx5NbGwskydPtgf
Ne/0QR0ZG8vDDD3Pz5k0GDRpEzpw5mTp1KtmzZ090rDPniY+P59FHH6VevXqMGTOG5cuXM3ToUG7evMmwYcMcPnfWrFlcunSJF198EZvNxpgxY3j88cc5evSovTUmPDyco0eP0rNnTwoVKsTevXuZOnUqe/fu5bfffsNmswHQq1cv5s+fT58+fahYsSJ///0369atY//+/dSsWfOef5bJqWPJkiV06tSJKlWqMGrUKP755x+effZZihYtes/PFskwDBFJM3v37jUA44MPPjAMwzBu3Lhh5MyZ05gxY4ZhGIbh7+9vTJo0yTAMw4iJiTHc3d2N559/3uEzAMPT09P4448/7Nt27txpAMann35q3zZ9+nQDMI4dO2bfVqlSJaNJkyaJ6vrggw+MnDlzGocOHXLYPmjQIMPd3d04ceLEPb9XiRIlDMBYs2aNfVtUVJTh5eVlvPrqq/Zt/fv3NwBj7dq19m2XLl0ySpYsaQQGBhrx8fGGYRjGsWPHDMCYPn26YRiG8c8//xiAMXbs2LvWcOnSJSN37tyJrldkZKTh5+eXaPu/3apt06ZNDt/Bz8/P4To6c56wsDADMPr27WvflpCQYLRu3drw9PQ0zp075/B98+XLZ1y4cMF+7OLFiw3A+PHHH+3bYmNjE9X+/fffJ7r+fn5+Ru/eve/5ncPCwowSJUrYXztTR5UqVYxixYoZly5dsm9btWqVATh8pkhGpVtaImmoQoUK5MuXz943Z+fOnVy5csU+Cis4ONjecXnjxo3Ex8cn2X8nJCSE0qVL219XrVoVX19fjh49+kB1zZs3j0aNGpEnTx7Onz9vf4SEhBAfH8+aNWvu+xkVK1akUaNG9tcFChSgXLlyDjUtXbqUunXrOnynXLly8cILL3D8+HH27duX5Gdnz54dT09PVq1axT///JPkMeHh4URHR9OlSxeH7+Du7k5QUFCSI+DutHTpUurVq0fdunUdvkPXrl1TfJ47pxG4Na3A9evX+fXXXx2O69SpE3ny5LG/vnU977yGd7Y4Xbt2jfPnz1OvXj0Ah9tVuXPnZtOmTZw+ffqe3zsp96vj9OnT7N69m+7du5MrVy77cU2aNKFKlSpOn0/ECrqlJZKGbDYbwcHBrFmzhoSEBNavX0/BggUpU6YMYAaeiRMnAtiDT1KBp3jx4om25cmT565h4H4OHz7Mrl27KFCgQJL7o6Ki7vsZyanpzz//JCgoKNFxFSpUsO+vXLlyov1eXl6MHj2aV199FX9/f+rVq8djjz1G9+7dKVSokP07ADRr1izJ+nx9fe9Z/91qK1eunMNrZ8/j5uZGqVKlHLaVLVsWIFH/o39fw1uh485reOHCBd5//31mz56d6M/l4sWL9udjxowhLCyMgIAAatWqRatWrejevXuiWpJyvzr+/PNPAPu/t3cqU6aMQ/ASyagUeETSWMOGDfnxxx/ZvXu3vf/OLcHBwbz++uucOnWKdevWUaRIkSR/oO42+sq4S0fY+0lISKBFixa88cYbSe6/9QN9L6ld07/179+fNm3asGjRIn7++WeGDBnCqFGjWLFiBTVq1LB3Lv7222/tIehOHh6p89dbWp4nOdfwySefZMOGDbz++utUr16dXLlykZCQwKOPPurQwfrJJ5+kUaNGLFy4kF9++YWxY8cyevRoFixYQMuWLVNch0hmp8AjksbunI9n/fr19O/f376vVq1aeHl5sWrVKjZt2kSrVq1S9dy3OrT+W+nSpbl8+TIhISGper5/K1GiRJLzvhw4cMC+/15Kly7Nq6++yquvvsrhw4epXr0648eP57vvvrPf4itYsOADfY8SJUrYW2/u9O96nT1PQkICR48edQiNhw4dAnB6NNM///xDREQE77//Pu+++659e1J1AxQuXJiXX36Zl19+maioKGrWrMmIESPuG3ju59af0x9//JFoX1LbRDIi9eERSWO1a9fG29ubmTNncurUKYcWHi8vL2rWrMmkSZO4cuXKA82
/cy85c+YkOjo60fYnn3ySjRs38vPPPyfaFx0dzc2bN1Pl/K1atWLz5s1s3LjRvu3KlStMnTqVwMBAKlasmOT7YmNjuXbtmsO20qVL4+PjQ1xcHAChoaH4+voycuRIbty4kegzzp07d9/afvvtNzZv3uzwnpkzZzoc9yDnuXWbEsxWkokTJ5ItWzaaN29+z5r+7VbLy79bWv49ii4+Pt7h9haYAa1IkSL265USRYoUoXLlynzzzTdcvnzZvn316tXs3r07xZ8vkh7UwiOSxjw9PalTpw5r167Fy8uLWrVqOewPDg5m/PjxwINNOHgvtWrVYvLkyQwfPpwyZcpQsGBBmjVrxuuvv84PP/zAY489Zh9KfuXKFXbv3s38+fM5fvw4+fPnT/H5Bw0axPfff0/Lli155ZVXyJs3LzNmzODYsWP873//w80t6f/nOnToEM2bN+fJJ5+kYsWKeHh4sHDhQs6ePUvnzp0Bs+/M5MmT6datGzVr1qRz584UKFCAEydOsGTJEho0aOAQPP7tjTfe4Ntvv+XRRx+lX79+9mHpJUqUYNeuXfbjnD2Pt7c3y5cvJywsjKCgIJYtW8aSJUt466237tpn6m58fX1p3LgxY8aM4caNGxQtWpRffvmFY8eOORx36dIlihUrxhNPPEG1atXIlSsXv/76K1u2bLH/u5VSI0eOpG3btjRo0ICePXvyzz//MHHiRCpXruwQgkQyLCuHiIlkFYMHDzYAIzg4ONG+BQsWGIDh4+Nj3Lx5M9F+IMnhxiVKlDDCwsLsr5Malh4ZGWm0bt3a8PHxMQCHIeqXLl0yBg8ebJQpU8bw9PQ08ufPbwQHBxvjxo0zrl+/fs/vU6JECaN169aJtjdp0iTRMPgjR44YTzzxhJE7d27D29vbqFu3rvHTTz85HPPvYennz583evfubZQvX97ImTOn4efnZwQFBRlz585NdM6VK1caoaGhhp+fn+Ht7W2ULl3a6NGjh/H777/f8zsYhmHs2rXLaNKkieHt7W0ULVrU+OCDD4wvv/wy0XVM7nnCwsKMnDlzGkeOHDEeeeQRI0eOHIa/v78xdOhQ+xD8O79vUsPuAWPo0KH213/99ZfRvn17I3fu3Iafn5/RsWNH4/Tp0w7HxcXFGa+//rpRrVo1w8fHx8iZM6dRrVo147PPPnP47LsNS09OHYZhGLNnzzbKly9veHl5GZUrVzZ++OEHo0OHDkb58uXvc6VFrGczDPVKExFJDT169GD+/PlZqsWjevXqFChQgPDwcKtLEbkn9eEREZH7unHjRqK+XatWrWLnzp0OS5aIZFTqwyMiIvd16tQpQkJCePrppylSpAgHDhxgypQpFCpUiF69elldnsh9KfCIiMh95cmTh1q1avHFF19w7tw5cubMSevWrfnwww/vup6bSEaiPjwiIiLi8tSHR0RERFyeAo+IiIi4vCzXhychIYHTp0/j4+Nz12n3RUREJGMxDINLly5RpEiRu05aei9ZLvCcPn2agIAAq8sQERGRB3Dy5EmKFSvm9PuyXODx8fEBzAvm6+trcTUiIiKSHDExMQQEBNh/x52V5QLPrdtYvr6+CjwiIiKZzIN2R1GnZREREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8iwNPGvWrKFNmzYUKVIEm83GokWL7vueVatWUbNmTby8vChTpgxff/11mtcpIiIimZulgefKlStUq1aNSZMmJev4Y8eO0bp1ax5++GF27NhB//79ee655/j555/TuFIRERHJzDysPHnLli1p2bJlso+
fMmUKJUuWZPz48QBUqFCBdevW8fHHHxMaGppWZYqIiEgml6n68GzcuJGQkBCHbaGhoWzcuNGiikRERCQzsLSFx1mRkZH4+/s7bPP39ycmJoarV6+SPXv2RO+Ji4sjLi7O/jomJibN6xQREZGMJVO18DyIUaNG4efnZ38EBARYXZKIiIiks0wVeAoVKsTZs2cdtp09exZfX98kW3cABg8ezMWLF+2PkydPpkepIiIikoFkqlta9evXZ+nSpQ7bwsPDqV+//l3f4+XlhZeXV1qXJiIiIhmYpS08ly9fZseOHezYsQMwh53v2LGDEydOAGbrTPfu3e3H9+rVi6NHj/LGG29w4MABPvvsM+bOncuAAQOsKF9EREQyCUsDz++//06NGjWoUaMGAAMHDqRGjRq8++67AJw5c8YefgBKlizJkiVLCA8Pp1q1aowfP54vvvhCQ9JFRETknmyGYRhWF5GeYmJi8PPz4+LFi/j6+lpdjoiIiCRDSn+/M1WnZREREZEHocAjIiIiLk+BR0RERFyeAo+IiIi4PAUeERERcXkKPKno99/h2DGrqxAREZF/U+BJJYsWQYMG8MQTcO2a1dWIiIjInRR4UkmtWuDrC9u2wSuvWF2NiIiI3EmBJ5UEBMCsWWCzwbRpMH261RWJiIjILQo8qahFCxg2zHz+8svw/0uEiYiIiMUUeFLZW29Bq1ZmP54OHSA62uqKRERERIEnlbm5wbffQmAgHD0KYWGQkGB1VSIiIlmbAk8ayJsX5s8HT0/44QcYO9bqikRERLI2BZ40UqsWTJxoPn/rLVi50tp6REREsjIFnjT03HPQo4d5S6tzZzh1yuqKREREsiYFnjRks8GkSVCtGkRFQadOcOOG1VWJiIhkPQo8aSxHDrM/j68vrF8Pb75pdUUiIiJZjwJPOihTBr75xnz+8ccwb5619YiIiGQ1CjzppG3b2607zzwDBw5YW4+IiEhWosCTjoYPh6ZN4fJlc1LCy5etrkhERCRrUOBJRx4eMHs2FC4M+/bBCy+AYVhdlYiIiOtT4Eln/v4wdy64u8P338Nnn1ldkYiIiOtT4LFAw4a3Z18eMAB++83aekRERFydAo9F+veHJ54w5+Xp2BHOnbO6IhEREdelwGMRmw2+/BLKloW//oKuXSE+3uqqREREXJMCj4V8feF//zMnJwwPh/fft7oiERER16TAY7HKlWHqVPP5Bx/AkiXW1iMiIuKKFHgygK5doXdv83m3bnD8uKXliIiIuBwFngxi/HgICoJ//jE7M1+7ZnVFIiIirkOBJ4Pw8jLn58mXD7ZuhX79rK5IRETEdSjwZCDFi8OsWeYIrqlT4euvra5IRETENSjwZDCPPHJ7tNZLL8HOndbWIyIi4goUeDKgt9+Gli3NfjwdOkB0tNUViYiIZG4KPBmQmxt89x2UKAFHjkCPHlpkVEREJCUUeDKovHlh/nzw9ITFi2+vvSUiIiLOU+DJwGrXhk8/NZ8PHgyrVllajoiISKalwJPBPf88hIVBQgJ07gynT1tdkYiISOajwJPB2Wzw2WdQtSqcPQvt2kFsrNVViYiIZC4KPJlAjhywYIHZr2fLFuje3WzxERERkeRR4MkkSpeGRYvMTsz/+x+89ZbVFYmIiGQeCjyZSKNG8OWX5vPRo28/FxERkXtT4Mlknn4a3n3XfN6rF0REWFuPiIhIZqDAkwm99x489RTcvGnOxLx/v9UViYiIZGwKPJmQzWbezmrQAC5ehNat4dw5q6sSERHJuBR4Milvb1i4EEqVgmPHzOHq165ZXZWIiEjGpMCTiRUoAEuWQO7csGED9OypNbdERESSosCTyZUvbw5T9/CA2bNh6FCrKxIREcl4FHhcQLNmMHWq+fyDD+Cbb6ytR0REJKNR4HERPXvCoEHm8+eegzVrrK1HREQkI1HgcSEjRkDHjnDjBrRvD4cPW12RiIhIxqDA40Lc3GDGDAgKggsXzOHqf/9tdVUiIiLWU+B
xMdmzw+LFUKKE2cLz+OMQF2d1VSIiItZS4HFB/v7mcHVfX7MvzwsvaLi6iIhkbQo8LqpSJZg3D9zdzVFbI0ZYXZGIiIh1FHhc2COPwKRJ5vMhQ8x5ekRERLIiBR4X9+KLMHCg+bxHD3NGZhERkaxGgScLGDMG2rY1Oy+3awdHj1pdkYiISPqyPPBMmjSJwMBAvL29CQoKYvPmzfc8fsKECZQrV47s2bMTEBDAgAEDuKZVM+/J3R1mzoSaNc1V1Vu3huhoq6sSERFJP5YGnjlz5jBw4ECGDh3Ktm3bqFatGqGhoURFRSV5/KxZsxg0aBBDhw5l//79fPnll8yZM4e33nornSvPfHLmhB9/hGLF4MABeOIJc4JCERGRrMDSwPPRRx/x/PPP07NnTypWrMiUKVPIkSMHX331VZLHb9iwgQYNGvDUU08RGBjII488QpcuXe7bKiSmIkXgp58gVy6IiICXXtJwdRERyRosCzzXr19n69athISE3C7GzY2QkBA2btyY5HuCg4PZunWrPeAcPXqUpUuX0qpVq7ueJy4ujpiYGIdHVlatmjlay80NvvwSxo61uiIREZG0Z1ngOX/+PPHx8fj7+zts9/f3JzIyMsn3PPXUUwwbNoyGDRuSLVs2SpcuTdOmTe95S2vUqFH4+fnZHwEBAan6PTKj1q1hwgTz+ZtvwoIFlpYjIiKS5izvtOyMVatWMXLkSD777DO2bdvGggULWLJkCR988MFd3zN48GAuXrxof5w8eTIdK864+vaFPn3M508/DVu2WFuPiIhIWvKw6sT58+fH3d2ds2fPOmw/e/YshQoVSvI9Q4YMoVu3bjz33HMAVKlShStXrvDCCy/w9ttv4+aWOL95eXnh5eWV+l/ABXz8sTlEfelSaNMGNm+G4sWtrkpERCT1WdbC4+npSa1atYiIiLBvS0hIICIigvr16yf5ntjY2EShxt3dHQBDvW+d5uFh9uepWhXOnjVvdV28aHVVIiIiqc/SW1oDBw5k2rRpzJgxg/379/PSSy9x5coVevbsCUD37t0ZPHiw/fg2bdowefJkZs+ezbFjxwgPD2fIkCG0adPGHnzEOT4+5sitwoVhzx547DGIjbW6KhERkdRl2S0tgE6dOnHu3DneffddIiMjqV69OsuXL7d3ZD5x4oRDi84777yDzWbjnXfe4dSpUxQoUIA2bdowQitjpkhAgHlbq2lTWLcO2reHH34A3QkUERFXYTOy2L2gmJgY/Pz8uHjxIr6+vlaXk6Fs2AAtWpgtPI8/DnPmmLe9RERErJbS3+9MNUpL0lZwMCxeDJ6e5lD1556DhASrqxIREUk5BR5xEBICc+ea62/NmAH9+mk2ZhERyfwUeCSRtm3h66/BZoOJE2HIEKsrEhERSRkFHknS00/DZ5+Zz0eMgDFjrK1HREQkJRR45K569YLRo83nb74JU6ZYW4+IiMiDUuCRe3rjDbi1VNnLL8N331lbj4iIyINQ4JH7Gj7cXHfLMKBHD3Mkl4iISGaiwCP3ZbPBf/8LYWEQHw9PPgm//mp1VSIiIsmnwCPJ4uYGX3wBHTrA9evmSK4NG6yuSkREJHkUeCTZPDxg5kwIDTVnY27VCnbssLoqERGR+3M68DRp0oRvvvmGq1evpkU9ksF5eZmzMDdsaK6s/sgjcOCA1VWJiIjcm9OBp0aNGrz22msUKlSI559/nt9++y0t6pIMLEcOc4X1mjXh3Dlz/a3jx62uSkRE5O6cDjwTJkzg9OnTTJ8+naioKBo3bkzFihUZN24cZ8+eTYsaJQPy84Off4YKFeCvv8wlKc6csboqERGRpD1QHx4PDw8ef/xxFi9ezF9//cVTTz3FkCFDCAgIoF27dqxYsSK165QMKH9+CA+HkiXhyBHz9tbff1tdlYiISGIp6rS8efNmhg4dyvjx4ylYsCCDBw8mf/78PPbYY7z22mupVaNkYEWLmkPUixSBPXugZUu4dMnqqkRERBzZDMO5tbC
joqL49ttvmT59OocPH6ZNmzY899xzhIaGYrPZAFi3bh2PPvooly9fTpOiUyImJgY/Pz8uXryIr6+v1eW4jH37oHFjs4WnSRNYtgyyZ7e6KhERcRUp/f32cPYNxYoVo3Tp0jzzzDP06NGDAgUKJDqmatWq1KlTx+liJPOqWNHs09OsGaxeDU88AQsXgqen1ZWJiIg8QAvP2rVradSoUVrVk+bUwpO21q0z+/JcvWrOyDxrFri7W12ViIhkdin9/Xa6D09mDjuS9ho2NFt2smWDuXPhxRfNNbhERESs5PQtLYD58+czd+5cTpw4wfXr1x32bdu2LVUKk8wrNBS+/95s4fnyS/DxgY8+MtfkEhERsYLTLTyffPIJPXv2xN/fn+3bt1O3bl3y5cvH0aNHadmyZVrUKJlQhw7w1Vfm8wkT4P33LS1HRESyOKcDz2effcbUqVP59NNP8fT05I033iA8PJxXXnmFixcvpkWNkkmFhcGnn5rP338fxo+3th4REcm6nA48J06cIDg4GIDs2bNz6f8nXenWrRvff/996lYnmV6fPjBihPn8tddg7Fhr6xERkazJ6cBTqFAhLly4AEDx4sXta2kdO3YMJwd8SRYxeDAMGWI+f+MNs7VH/6qIiEh6cjrwNGvWjB9++AGAnj17MmDAAFq0aEGnTp1o3759qhcomZ/NBsOGwciR5uv33oM331ToERGR9OP0PDwJCQkkJCTg4WEO8Jo9ezYbNmzgoYce4sUXX8Qzg880p3l4rPXf/0L//ubz3r3hk0/ALUULnIiISFaQ0t9vpwNPZqfAY71p027Pz9Ozp/lakxOKiMi9pPvEg9OnT2fevHmJts+bN48ZM2Y4XYBkPc8/D998Y4ac6dPh6afhxg2rqxIREVfmdOAZNWoU+fPnT7S9YMGCjLzVSUPkPp5+GubMMWdknj0bOnaEuDirqxIREVf1QMPSS5YsmWh7iRIlOHHiRKoUJVlDhw6waBF4ecHixdC2LcTGWl2ViIi4IqcDT8GCBdm1a1ei7Tt37iRfvnypUpRkHa1awZIlkCOHudp6q1bw/1M7iYiIpBqnA0+XLl145ZVXWLlyJfHx8cTHx7NixQr69etH586d06JGcXHNm8Mvv4CvL6xeba62Hh1tdVUiIuJKnB6ldf36dbp168a8efPsQ9MTEhLo3r07U6ZM0bB0eWC//26GnX/+gRo1zBCURHcxERHJgiwbln7o0CF27txJ9uzZqVKlCiVKlHiQj0l3CjwZ265d0KIFREVBxYrw669QuLDVVYmIiNVS+vvt8aAnDgwMxDAMSpcubW/pEUmpqlXN21rNm8O+fdC4MUREQPHiVlcmIiKZmdN9eGJjY3n22WfJkSMHlSpVso/M6tu3Lx9++GGqFyhZT/nysHYtBAbCH39Ao0Zw5IjVVYmISGbmdOAZPHgwO3fuZNWqVXh7e9u3h4SEMGfOnFQtTrKuUqVgzRooWxZOnDBDz/79VlclIiKZldOBZ9GiRUycOJGGDRtis9ns2ytVqsQR/W+4pKKAAPP2VuXKcOYMNGkCO3daXZWIiGRGTgeec+fOUbBgwUTbr1y54hCARFJDoUKwahXUrAnnzkHTprB5s9VViYhIZuN04KlduzZLliyxv74Vcr744gvq16+fepWJ/L98+cyOy/Xrm/PzhISYfXxERESSy+nhVSNHjqRly5bs27ePmzdv8t///pd9+/axYcMGVq9enRY1ipA7tzkvz3/+AytXQmiouRxFixZWVyYiIpmB0y08DRs2ZMeOHdy8eZMqVarwyy+/ULBgQTZu3EitWrXSokYRAHLlMpehaNkSrl6Fxx6DH3+0uioREckMHnjiwcxKEw9mfnFx0KULLFwIHh4wa5a52rqIiLiudJ948OLFi4SHh3P8+HFsNhulSpWiefPmCg+Sbry8YO5cCAszw07nzmaLT/fuVlcmIiIZlVOB57vvvqNPnz7ExMQ4bPfz82PKlCl06tQpVYsTuRs
PD/jmG3OV9S++MMPP5cvw8stWVyYiIhlRsvvwbNu2jZ49e9KuXTu2b9/O1atXiY2N5ffff6dNmzZ069aNnZokRdKRuzt8/jn07Wu+7t0b3ngDEhKsrUtERDKeZPfh6dmzJ5cvX2bevHlJ7n/iiSfw9fXlq6++StUCU5v68Lgew4Dhw+Hdd83XHTvCjBmQPbu1dYmISOpJ6e93slt41q9fz4svvnjX/b169WLdunVOFyCSUjYbDBkC334L2bLBvHnmXD3nz1tdmYiIZBTJDjynT5+mbNmyd91ftmxZTp06lSpFiTyIp5825+rJnRs2bDAnKjx82OqqREQkI0h24ImNjXVYLPTfvLy8uHbtWqoUJfKgmjY1w86tldbr14f1662uSkRErObUKK2ff/4ZPz+/JPdFR0enRj0iKVahAvz2G7RpA1u2QPPmZp8eDSIUEcm6kt1p2c3t/o1BNpuN+Pj4FBeVltRpOeuIjYWnnjKXoAD48ENzFJfWuBURyXzSrdNyQkLCfR8ZPexI1pIjB/zvf9Cvn/l60CB46SW4edPaukREJP05vZaWSGbi7g4TJpgPm82ct6dNG7h0yerKREQkPSnwSJbQr5+59lb27LB8OTRqBBpUKCKSdSjwSJbRti2sXg0FC8LOnRAUBLt2WV2ViIikB8sDz6RJkwgMDMTb25ugoCA2b958z+Ojo6Pp3bs3hQsXxsvLi7Jly7J06dJ0qlYyuzp1zBFcFSqYLTwNG8LPP1tdlYiIpDVLA8+cOXMYOHAgQ4cOZdu2bVSrVo3Q0FCioqKSPP769eu0aNGC48ePM3/+fA4ePMi0adMoWrRoOlcumVnJkubcPE2bmn15Wrc2FyAVERHXlexh6XeKjo5m/vz5HDlyhNdff528efOybds2/P39nQofQUFB1KlTh4kTJwLmSLCAgAD69u3LoEGDEh0/ZcoUxo4dy4EDB8iWLZuzZQMali63xcXBc8/Bd9+Zr996Cz74AJIxA4OIiKSzdBuWfsuuXbsoW7Yso0ePZty4cfYJBxcsWMDgwYOT/TnXr19n69athISE3C7GzY2QkBA2btyY5Ht++OEH6tevT+/evfH396dy5cqMHDnynsPh4+LiiImJcXiIAHh5wTff3F50dORIc3mKuDhr6xIRkdTndOAZOHAgPXr04PDhww5LTbRq1Yo1a9Yk+3POnz9PfHw8/v7+Dtv9/f2JjIxM8j1Hjx5l/vz5xMfHs3TpUoYMGcL48eMZPnz4Xc8zatQo/Pz87I+AgIBk1yiuz2aD99+H6dPBwwO+/x5atIC//7a6MhERSU1OB54tW7YkuWp60aJF7xpUUktCQgIFCxZk6tSp1KpVi06dOvH2228zZcqUu75n8ODBXLx40f44efJkmtYomVOPHuZwdV9fWLsWgoPhyBGrqxIRkdTidODx8vJK8rbQoUOHKFCgQLI/J3/+/Li7u3P27FmH7WfPnqVQoUJJvqdw4cKULVsWd3d3+7YKFSoQGRnJ9evX71qvr6+vw0MkKc2bm52ZixeHQ4egXj1zRJeIiGR+Tgee//znPwwbNowbN24A5vpZJ06c4M0336RDhw7J/hxPT09q1apFRESEfVtCQgIRERHUr18/yfc0aNCAP/74g4SEBPu2Q4cOUbhwYTw9PZ39KiKJVK5shpyaNeH8eXj4YXN5ChERydycDjzjx4/n8uXLFCxYkKtXr9KkSRPKlCmDj48PI0aMcOqzBg4cyLRp05gxYwb79+/npZde4sqVK/Ts2ROA7t27O3SEfumll7hw4QL9+vXj0KFDLFmyhJEjR9K7d29nv4bIXRUubE5Q2Lo1XLsGHTvCRx+B8+MZRUQko/Bw9g1+fn6Eh4ezfv16du7cyeXLl6lZs6bDaKvk6tSpE+fOnePdd98lMjKS6tWrs3z5cntH5hMnTjis0h4QEMDPP//MgAEDqFq1KkWLFqVfv368+eabTp9b5F5y5YJFi8wlKT77DF59Ffbtg4kT4Y6
++iIikkk80Dw8mZnm4RFnGAZ8/DG8/jokJEDt2uYtruLFra5MRCRrSfd5eF555RU++eSTRNsnTpxI//79nS5AJCOz2WDgQHMEV7588PvvZv+eX3+1ujIREXGG04Hnf//7Hw0aNEi0PTg4mPnz56dKUSIZTYsWsHUr1KplztETGgoffqh+PSIimYXTgefvv//Gz88v0XZfX1/Onz+fKkWJZEQlSsC6dfDMM+btrcGDoUMH0OTdIiIZn9OBp0yZMixfvjzR9mXLllGqVKlUKUoko/L2Nhca/fxz8PSEhQuhbl2zQ7OIiGRcTo/SGjhwIH369OHcuXM0a9YMgIiICMaPH8+ECRNSuz6RDMdmgxdegOrVzRaegwfN0DN9ujmEXUREMp4HGqU1efJkRowYwenTpwEIDAzkvffeo3v37qleYGrTKC1JTVFR0LkzrFxpvn7tNRg1ylyXS0REUk9Kf79TNCz93LlzZM+enVy5cj3oR6Q7BR5JbTdvwttvw5gx5uuHH4bZs6FgQWvrEhFxJek+LP1OBQoUyFRhRyQteHjA6NEwf745YeHKleZork2brK5MRERucTrwnD17lm7dulGkSBE8PDxwd3d3eIhkVR06wObNUK4c/PUXNG5sdm7W0HUREes53dOgR48enDhxgiFDhlC4cGFsNlta1CWSKVWoYIaenj1hwQLo1cts6Zk0CbJnt7o6EZGsy+k+PD4+Pqxdu5bq1aunUUlpS314JD0Yhtmn5623zDl7atY0l6QIDLS6MhGRzCnd+/AEBASQxZbfEnGazQZvvgm//AL588O2bWa/nl9+sboyEZGsyenAM2HCBAYNGsTx48fToBwR19K8ubkkRe3acOECPPoojBxptvqIiEj6cfqWVp48eYiNjeXmzZvkyJGDbNmyOey/cOFCqhaY2nRLS6xw7Rr07WvO0gzQti3MmAFJrNIiIiJJSOnvt9OdljWbsojzvL1h2jQICoLevWHxYnN25gULoFIlq6sTEXF9KZp4MDNSC49YbcsWcwj7yZOQMyd89RU8+aTVVYmIZGyWTDx45MgR3nnnHbp06UJUVBRgLh66d+/eB/k4kSylTh2zX0/z5nDlCnTqBK++CtevW12ZiIjrcjrwrF69mipVqrBp0yYWLFjA5cuXAdi5cydDhw5N9QJFXFGBArB8uTmSC+Cjj6BBAzh0yNq6RERcldOBZ9CgQQwfPpzw8HA8PT3t25s1a8Zvv/2WqsWJuDIPD/jwQ1i4EPLkgd9/hxo1zL4+WetGs4hI2nM68OzevZv27dsn2l6wYEHOnz+fKkWJZCXt2sGuXdCsGcTGwgsvwOOPg/5zEhFJPU4Hnty5c3PmzJlE27dv307RokVTpSiRrKZYMQgPh3HjIFs2WLQIqlY1t4mISMo5HXg6d+7Mm2++SWRkJDabjYSEBNavX89rr71G9+7d06JGkSzBzc3svLxpk7km15kz8Mgj5ra4OKurExHJ3JwOPCNHjqR8+fIEBARw+fJlKlasSOPGjQkODuadd95JixpFspQaNcz+PC+/bL7+6CNzzh4NghQReXBOzcNjGAYnT56kQIECnD9/nt27d3P58mVq1KjBQw89lJZ1phrNwyOZyU8/wTPPwLlz5uSFY8eaExfabFZXJiKSvlL6++1U4ElISMDb25u9e/dmmoDzbwo8ktlERkLPnuYwdoBWrczJCv39ra1LRCQ9pevEg25ubjz00EP8/fffTp9IRB5MoUKwdCl8+il4eZnPq1SBJUusrkxEJPNwug/Phx9+yOuvv86ePXvSoh4RSYLNBn36mH17qlQxb3E99pi57epVq6sTEcn4UrRauqenJ9mzZ3fYr9XSRdLWtWvw1lvw8cfm6woVYNYsqF7d0rJERNKUVksXyWK8vc2RW48+CmFhsH+/uQr7yJEwYIA5vF1ERBxptXSRTOz8eXjuOVi82HzdvDnMmAGaA1REXI1WSxfJwvLnN9fi+vxzyJEDIiLMGZoXLLC
6MhGRjEWrpYtkcjabuf7Wtm1QqxZcuAAdOpgtP///n6eISJan1dJFXES5crBhAwwaZIagL7+EmjVhyxarKxMRsZ5WSxdxIZ6eMGoUrFhhLkh6+DAEB8Pw4XDjhtXViYhYR6uli7igpk1h1y7o2BFu3oQhQ6BOHXMeHxGRrEirpYu4qDx5YM4c+PZbyJcPdu40h6+/9hrExlpdnYhI+tJq6SIuzGaDp5+GffugSxdISIDx483Zmn/91erqRETSzwPPw3PixAn27Nmj1dJFMpElS+Cll+DkSfN1z54wbhzkzWttXSIi95Ouq6W7AgUeyeouXTKXppg0CQzDXHX900/hiSfMFiERkYwo3QNPfHw8X3/9NREREURFRZGQkOCwf8WKFU4XkZ4UeERMGzaYc/Xs32++btvWDEEaeyAiGVG6z7Tcr18/+vXrR3x8PJUrV6ZatWoODxHJHIKDYft2ePddyJbNXJ6iYkVz1uZ//X+MiEim53QLT/78+fnmm29o1apVWtWUptTCI5LYnj1ma8+mTebrxo1h6lRzMkMRkYwg3Vt4PD09KVOmjNMnEpGMq3JlWL8eJkyAnDlhzRqoVs1cgV0TFoqIK3A68Lz66qv897//JYv1dRZxee7u0K+f2doTGgpxcfD225qwUERcQ7JuaT3++OMOr1esWEHevHmpVKkS2bJlc9i3IIMv06xbWiL3Zxgwcyb07w9//w1ubjBgAAwbZq7KLiKS3lL6++2RnIP8/PwcXie1lpaIuI5bExY+8ogZdGbNMicsXLjQ7NQcEmJ1hSIiztE8PCJyX5qwUESslu6dlm85d+4c69atY926dZw7d+5BP0ZEMoHWrWHvXujTx2z9mT7dHMI+b555+0tEJKNzOvBcuXKFZ555hsKFC9O4cWMaN25MkSJFePbZZ4nVioQiLsvHx5yRed06qFABzp6FJ5+E9u3hr7+srk5E5N6cDjwDBw5k9erV/Pjjj0RHRxMdHc3ixYtZvXo1r776alrUKCIZSFITFpYrB8OHw7VrVlcnIpK0B5p4cP78+TRt2tRh+8qVK3nyyScz/O0t9eERST179kCvXuYcPgCBgWbn5vbttS6XiKSudO/DExsbi7+/f6LtBQsW1C0tkSymcmVYu9YcxVW0KBw/Dh06mKO49uyxujoRkducDjz169dn6NChXLuj7frq1au8//771K9fP1WLE5GMz2aDLl3g4EF45x3w8oIVK6B6dejbFy5csLpCEZEHuKW1Z88eQkNDiYuLsy8WunPnTry9vfn555+pVKlSmhSaWnRLSyRtHTsGr70Gt+YgzZcPPvgAXnjBnM1ZRORBpPT3+4Hm4YmNjWXmzJkcOHAAgAoVKtC1a1eyZ8/udAHpTYFHJH2sWHF7qQqAqlXhk0+gSRNr6xKRzMmSwJOZKfCIpJ+bN2HKFHNE1z//mNs6doSxY6FECWtrE5HMJd06LW/dupWHH36YmJiYRPsuXrzIww8/zM6dO50uAGDSpEkEBgbi7e1NUFAQmzdvTtb7Zs+ejc1mo127dg90XhFJWx4e5mSFhw+bMzW7uZmTFZYvD0OHgsY5iEh6SXbgGT9+PM2aNUsyVfn5+dGiRQvGjh3rdAFz5sxh4MCBDB06lG3btlGtWjVCQ0OJioq65/uOHz/Oa6+9RqNGjZw+p4ikr3z54LPPzPl7mjY15+sZNswMPnPmaLZmEUl7yQ48mzZtom3btnfd36ZNGzZs2OB0AR999BHPP/88PXv2pGLFikyZMoUcOXLw1Vdf3fU98fHxdO3alffff59SpUo5fU4RsUbVqmbfnnnzoHhxc22uzp3NELRjh9XViYgrS3bgOXXqFD4+PnfdnytXLs6cOePUya9fv87WrVsJuWPpZTc3N0JCQti4ceNd3zds2DAKFizIs88+e99zxMXFERMT4/AQEevYbPDEE3DgALz/PmTPDmvWQK1a5iSG589bXaGIuKJkB54CBQpw8ODBu+4/cOAA+fPnd+r
k58+fJz4+PtFEhv7+/kRGRib5nnXr1vHll18ybdq0ZJ1j1KhR+Pn52R8BAQFO1SgiaSN7drMz84ED0KkTJCTA55/DQw+Zo7lu3LC6QhFxJckOPCEhIYwYMSLJfYZhMGLECIeWmrRw6dIlunXrxrRp05IdrgYPHszFixftj5MnT6ZpjSLinOLFYfZsWL0aqlWD6GhzOHv16hAebnV1IuIqPJJ74DvvvEOtWrUICgri1VdfpVy5coDZsjN+/HgOHTrE119/7dTJ8+fPj7u7O2fPnnXYfvbsWQoVKpTo+CNHjnD8+HHatGlj35aQkGB+EQ8PDh48SOnSpR3e4+XlhZeXl1N1iUj6a9wYtm6FL76At9+GffvgkUegbVsYPdpcoFRE5EElu4WndOnS/Prrr1y5coXOnTtTs2ZNatasSZcuXYiNjSU8PJwyZco4dXJPT09q1apFRESEfVtCQgIRERFJLlNRvnx5du/ezY4dO+yP//znPzz88MPs2LFDt6tEMjl3d3jxRXMY+yuvmK8XL4ZKleC558xOziIiD+KBJh7csWMHhw8fxjAMypYtS/Xq1R+4gDlz5hAWFsbnn39O3bp1mTBhAnPnzuXAgQP4+/vTvXt3ihYtyqhRo5J8f48ePYiOjmbRokXJOp8mHhTJPPbuhcGD4ccfzdeenvDyy/DWW1CggLW1iUj6Sunvd7Jvad2pevXqKQo5d+rUqRPnzp3j3XffJTIykurVq7N8+XJ7R+YTJ07g5ub0Gqci4gIqVYIffoANG8yQs3o1TJhg3vYaONB8+PlZXaWIZAZaWkJEMgXDgF9+MYPPtm3mtrx5zRag3r3NUV8i4rrSbWkJEREr2WwQGgq//25OXFiuHFy4AK+/bg5lnzpVQ9lF5O4UeEQkU7k1ceGePfDVV+aw9lOnzM7OFSvC99+bc/qIiNxJgUdEMiUPD+jZEw4dMvv1FCgAf/wBTz0FNWvCkiVao0tEbkt24BkyZAg3b9686/4TJ07QokWLVClKRCS5vLzMiQqPHIEPPgBfX9i5Ex57DBo1grVrra5QRDKCZAeeGTNmUKdOHfbs2ZNo3+eff07lypXx8HigQV8iIinm4wPvvANHj5r9ery9Yf16c0LDli3NldpFJOtKduDZs2cPVapUoXbt2owaNYqEhAROnDhBSEgIb7zxBuPGjWPZsmVpWauIyH3lywdjxpi3t1580bz1tXy5eZurUye4x5KAIuLCnB6WvnjxYl588UUKFSrEsWPHqFu3Ll988QUlSpRIqxpTlYali2Qtf/wBQ4eanZkNw5y9uUcPc5smZxfJPNJ9WHq9evWoUqUKu3btIiEhgXfeeSfThB0RyXrKlIGZM2HHDmjTBuLj4csvzaHsAwfCuXNWVygi6cGpwPP9999TsWJFEhIS2L9/Py+99BKPPPIIAwYM4Nq1a2lVo4hIilWtas7avH49NGkCcXHw8cdQsiS89hqcPm11hSKSlpIdeDp06MDzzz/Pe++9R0REBOXKlWPMmDGsXLmSpUuXUq1aNTZu3JiWtYqIpFhwMKxcebtfz5UrMH68GXx69TI7PYuI60l24ImMjGT79u307dvXYXtwcDA7duzg0UcfpUmTJqleoIhIartz1ualS6FBA7h+HT7/3LzV9fTT5sSGIuI6kt1pOSEh4b6LeK5Zs4bGjRunSmFpRZ2WRSQpa9fCyJFmy88t//mPuXZXUJB1dYmIKd06LSdnxfKMHnZERO6mUSNYtgy2bjWXrrDZzD4/9epB8+YQEaGZm0UyMy0tISJyh5o1zcVJ9+0zh697eMCKFRASYoafxYu1VpdIZqTAIyKShPLlYfp0c8mKvn3NmZs3b4Z27cwRXzNnwj1W2xGRDEaBR0TkHooXh08+gT//hMGDzbW69u41OzaXLWt2dNasHCIZnwKPiEgyFCxodmr+808YMQLy54djx8yh7CVLwrhxcOmS1VWKyN0o8IiIOCF3bnPk1p9/wn//ay5PERlpLlh
aogS89x78/bfVVYrIvynwiIg8gBw54JVXzLW6vvrKvL31zz/w/vtm8NHszSIZiwKPiEgKeHpCz57mqK65c6F6dcfZm597DnbvtrpKEVHgERFJBe7u0LEjbNtmzt7csKE5e/OXX5qjupo1g0WLzMVLRST9KfCIiKQimw1atjRnbl63zgxB7u7m+l3t25urt48fb97+EpH0o8AjIpJGGjQwb3MdPQqDBkHevHD8uNm/p1gxeOkl2L/f6ipFsgYFHhGRNFa8OIwaBX/9BdOmQZUqEBsLU6ZAxYrwyCPw00+awVkkLSnwiIikk+zZzU7MO3eat7jatQM3NwgPhzZtzJFe//0vXLxodaUirkeBR0Qkndls0LQpLFxoDmt/7TVzfp8jR6B/f/N2V9++cOiQxYWKuBAFHhERC5UsCWPHmre7Jk+GChXg8mWYOBHKlYNWrWD5ct3uEkkpBR4RkQwgZ05zmYq9e+GXX+Cxx8yWoGXLzFFfFSvCpElavkLkQSnwiIhkIDYbtGgBP/5o3tLq1w98fODgQejTx7zdNXCgeftLRJJPgUdEJIMqUwYmTIBTp+DTT+GhhyAmBj7+2Hz+n/+YLUCazFDk/hR4REQyOB8fs3XnwAFzFudHHwXDMFuBWrWCwEAYOtSc40dEkqbAIyKSSbi5mf15li0zw88rr5iTGf71FwwbBqVKmXP6zJ0LcXFWVyuSsSjwiIhkQuXKmXP2nDoF338PzZubrT7h4dCpExQtavb12bvX6kpFMgabYRiG1UWkp5iYGPz8/Lh48SK+vr5WlyMikmqOHoWvvoLp0+H06dvb69c3Jzx88knIlcu6+kRSIqW/32rhERFxEaVKwfDh8Oef5lIV7dqZC5du3AjPPguFC8Pzz8OmTWZrkEhWohYeEREXFhkJM2bAF1+YszrfUrmy2erz9NOQL5919Ykkl1p4RETkrgoVgjffNOf0Wb0aunUDb2/Ys8dcxqJIEejcGX79VbM5i2tTC4+ISBYTHQ2zZpmtPtu3394eGGje+urRw5zgUCQjSenvtwKPiEgWtm0bfPklzJx5e5V2Nzdzrp9nn4XWrcHLy9oaRUCBx2kKPCIiicXGwv/+Z7b6rFlze3uePPDEE/DUU9C4sRmGRKygwOMkBR4RkXs7dMhs9fnuO8fh7UWLQpcuZvipXt1c90skvSjwOEmBR0QkeeLjzdaemTNh/vzbt7wAypeHrl3NAFS6tHU1StahwOMkBR4REefFxZlLWsycaa7hdefSFUFBZqtPp07g729djeLaFHicpMAjIpIyMTGwcKE50uvO4exubhASYoaf9u1Bf8VKalLgcZICj4hI6omMNBcrnTXLnMH5Fm9vaNPGDD8tW2qkl6ScAo+TFHhERNLGH3+YC5nOnAkHD97enju340gvd3fLSpRMTIHHSQo8IiJpyzDMCQ1nzTID0L9HenXubIafGjU00kuST4HHSQo8IiLp59ZIr1mzzJFe0dG395UtC48/bj5q11b4kXtT4HGSAo+IiDXi4mD58tsjva5du70vIMDs6Ny+PTRsCB4e1tUpGZMCj5MUeERErBcTA0uXmqO9liyBK1du78ufH9q2NVt+mjdXh2cxKfA4SYFHRCRjuXrVHN6+YAH88ANcuHB7n4+PuZ7X44+bo71y5bKuTrGWAo+TFHhERDKumzfNPj8LFpitP3d2ePbygkceMcNPmzaQL591dUr6U+BxkgKPiEjmkJAAmzebwWfBAnPY+y3u7tC0qdnnp107c/SXuDYFHicp8IiIZD6GAXv23A4/O3c67q9Xz2z5ad8eypSxpkZJWwo8TlLgERHJ/I4cMcPPwoWwYYPjvipVzPDTtq1WdXclCjxOUuAREXEtp0/D4sVmy8/KlebcP7cULmx2dm7Vylzny8/PujolZVL6++2WBjU5bdKkSQQGBuLt7U1QUBCbN2++67HTpk2jUaNG5MmThzx58hASEnLP40VExLUVKQIvvQTh4RAVBTNmmP1
6cuaEM2fgq6/MpS3y5zf7/YwZY94ey1r/uy+WB545c+YwcOBAhg4dyrZt26hWrRqhoaFERUUlefyqVavo0qULK1euZOPGjQQEBPDII49w6tSpdK5cREQymrx5oXt381bX33+bIWjAAChf3hwBtno1vPmmedurRAno1cscCn/5stWVS1qz/JZWUFAQderUYeLEiQAkJCQQEBBA3759GTRo0H3fHx8fT548eZg4cSLdu3e/7/G6pSUikjUdPQrLlpkTHq5Y4TjTs6cnNGly+/ZX2bLq+5PRZOpbWtevX2fr1q2EhITYt7m5uRESEsLGjRuT9RmxsbHcuHGDvHnzplWZIiLiAkqVgt69zZmdL1wwg0+fPlCyJFy/brYGDRxotgaVKQN9+5oB6epVqyuX1GBp4Dl//jzx8fH4+/s7bPf39ycyMjJZn/Hmm29SpEgRh9B0p7i4OGJiYhweIiKStWXPbrbmfPqpOeLrwAH46COzY3O2bGZr0MSJZmtP3rzmbM+TJsGxY1ZXLg8qUy/P9uGHHzJ79mxWrVqFt7d3kseMGjWK999/P50rExGRzMJmg3LlzMeAAWZ/nogIswVo6VL466/bz8FsAWrZElq0gEaNtNxFZmFpH57r16+TI0cO5s+fT7t27ezbw8LCiI6OZvHixXd977hx4xg+fDi//vortWvXvutxcXFxxMXF2V/HxMQQEBCgPjwiInJftyY8vBV41q93HPbu4WFOeti8ufkICjL7A0nqy/Tz8AQFBVG3bl0+/fRTwOy0XLx4cfr06XPXTstjxoxhxIgR/Pzzz9SrV8+p86nTsoiIPKjoaHOh02XLzFagP/903J8zp9nqcysAVasGbpaPh3YNmT7wzJkzh7CwMD7//HPq1q3LhAkTmDt3LgcOHMDf35/u3btTtGhRRo0aBcDo0aN59913mTVrFg0aNLB/Tq5cuciVjHZFBR4REUkNhmH29YmIMB8rVsD5847H5MsHDz8MzZqZAeihhzT660Fl+sADMHHiRMaOHUtkZCTVq1fnk08+ISgoCICmTZsSGBjI119/DUBgYCB//jtSA0OHDuW9996777kUeEREJC0kJJi3v24FoNWrE8/vExBwO/w0b25OmijJ4xKBJz0p8IiISHq4cQO2bLkdgDZuNIe/36l8+dvhp2lTyJPHklIzBQUeJynwiIiIFWJjYd0689ZXRARs3eq4vIWbG9SsaYafZs0gOFgjwO6kwOMkBR4REckI/vkHVq263QJ04IDjfnd3c7X3Ro2gYUPz8a9p67IUBR4nKfCIiEhGdOqU2fqzYoW56nsS3VV56KHbAahRIyhdOut0glbgcZICj4iIZAZ//WXeAlu71vzn7t2JV3gvVOh260+jRlC1qjk3kCtS4HGSAo+IiGRG//xjdny+FYA2b07cCdrHB+rXvx2AgoLMZTRcgQKPkxR4RETEFVy7Zo4Cu9UKtH49/Hu5yGzZoFat27fBGjQw5wbKjBR4nKTAIyIirig+3pwH6FYAWrsWTp9OfFzFimbwCQqCunXN1+7u6V+vsxR4nKTAIyIiWYFhwPHjjv2A9u9PfFyuXFC7thmAboWgokXTvdz7UuBxkgKPiIhkVefOmbe+fvsNNm2C339PPBs0mIHnzgBUu7b1cwIp8DhJgUdERMQUH2+2+mzadPuxZ4+5TMad3NygUiXHEFSpUvreClPgcZICj4iIyN1dvgzbtjmGoL/+Snxczpy3b4XVrWv+s1ixtKtLgcdJCjwiIiLOOX3aHAZ/KwBt2ZL0rbAiRczg06gRDBiQujUo8DhJgUdERCRl7rwVdisI7d59+1ZY3brmttSU0t9vF52PUURERNKKuztUrmw+nn3W3Hblirkg6ubNGXOuHwUeERERSbGcOaFxY/OREblZXYCIiIhIWlPgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCM
iIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy8sQgWfSpEkEBgbi7e1NUFAQmzdvvufx8+bNo3z58nh7e1OlShWWLl2aTpWKiIhIZmR54JkzZw4DBw5k6NChbNu2jWrVqhEaGkpUVFSSx2/YsIEuXbrw7LPPsn37dtq1a0e7du3Ys2dPOlcuIiIimYXNMAzDygKCgoKoU6cOEydOBCAhIYGAgAD69u3LoEGDEh3fqVMnrly5wk8//WTfVq9ePapXr86UKVPue76YmBj8/Py4ePEivr6+qfdFREREJM2k9Pfb0hae69evs3XrVkJCQuzb3NzcCAkJYePGjUm+Z+PGjQ7HA4SGht71eBEREREPK09+/vx54uPj8ff3d9ju7+/PgQMHknxPZGRkksdHRkYmeXxcXBxxcXH21xcvXgTMpCgiIiKZw63f7Qe9MWVp4EkPo0aN4v3330+0PSAgwIJqREREJCUuXbqEn5+f0++zNPDkz58fd3d3zp4967D97NmzFCpUKMn3FCpUyKnjBw8ezMCBA+2vExISuHDhAvny5cNmsyWrzpiYGAICAjh58qT6/aQzXXvr6NpbR9feOrr21rnftTcMg0uXLlGkSJEH+nxLA4+npye1atUiIiKCdu3aAWYgiYiIoE+fPkm+p379+kRERNC/f3/7tvDwcOrXr5/k8V5eXnh5eTlsy5079wPV6+vrq/8ALKJrbx1de+vo2ltH194697r2D9Kyc4vlt7QGDhxIWFgYtWvXpm7dukyYMIErV67Qs2dPALp3707RokUZNWoUAP369aNJkyaMHz+e1q1bM3v2bH7//XemTp1q5dcQERGRDMzywNOpUyfOnTvHu+++S2RkJNWrV2f58uX2jsknTpzAze32YLLg4GBmzZrFO++8w1tvvcVDDz3EokWLqFy5slVfQURERDI4ywMPQJ8+fe56C2vVqlWJtnXs2JGOHTumcVW3eXl5MXTo0ES3xiTt6dpbR9feOrr21tG1t05aX3vLJx4UERERSWuWLy0hIiIiktYUeERERMTlKfCIiIiIy1PgEREREZenwJMMkyZNIjAwEG9vb4KCgti8ebPVJbmcUaNGUadOHXx8fChYsCDt2rXj4MGDDsdcu3aN3r17ky9fPnLlykWHDh0SzbotKfPhhx9is9kcJvbUdU87p06d4umnnyZfvnxkz56dKlWq8Pvvv9v3G4bBu+++S+HChcmePTshISEcPnzYwopdQ3x8PEOGDKFkyZJkz56d0qVL88EHHzis0aRrnzrWrFlDmzZtKFKkCDabjUWLFjnsT851vnDhAl27dsXX15fcuXPz7LPPcvnyZadrUeC5jzlz5jBw4ECGDh3Ktm3bqFatGqGhoURFRVldmktZvXo1vXv35rfffiM8PJwbN27wyCOPcOXKFfsxAwYM4Mcff2TevHmsXr2a06dP8/jjj1tYtWvZsmULn3/+OVWrVnXYruueNv755x8aNGhAtmzZWLZsGfv27WP8+PHkyZPHfsyYMWP45JNPmDJlCps2bSJnzpyEhoZy7do1CyvP/EaPHs3kyZOZOHEi+/fvZ/To0YwZM4ZPP/3Ufoyufeq4cuUK1apVY9KkSUnuT8517tq1K3v37iU8PJyffvqJNWvW8MILLzhfjCH3VLduXaN379721/Hx8UaRIkWMUaNGWViV64uKijIAY/Xq1YZhGEZ0dLSRLVs2Y968efZj9u/fbwDGxo0brSrTZVy6dMl46KG
HjPDwcKNJkyZGv379DMPQdU9Lb775ptGwYcO77k9ISDAKFSpkjB071r4tOjra8PLyMr7//vv0KNFltW7d2njmmWcctj3++ONG165dDcPQtU8rgLFw4UL76+Rc53379hmAsWXLFvsxy5YtM2w2m3Hq1Cmnzq8Wnnu4fv06W7duJSQkxL7Nzc2NkJAQNm7caGFlru/ixYsA5M2bF4CtW7dy48YNhz+L8uXLU7x4cf1ZpILevXvTunVrh+sLuu5p6YcffqB27dp07NiRggULUqNGDaZNm2bff+zYMSIjIx2uvZ+fH0FBQbr2KRQcHExERASHDh0CYOfOnaxbt46WLVsCuvbpJTnXeePGjeTOnZvatWvbjwkJCcHNzY1NmzY5db4MMdNyRnX+/Hni4+Pty1zc4u/vz4EDByyqyvUlJCTQv39/GjRoYF8yJDIyEk9Pz0QLv/r7+xMZGWlBla5j9uzZbNu2jS1btiTap+uedo4ePcrkyZMZOHAgb731Flu2bOGVV17B09OTsLAw+/VN6u8fXfuUGTRoEDExMZQvXx53d3fi4+MZMWIEXbt2BdC1TyfJuc6RkZEULFjQYb+Hhwd58+Z1+s9CgUcynN69e7Nnzx7WrVtndSku7+TJk/Tr14/w8HC8vb2tLidLSUhIoHbt2owcORKAGjVqsGfPHqZMmUJYWJjF1bm2uXPnMnPmTGbNmkWlSpXYsWMH/fv3p0iRIrr2Lky3tO4hf/78uLu7JxqRcvbsWQoVKmRRVa6tT58+/PTTT6xcuZJixYrZtxcqVIjr168THR3tcLz+LFJm69atREVFUbNmTTw8PPDw8GD16tV88skneHh44O/vr+ueRgoXLkzFihUdtlWoUIETJ04A2K+v/v5Jfa+//jqDBg2ic+fOVKlShW7dujFgwABGjRoF6Nqnl+Rc50KFCiUaJHTz5k0uXLjg9J+FAs89eHp6UqtWLSIiIuzbEhISiIiIoH79+hZW5noMw6BPnz4sXLiQFStWULJkSYf9tWrVIlu2bA5/FgcPHuTEiRP6s0iB5s2bs3v3bnbs2GF/1K5dm65du9qf67qnjQYNGiSaeuHQoUOUKFECgJIlS1KoUCGHax8TE8OmTZt07VMoNjYWNzfHnz93d3cSEhIAXfv0kpzrXL9+faKjo9m6dav9mBUrVpCQkEBQUJBzJ0xRl+ssYPbs2YaXl5fx9ddfG/v27TNeeOEFI3fu3EZkZKTVpbmUl156yfDz8zNWrVplnDlzxv6IjY21H9OrVy+jePHixooVK4zff//dqF+/vlG/fn0Lq3ZNd47SMgxd97SyefNmw8PDwxgxYoRx+PBhY+bMmUaOHDmM7777zn7Mhx9+aOTOndtYvHixsWvXLqNt27ZGyZIljatXr1pYeeYXFhZmFC1a1Pjpp5+MY8eOGQsWLDDy589vvPHGG/ZjdO1Tx6VLl4zt27cb27dvNwDjo48+MrZv3278+eefhmEk7zo/+uijRo0aNYxNmzYZ69atMx566CGjS5cuTteiwJMMn376qVG8eHHD09PTqFu3rvHbb79ZXZLLAZJ8TJ8+3X7M1atXjZdfftnIkyePkSNHDqN9+/bGmTNnrCvaRf078Oi6p50ff/zRqFy5suHl5WWUL1/emDp1qsP+hIQEY8iQIYa/v7/h5eVlNG/e3Dh48KBF1bqOmJgYo1+/fkbx4sUNb29vo1SpUsbbb79txMXF2Y/RtU8dK1euTPLv9rCwMMMwkned//77b6NLly5Grly5DF9fX6Nnz57GpUuXnK7FZhh3TC0pIiIi4oLUh0dERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIPzGazsWjRojQ9x6pVq7DZbInW88rMvv7660Qr0ItI2lLgEZEkRUZG0rdvX0qVKoWXlxcBAQG0adPGYd2bM2fO0LJlyzStIzg4mDNnzuDn5wckPyxklFARGBjIhAkTrC5
DJMvzsLoAEcl4jh8/ToMGDcidOzdjx46lSpUq3Lhxg59//pnevXtz4MABgPuuVnzjxg2yZcuWolo8PT21QrWIpJhaeEQkkZdffhmbzcbmzZvp0KEDZcuWpVKlSgwcOJDffvvNftydt7SOHz+OzWZjzpw5NGnSBG9vb2bOnAnAV199RaVKlfDy8qJw4cL06dPH4T07duywf2Z0dDQ2m41Vq1YBjre0Vq1aRc+ePbl48SI2mw2bzcZ77733QN8xOjqa5557jgIFCuDr60uzZs3YuXOnff97771H9erV+fbbbwkMDMTPz4/OnTtz6dIl+zGXLl2ia9eu5MyZk8KFC/Pxxx/TtGlT+vfvD0DTpk35888/GTBggL3eO/38889UqFCBXLly8eijj3LmzJkH+i4icn8KPCLi4MKFCyxfvpzevXuTM2fORPvvd5to0KBB9OvXj/379xMaGsrkyZPp3bs3L7zwArt37+aHH36gTJkyD1RbcHAwEyZMwNfXlzNnznDmzBlee+21B/qsjh07EhUVxbJly9i6dSs1a9akefPmXLhwwX7MkSNHWLRoET/99BM//fQTq1ev5sMPP7TvHzhwIOvXr+eHH34gPDyctWvXsm3bNvv+BQsWUKxYMYYNG2av95bY2FjGjRvHt99+y5o1azhx4sQDfxcRuT/d0hIRB3/88QeGYVC+fPkHen///v15/PHH7a+HDx/Oq6++Sr9+/ezb6tSp80Cf7enpiZ+fHzabLUW3udatW8fmzZuJiorCy8sLgHHjxrFo0SLmz5/PCy+8AEBCQgJff/01Pj4+AHTr1o2IiAhGjBjBpUuXmDFjBrNmzaJ58+YATJ8+nSJFitjPkzdvXtzd3fHx8UlU740bN5gyZQqlS5cGoE+fPgwbNuyBv5OI3JsCj4g4MAwjRe+vXbu2/XlUVBSnT5+2B4KMYufOnVy+fJl8+fI5bL969SpHjhyxvw4MDLSHHYDChQsTFRUFwNGjR7lx4wZ169a17/fz86NcuXLJqiFHjhz2sPPvzxaR1KfAIyIOHnroIWw2m71jsrPuvA2WPXv2ex7r5mbeVb8zZN24ceOBzuuMy5cvU7hwYXs/oTvdecvu3x2ubTYbCQkJqVJDUp+d0rApInenPjwi4iBv3ryEhoYyadIkrly5kmi/M/Ph+Pj4EBgY6DCU/U4FChQAcOjbcmcH5qR4enoSHx+f7BqSUrNmTSIjI/Hw8KBMmTIOj/z58yfrM0qVKkW2bNnYsmWLfdvFixc5dOhQqtcrIimnFh4RSWTSpEk0aNCAunXrMmzYMKpWrcrNmzcJDw9n8uTJ7N+/P9mf9d5779GrVy8KFixIy5YtuXTpEuvXr6dv375kz56devXq8eGHH1KyZEmioqJ455137vl5gYGBXL58mYiICKpVq0aOHDnIkSNHksfGx8cnClBeXl6EhIRQv3592rVrx5gxYyhbtiynT59myZIltG/f3uG23N34+PgQFhbG66+/Tt68eSlYsCBDhw7Fzc3NYTRWYGAga9asoXPnznh5eSU7UIlI6lILj4gkUqpUKbZt28bDDz/Mq6++SuXKlWnRogURERFMnjzZqc8KCwtjwoQJfPbZZ1SqVInHHnuMw4cP2/d/9dVX3Lx5k1q1atG/f3+GDx9+z88LDg6mV69edOrUiQIFCjBmzJi7Hnv58mVq1Kjh8GjTpg02m42lS5fSuHFjevbsSdmyZencuTN//vkn/v7+yf5uH330EfXr1+exxx4jJCSEBg0aUKFCBby9ve3HDBs2jOPHj1O6dGl7i5aIpD+boZvGIiKp4sqVKxQtWpTx48fz7LPPWl2OiNxBt7RERB7Q9u3bOXDgAHXr1uXixYv2YeVt27a1uDIR+TcFHhGRFBg3bhwHDx7E09OTWrVqsXbtWvXTEcmAdEtLREREXJ46LYuIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RER
ExOUp8IiIiIjL+z8ANdILegdk5gAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "x_coherence = []\n", + "Ls = range(2,100,5)\n", + "for L in Ls:\n", + " c=Circuit((L)*[('Gi' ,0)])\n", + " ErrorDict={'Gi' : {('H','Z'): 1}}\n", + " EndErrors = ErrorPropagator(c,ErrorDict,NonMarkovian=True)\n", + "\n", + " corr=np.eye(len(c))*.01\n", + " error = averaged_evolution(corr,EndErrors,1)\n", + "\n", + " x_coherence += [np.real(error[1,1])]\n", + "plt.plot(Ls,x_coherence, color='blue')\n", + "plt.ylim(0,1.1)\n", + "plt.xlabel('Circuit Length')\n", + "plt.ylabel('X Coherence Decay')\n", + "plt.title('White noise dephasing')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]]]\n" + ] + } + ], + "source": [ + "list=[propagatableerrorgen('H',['X'],1)]\n", + "errors=ErrorPropagator(c,list,NonMarkovian=True,ErrorLayerDef=True)\n", + "print(errors)" + ] } ], "metadata": { diff --git a/pygsti/extras/errorgenpropagation/errordict_deprecated.py b/pygsti/extras/errorgenpropagation/errordict_deprecated.py new file mode 100644 index 000000000..95dadea0b --- /dev/null +++ b/pygsti/extras/errorgenpropagation/errordict_deprecated.py @@ -0,0 +1,10 @@ +from pygsti.extras.errorgenpropagation.propagatableerrorgen import propagatableerrorgen +from numpy import complex128 + +class errordict(dict): + + def __setitem__(self, __key: any, __value: any) -> None: + if __key in self : + super().__setitem__(__key,self[__key]+__value) + else: + super().__setitem__(__key,__value) \ No newline at end of file diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py 
index d8134540c..2fe015823 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -44,7 +44,7 @@ def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=Fal if not ErrorLayerDef: errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) else: - errorLayers=[[errorModel]]*circ.depth + errorLayers=[[errorModel]]*circ.depth #this doesn't work num_error_layers=len(errorLayers) fully_propagated_layers=[] @@ -209,7 +209,7 @@ def buildErrorlayers(circ,errorDict,qubits): paulis.append(p2) errorLayer.append(propagatableerrorgen(errType,paulis,gErrorDict[errs])) ErrorGens.append([errorLayer]) - print(ErrorGens) + return ErrorGens diff --git a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py new file mode 100644 index 000000000..68916d815 --- /dev/null +++ b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py @@ -0,0 +1,172 @@ +import stim +from localstimerrorgen import * +from numpy import abs,zeros, complex128 +from numpy.linalg import multi_dot +from scipy.linalg import expm +from pygsti.tools.internalgates import standard_gatenames_stim_conversions +from utilserrorgenpropagation import * + + +def ErrorPropagator(circ,errorModel,MultiGateDict=None,BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False,ErrorLayerDef=False): + if MultiGate and MultiGateDict is None: + MultiGateDict=dict() + stim_dict=standard_gatenames_stim_conversions() + if MultiGate: + for key in MultiGateDict: + stim_dict[key]=stim_dict[MultiGateDict[key]] + stim_layers=circ.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) + stim_layers.pop(0) #Immediatly toss the first layer because it is not important, + + propagation_layers=[] + if not BCHLayerwise or NonMarkovian: + while len(stim_layers) != 0: + top_layer=stim_layers.pop(0) + for layer in stim_layers: + top_layer = layer*top_layer + 
propagation_layers.append(top_layer) + else: + propagation_layers = stim_layers + + if not ErrorLayerDef: + errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) + else: + errorLayers=[[errorModel]]*circ.depth #this doesn't work + + num_error_layers=len(errorLayers) + + fully_propagated_layers=[] + for _ in range(0,num_error_layers-1): + err_layer=errorLayers.pop(0) + layer=propagation_layers.pop(0) + new_error_layer=[] + for err_order in err_layer: + new_error_dict=dict() + for key in err_order: + propagated_error_gen=key.propagate_error_gen_tableau(layer,err_order[key]) + new_error_dict[propagated_error_gen[0]]=propagated_error_gen[1] + new_error_layer.append(new_error_dict) + if BCHLayerwise and not NonMarkovian: + following_layer = errorLayers.pop(0) + new_errors=BCH_Handler(err_layer,following_layer,BCHOrder) + errorLayers.insert(new_errors,0) + else: + fully_propagated_layers.append(new_error_layer) + + fully_propagated_layers.append(errorLayers.pop(0)) + if BCHLayerwise and not NonMarkovian: + final_error=dict() + for order in errorLayers[0]: + for error in order: + if error in final_error: + final_error[error]=final_error[error]+order[error] + else: + final_error[error]=order[error] + return final_error + + elif not BCHLayerwise and not NonMarkovian: + simplified_EOC_errors=dict() + if BCHOrder == 1: + for layer in fully_propagated_layers: + for order in layer: + for error in order: + if error in simplified_EOC_errors: + simplified_EOC_errors[error]=simplified_EOC_errors[error]+order[error] + else: + simplified_EOC_errors[error]=order[error] + + else: + Exception("Higher propagated through Errors are not Implemented Yet") + return simplified_EOC_errors + + else: + return fully_propagated_layers + + + +def buildErrorlayers(circ,errorDict,qubits): + ErrorGens=[] + #For the jth layer of each circuit + for j in range(circ.depth): + l = circ.layer(j) # get the layer + errorLayer=dict() + for _, g in enumerate(l): # for gate in layer l + gErrorDict 
= errorDict[g.name] #get the errors for the gate + p1=qubits*'I' # make some paulis why? + p2=qubits*'I' + for errs in gErrorDict: #for an error in the accompanying error dictionary + errType=errs[0] + paulis=[] + for ind,el in enumerate(g): #enumerate the gate ind =0 is name ind = 1 is first qubit ind = 2 is second qubit + if ind !=0: #if the gate element of concern is not the name + p1=p1[:el] + errs[1][ind-1] +p1[(el+1):] + + paulis.append(stim.PauliString(p1)) + if errType in "CA": + for ind,el in enumerate(g): + if ind !=0: + p2=p2[:el] + errs[2][ind-1] +p2[(el+1):] + paulis.append(stim.PauliString(p2)) + errorLayer[localstimerrorgen(errType,paulis)]=gErrorDict[errs] + ErrorGens.append([errorLayer]) + return ErrorGens +''' + +Inputs: +_______ +err_layer (list of dictionaries) +following_layer (list of dictionaries) +BCHOrder: + +''' +def BCH_Handler(err_layer,following_layer,BCHOrder): + new_errors=[] + for curr_order in range(0,BCHOrder): + working_order=dict() + #add first order terms into new layer + if curr_order == 0: + for error_key in err_layer[curr_order]: + working_order[error_key]=err_layer[curr_order][error_key] + for error_key in following_layer[curr_order]: + working_order[error_key]=following_layer[curr_order[error_key]] + new_errors.append(working_order) + + elif curr_order ==1: + working_order={} + for error1 in err_layer[curr_order-1]: + for error2 in following_layer[curr_order-1]: + errorlist = commute_errors(error1,error2,BCHweight=1/2*err_layer[error1]*following_layer[error2]) + for error_tuple in errorlist: + working_order[error_tuple[0]]=error_tuple[1] + if len(err_layer)==2: + for error_key in err_layer[1]: + working_order[error_key]=err_layer[1][error_key] + if len(following_layer)==2: + for error_key in following_layer[1]: + working_order[error_key]=following_layer[1][error_key] + new_errors.append(working_order) + + else: + Exception("Higher Orders are not Implemented Yet") + return new_errors + +# There's a factor of a half missing 
in here. +def nm_propagators(corr, Elist,qubits): + Kms = [] + for idm in range(len(Elist)): + Am=zeros([4**qubits,4**qubits],dtype=complex128) + for key in Elist[idm][0]: + Am += key.toWeightedErrorBasisMatrix() + # This assumes that Elist is in reverse chronological order + partials = [] + for idn in range(idm, len(Elist)): + An=zeros([4**qubits,4**qubits],dtype=complex128) + for key2 in Elist[idn][0]: + An = key2.toWeightedErrorBasisMatrix() + partials += [corr[idm,idn] * Am @ An] + partials[0] = partials[0]/2 + Kms += [sum(partials,0)] + return Kms + +def averaged_evolution(corr, Elist,qubits): + Kms = nm_propagators(corr, Elist,qubits) + return multi_dot([expm(Km) for Km in Kms]) \ No newline at end of file diff --git a/pygsti/extras/errorgenpropagation/localstimerrorgen.py b/pygsti/extras/errorgenpropagation/localstimerrorgen.py new file mode 100644 index 000000000..5d6b57c71 --- /dev/null +++ b/pygsti/extras/errorgenpropagation/localstimerrorgen.py @@ -0,0 +1,96 @@ +from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel +from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * +import stim +from numpy import array,kron +from pygsti.tools import change_basis +from pygsti.tools.lindbladtools import create_elementary_errorgen + +class localstimerrorgen(ElementaryErrorgenLabel): + + + ''' + Initiates the errorgen object + Inputs: + ______ + errorgen_type: characture can be set to 'H' Hamiltonian, 'S' Stochastic, 'C' Correlated or 'A' active following the conventions + of the taxonomy of small markovian errorgens paper + + basis_element_labels + + Outputs: + Null + ''' + def __init__(self,errorgen_type: str ,basis_element_labels: list): + self.errorgen_type=str(errorgen_type) + self.basis_element_labels=tuple(basis_element_labels) + + ''' + hashes the error gen object + ''' + def __hash__(self): + pauli_hashable=[] + for pauli in self.basis_element_labels: + pauli_hashable.append(str(pauli)) + return 
hash((self.errorgen_type,tuple(pauli_hashable))) + + def labels_to_strings(self): + strings=[] + for paulistring in self.basis_element_labels: + strings.append(str(paulistring)[1:].replace('_',"I")) + return tuple(strings) + + + ''' + checks and if two error gens have the same type and labels + ''' + def __eq__(self, other): + return (self.errorgen_type == other.errorgen_type + and self.basis_element_labels == other.basis_element_labels) + + ''' + displays the errorgens as strings + ''' + def __str__(self): + return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")" + + + def __repr__(self): + return str((self.errorgen_type, self.basis_element_labels)) + + ''' + Returns the errorbasis matrix for the associated errorgenerator mulitplied by its error rate + + input: A pygsti defined matrix basis by default can be pauli-product, gellmann 'gm' or then pygsti standard basis 'std' + functions defaults to pauli product if not specified + ''' + def toWeightedErrorBasisMatrix(self,weight=1.0,matrix_basis='pp'): + PauliDict={ + 'I' : array([[1.0,0.0],[0.0,1.0]]), + 'X' : array([[0.0j, 1.0+0.0j], [1.0+0.0j, 0.0j]]), + 'Y' : array([[0.0, -1.0j], [1.0j, 0.0]]), + 'Z' : array([[1.0, 0.0j], [0.0j, -1.0]]) + } + paulis=[] + for paulistring in self.basis_element_labels: + for idx,pauli in enumerate(paulistring): + if idx == 0: + pauliMat = PauliDict[pauli] + else: + pauliMat=kron(pauliMat,PauliDict[pauli]) + paulis.append(pauliMat) + if self.errorgen_type in 'HS': + return weight*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0]),'std',matrix_basis) + else: + return weight*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0],paulis[1]),'std',matrix_basis) + + def propagate_error_gen_tableau(self, slayer,weight): + new_basis_labels = [] + weightmod = 1 + for pauli in self.basis_element_labels: + temp = slayer(pauli) + weightmod=weightmod*temp.sign + temp=temp*temp.sign + new_basis_labels.append(temp) + + return 
(localstimerrorgen(self.errorgen_type,new_basis_labels),weightmod*weight) + diff --git a/pygsti/extras/errorgenpropagation/propagatableerrorgen.py b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py index 7976b0f50..b9e71dbc5 100644 --- a/pygsti/extras/errorgenpropagation/propagatableerrorgen.py +++ b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py @@ -89,6 +89,21 @@ def getP1(self): ''' def getP2(self): return self.basis_element_labels[1] + + def ErrorWeight(self): + def Weight(pauli): + weight=0 + for char in pauli: + if char is 'I': + continue + else: + weight+=1 + return weight + if len(self.basis_element_labels)==1 or Weight(self.basis_element_labels[0]) > Weight(self.basis_element_labels[1]): + return Weight(self.basis_element_labels[0]) + else: + return Weight(self.basis_element_labels[1]) + ''' propagates a propagatableerrorgen object through a clifford layer, returns the created error gen diff --git a/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py b/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py new file mode 100644 index 000000000..ca07bdb58 --- /dev/null +++ b/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py @@ -0,0 +1,195 @@ + +from pygsti.extras.errorgenpropagation.localstimerrorgen import localstimerrorgen +from numpy import conjugate + +''' +Returns the Commutator of two errors +''' +def commute_errors(ErG1,ErG2, weightFlip=1.0, BCHweight=1.0): + def com(P1,P2): + P3=P1*P2-P2*P1 + return (P3.weight,P3*conjugate(P3.weight)) + + def acom(P1,P2): + P3=P1*P2+P2*P1 + return (P3.weight,P3*conjugate(P3.weight)) + + def labelMultiply(P1,P2): + P3=P1*P2 + return (P3.weight,P3*conjugate(P3.weight)) + + errorGens=[] + + wT=weightFlip*BCHweight + + if ErG1.getType()=='H' and ErG2.getType()=='H': + pVec=com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) + + elif ErG1.getType()=='H' and ErG2.getType()=='S': + 
pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , 1j*wT*pVec[0] ) ) + + elif ErG1.getType()=='S' and ErG2.getType()=='H': + pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , -1j*wT *pVec[0] ) ) + + elif ErG1.getType()=='H' and ErG2.getType()=='C': + pVec1=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen('C' , [pVec1[1], ErG2.basis_element_labels[1]] , 1j*wT*pVec1[0] ) ) + pVec2=com(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen('C' , [pVec2[1] , ErG2.basis_element_labels[0]] , 1j*wT*pVec2[0] ) ) + + elif ErG1.getType()=='C' and ErG2.getType()=='H': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType()=='H' and ErG2.getType()=='A': + pVec1 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) + errorGens.append( localstimerrorgen('A' , [pVec1[1] , ErG2.basis_element_labels[1]] , -1j*wT*pVec1[0]) ) + pVec2 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) + errorGens.append( localstimerrorgen('A' , [ErG2.basis_element_labels[0], pVec2[1]] , -1j*wT*pVec2[0] ) ) + + elif ErG1.getType()=='A' and ErG2.getType()=='H': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType()=='S' and ErG2.getType()=='S': + errorGens.append( localstimerrorgen('H', ErG1.basis_element_labels[0],0 )) + + elif ErG1.getType()=='S' and ErG2.getType()=='C': + pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) + pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = 
labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) + pVec2 = labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 =acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) + pVec2 = labelMultiply(pVec1[1],ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'A' ,[pVec2[1], ErG1.basis_element_labels[0]] , -1j*.5*wT*pVec1[0]*pVec2[0])) + pVec1=acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) + pVec2=labelMultiply(ErG1.basis_element_labels[0],pVec1[1]) + errorGens.append( localstimerrorgen( 'A', [ErG1.basis_element_labels[0] ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'S': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'S' and ErG2.getType() == 'A': + pVec1 =labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) + pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) + pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) + pVec2=labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) + errorGens.append( localstimerrorgen( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = com(ErG2.basis_element_labels[0] , ErG2.basis_element_labels[1]) + pVec2 = com(ErG1.basis_element_labels[0],pVec1[1]) + errorGens.append( localstimerrorgen( 'A', [ErG1.basis_element_labels[0], pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) + + elif ErG1.getType() == 'A' and ErG1.getType() == 'S': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'C': + A=ErG1.basis_element_labels[0] + B=ErG1.basis_element_labels[1] + 
P=ErG2.basis_element_labels[0] + Q=ErG2.basis_element_labels[1] + pVec1 = labelMultiply(A,P) + pVec2 =labelMultiply(Q,B) + errorGens.append( localstimerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(A,Q) + pVec2 =labelMultiply(P,B) + errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(B,P) + pVec2 =labelMultiply(Q,A) + errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = labelMultiply(B,Q) + pVec2 =labelMultiply(P,A) + errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=com(P,pVec1[1]) + errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=com(Q,pVec1[1]) + errorGens.append( localstimerrorgen( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(P,Q) + pVec2=com(pVec1[1],A) + errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(P,Q) + pVec2=com(pVec1[1],B) + errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) + pVec1=acom(A,B) + pVec2=acom(P,Q) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( localstimerrorgen( 'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + elif ErG1.getType() == 'C' and ErG2.getType() == 'A': + A=ErG1.basis_element_labels[0] + B=ErG1.basis_element_labels[1] + P=ErG2.basis_element_labels[0] + Q=ErG2.basis_element_labels[1] + pVec1 = labelMultiply(A,P) + pVec2 =labelMultiply(Q,B) + errorGens.append( localstimerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(A,Q) + pVec2 =labelMultiply(P,B) + errorGens.append( localstimerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(B,P) + pVec2 =labelMultiply(Q,A) + errorGens.append( localstimerrorgen('C' , [pVec1[1],pVec2[1]] , 
1j*wT*pVec1[0]*pVec2[0])) + pVec1 = labelMultiply(P,A) + pVec2 =labelMultiply(B,Q) + errorGens.append( localstimerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + pVec1 = com(P,Q) + pVec2 =com(A,pVec1[1]) + errorGens.append( localstimerrorgen('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) + pVec1 = com(P,Q) + pVec2 =com(B,pVec1[1]) + errorGens.append( localstimerrorgen('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) + pVec1 = acom(A,B) + pVec2 =com(P,pVec1[1]) + errorGens.append( localstimerrorgen('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = acom(A,B) + pVec2 =com(Q,pVec1[1]) + errorGens.append( localstimerrorgen('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) + pVec1 = com(P,Q) + pVec2 =acom(A,B) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( localstimerrorgen('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + elif ErG1.getType() == 'A' and ErG2.getType() == 'C': + errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) + + elif ErG1.getType() == 'A' and ErG2.getType() == 'A': + A=ErG1.basis_element_labels[0] + B=ErG1.basis_element_labels[1] + P=ErG2.basis_element_labels[0] + Q=ErG2.basis_element_labels[1] + pVec1=labelMultiply(Q,B) + pVec2=labelMultiply(A,P) + errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(P,A) + pVec2=labelMultiply(B,Q) + errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(B,P) + pVec2=labelMultiply(Q,A) + errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=labelMultiply(A,Q) + pVec2=labelMultiply(P,B) + errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(B,pVec1[1]) + errorGens.append(localstimerrorgen('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(A,pVec1[1]) + errorGens.append(localstimerrorgen('C',[pVec2[1],B] 
,-.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(A,B) + pVec2=com(P,pVec1[1]) + errorGens.append(localstimerrorgen('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(A,B) + pVec2=com(Q,pVec1[1]) + errorGens.append(localstimerrorgen('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) + pVec1=com(P,Q) + pVec2=com(A,B) + pVec3=com(pVec1[1],pVec2[1]) + errorGens.append( localstimerrorgen('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + + + return errorGens \ No newline at end of file diff --git a/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py index 6e33c1e99..98d07a87f 100644 --- a/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py +++ b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py @@ -1,4 +1,5 @@ import stim +from numpy import conjugate @@ -61,4 +62,5 @@ def pyGSTiPauli_2_stimPauli(pauli): in this function, if the weight is needed please store paulistring::weight prior to applying this function ''' def stimPauli_2_pyGSTiPauli(pauliString): + pauliString=conjugate(pauliString.sign)*pauliString return str(pauliString)[1:].replace('_',"I") \ No newline at end of file diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index 91ac69567..8f320b93c 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -320,6 +320,8 @@ def standard_gatenames_stim_conversions(): A dictionary converting the gates with standard names to stim tableus for these gates. 
Currently is only capable of converting clifford gates, no capability for T gates + TODO: Add all standard clifford gate names in + Returns ------- A dict mapping string to tableu From 7a5d94c4784faa56e0635987c74971e509a10e2c Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Fri, 7 Jun 2024 15:22:51 -0600 Subject: [PATCH 008/102] Fixed a bug in the translation code --- pygsti/extras/errorgenpropagation/errorpropagator.py | 2 ++ pygsti/extras/errorgenpropagation/propagatableerrorgen.py | 7 ++++--- .../errorgenpropagation/utilspygstistimtranslator.py | 6 ++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/extras/errorgenpropagation/errorpropagator.py index 2fe015823..f58115d3e 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator.py @@ -46,6 +46,7 @@ def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=Fal else: errorLayers=[[errorModel]]*circ.depth #this doesn't work + num_error_layers=len(errorLayers) fully_propagated_layers=[] for _ in range(0,num_error_layers-1): @@ -54,6 +55,7 @@ def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=Fal for err_order in err_layer: for errorGenerator in err_order: errorGenerator.propagate_error_gen_inplace_tableau(layer) + if BCHLayerwise and not NonMarkovian: following_layer = errorLayers.pop(0) new_errors=BCH_Handler(err_layer,following_layer,BCHOrder) diff --git a/pygsti/extras/errorgenpropagation/propagatableerrorgen.py b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py index b9e71dbc5..3fbaa9e33 100644 --- a/pygsti/extras/errorgenpropagation/propagatableerrorgen.py +++ b/pygsti/extras/errorgenpropagation/propagatableerrorgen.py @@ -50,7 +50,7 @@ def __eq__(self, other): displays the errorgens as strings ''' def __str__(self): - return self.errorgen_type + "(" + ",".join(map(str, 
self.basis_element_labels)) + ")" + ": " + self.error_rate + return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")" + ": " + str(self.error_rate) def __repr__(self): @@ -94,7 +94,7 @@ def ErrorWeight(self): def Weight(pauli): weight=0 for char in pauli: - if char is 'I': + if char == 'I': continue else: weight+=1 @@ -133,11 +133,12 @@ def propagate_error_gen_inplace_tableau(self, slayer): temp = slayer(temp) weightmod=weightmod*temp.sign new_basis_labels.append(stimPauli_2_pyGSTiPauli(temp)) - if self.errorgen_type in 'HCA': self.error_rate=self.error_rate*weightmod self.basis_element_labels =tuple(new_basis_labels) + + ''' returns the strings representing the pauli labels in the pygsti representation of paulis as stim PauliStrings ''' diff --git a/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py index 98d07a87f..b6473318f 100644 --- a/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py +++ b/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py @@ -62,5 +62,7 @@ def pyGSTiPauli_2_stimPauli(pauli): in this function, if the weight is needed please store paulistring::weight prior to applying this function ''' def stimPauli_2_pyGSTiPauli(pauliString): - pauliString=conjugate(pauliString.sign)*pauliString - return str(pauliString)[1:].replace('_',"I") \ No newline at end of file + n=1 + if pauliString.sign==1j or pauliString.sign==-1j: + n=2 + return str(pauliString)[n:].replace('_',"I") \ No newline at end of file From 19a4220e00df3afbac5eba21ac37d758fc879aca Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 9 Jun 2024 21:57:30 -0600 Subject: [PATCH 009/102] Start refactors on cumulant expansion code Start refactoring variable names and begin work on making the error generator propagator into a class. 
--- pygsti/extras/errorgenpropagation/__init__.py | 10 +++ .../errorpropagator_dev.py | 56 ++++++++---- .../errorgenpropagation/localstimerrorgen.py | 5 +- .../utilserrorgenpropagation.py | 86 +++++++++---------- pygsti/extras/lfh/__init__.py | 9 ++ pygsti/extras/lfh/lfherrorgen.py | 2 +- setup.py | 2 + 7 files changed, 104 insertions(+), 66 deletions(-) create mode 100644 pygsti/extras/errorgenpropagation/__init__.py create mode 100644 pygsti/extras/lfh/__init__.py diff --git a/pygsti/extras/errorgenpropagation/__init__.py b/pygsti/extras/errorgenpropagation/__init__.py new file mode 100644 index 000000000..f6b034ea3 --- /dev/null +++ b/pygsti/extras/errorgenpropagation/__init__.py @@ -0,0 +1,10 @@ +""" Error Generator Propagation Sub-package """ +#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + diff --git a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py index 68916d815..ee7a34915 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py @@ -1,24 +1,42 @@ import stim -from localstimerrorgen import * +from pygsti.extras.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from numpy import abs,zeros, complex128 from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions -from utilserrorgenpropagation import * +from pygsti.extras.errorgenpropagation.utilserrorgenpropagation import commute_errors +class ErrorGeneratorPropagator: -def ErrorPropagator(circ,errorModel,MultiGateDict=None,BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False,ErrorLayerDef=False): - if MultiGate and MultiGateDict is None: - MultiGateDict=dict() + def __init__(self, model, multi_gate_dict=None, bch_order=1, + bch_layerwise=False, nonmarkovian=False, multi_gate=False, + error_layer_def=False): + self.model = model + self.bch_order = bch_order + self.bch_layerwise = bch_layerwise + + def propagate_errorgen_bch(circuit, bch_order, bch_layerwise): + pass + + def propagate_errorgen_nonmarkovian(circuit, multi_gate_dict): + pass + + + + +def ErrorPropagator(circ,errorModel,multi_gate_dict=None,bch_order=1,bch_layerwise=False, + nonmarkovian=False,multi_gate=False,error_layer_def=False): + if multi_gate and multi_gate_dict is None: + multi_gate_dict=dict() stim_dict=standard_gatenames_stim_conversions() - if MultiGate: - for key in MultiGateDict: - stim_dict[key]=stim_dict[MultiGateDict[key]] + if multi_gate: + for key in multi_gate_dict: + stim_dict[key]=stim_dict[multi_gate_dict[key]] 
stim_layers=circ.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) stim_layers.pop(0) #Immediatly toss the first layer because it is not important, propagation_layers=[] - if not BCHLayerwise or NonMarkovian: + if not bch_layerwise or nonmarkovian: while len(stim_layers) != 0: top_layer=stim_layers.pop(0) for layer in stim_layers: @@ -27,7 +45,7 @@ def ErrorPropagator(circ,errorModel,MultiGateDict=None,BCHOrder=1,BCHLayerwise=F else: propagation_layers = stim_layers - if not ErrorLayerDef: + if not error_layer_def: errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) else: errorLayers=[[errorModel]]*circ.depth #this doesn't work @@ -45,15 +63,15 @@ def ErrorPropagator(circ,errorModel,MultiGateDict=None,BCHOrder=1,BCHLayerwise=F propagated_error_gen=key.propagate_error_gen_tableau(layer,err_order[key]) new_error_dict[propagated_error_gen[0]]=propagated_error_gen[1] new_error_layer.append(new_error_dict) - if BCHLayerwise and not NonMarkovian: + if bch_layerwise and not nonmarkovian: following_layer = errorLayers.pop(0) - new_errors=BCH_Handler(err_layer,following_layer,BCHOrder) + new_errors=BCH_Handler(err_layer,following_layer,bch_order) errorLayers.insert(new_errors,0) else: fully_propagated_layers.append(new_error_layer) fully_propagated_layers.append(errorLayers.pop(0)) - if BCHLayerwise and not NonMarkovian: + if bch_layerwise and not nonmarkovian: final_error=dict() for order in errorLayers[0]: for error in order: @@ -63,9 +81,9 @@ def ErrorPropagator(circ,errorModel,MultiGateDict=None,BCHOrder=1,BCHLayerwise=F final_error[error]=order[error] return final_error - elif not BCHLayerwise and not NonMarkovian: + elif not bch_layerwise and not nonmarkovian: simplified_EOC_errors=dict() - if BCHOrder == 1: + if bch_order == 1: for layer in fully_propagated_layers: for order in layer: for error in order: @@ -106,7 +124,7 @@ def buildErrorlayers(circ,errorDict,qubits): if ind !=0: p2=p2[:el] + errs[2][ind-1] +p2[(el+1):] 
paulis.append(stim.PauliString(p2)) - errorLayer[localstimerrorgen(errType,paulis)]=gErrorDict[errs] + errorLayer[_LSE(errType,paulis)]=gErrorDict[errs] ErrorGens.append([errorLayer]) return ErrorGens ''' @@ -115,12 +133,12 @@ def buildErrorlayers(circ,errorDict,qubits): _______ err_layer (list of dictionaries) following_layer (list of dictionaries) -BCHOrder: +bch_order: ''' -def BCH_Handler(err_layer,following_layer,BCHOrder): +def BCH_Handler(err_layer,following_layer,bch_order): new_errors=[] - for curr_order in range(0,BCHOrder): + for curr_order in range(0,bch_order): working_order=dict() #add first order terms into new layer if curr_order == 0: diff --git a/pygsti/extras/errorgenpropagation/localstimerrorgen.py b/pygsti/extras/errorgenpropagation/localstimerrorgen.py index 5d6b57c71..8a36c59d0 100644 --- a/pygsti/extras/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/extras/errorgenpropagation/localstimerrorgen.py @@ -5,8 +5,7 @@ from pygsti.tools import change_basis from pygsti.tools.lindbladtools import create_elementary_errorgen -class localstimerrorgen(ElementaryErrorgenLabel): - +class LocalStimErrorgenLabel(ElementaryErrorgenLabel): ''' Initiates the errorgen object @@ -92,5 +91,5 @@ def propagate_error_gen_tableau(self, slayer,weight): temp=temp*temp.sign new_basis_labels.append(temp) - return (localstimerrorgen(self.errorgen_type,new_basis_labels),weightmod*weight) + return (LocalStimErrorgenLabel(self.errorgen_type,new_basis_labels),weightmod*weight) diff --git a/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py b/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py index ca07bdb58..9d0eab4db 100644 --- a/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py +++ b/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py @@ -1,5 +1,5 @@ -from pygsti.extras.errorgenpropagation.localstimerrorgen import localstimerrorgen +from pygsti.extras.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as 
_LSE from numpy import conjugate ''' @@ -24,50 +24,50 @@ def labelMultiply(P1,P2): if ErG1.getType()=='H' and ErG2.getType()=='H': pVec=com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) + errorGens.append( _LSE( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) elif ErG1.getType()=='H' and ErG2.getType()=='S': pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , 1j*wT*pVec[0] ) ) + errorGens.append( _LSE( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , 1j*wT*pVec[0] ) ) elif ErG1.getType()=='S' and ErG2.getType()=='H': pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , -1j*wT *pVec[0] ) ) + errorGens.append( _LSE( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , -1j*wT *pVec[0] ) ) elif ErG1.getType()=='H' and ErG2.getType()=='C': pVec1=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen('C' , [pVec1[1], ErG2.basis_element_labels[1]] , 1j*wT*pVec1[0] ) ) + errorGens.append( _LSE('C' , [pVec1[1], ErG2.basis_element_labels[1]] , 1j*wT*pVec1[0] ) ) pVec2=com(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen('C' , [pVec2[1] , ErG2.basis_element_labels[0]] , 1j*wT*pVec2[0] ) ) + errorGens.append( _LSE('C' , [pVec2[1] , ErG2.basis_element_labels[0]] , 1j*wT*pVec2[0] ) ) elif ErG1.getType()=='C' and ErG2.getType()=='H': errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) elif ErG1.getType()=='H' and ErG2.getType()=='A': pVec1 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - errorGens.append( localstimerrorgen('A' , [pVec1[1] , ErG2.basis_element_labels[1]] , -1j*wT*pVec1[0]) ) + errorGens.append( _LSE('A' , [pVec1[1] , 
ErG2.basis_element_labels[1]] , -1j*wT*pVec1[0]) ) pVec2 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) - errorGens.append( localstimerrorgen('A' , [ErG2.basis_element_labels[0], pVec2[1]] , -1j*wT*pVec2[0] ) ) + errorGens.append( _LSE('A' , [ErG2.basis_element_labels[0], pVec2[1]] , -1j*wT*pVec2[0] ) ) elif ErG1.getType()=='A' and ErG2.getType()=='H': errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) elif ErG1.getType()=='S' and ErG2.getType()=='S': - errorGens.append( localstimerrorgen('H', ErG1.basis_element_labels[0],0 )) + errorGens.append( _LSE('H', ErG1.basis_element_labels[0],0 )) elif ErG1.getType()=='S' and ErG2.getType()=='C': pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) pVec1 = labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) pVec2 = labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) pVec1 =acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) pVec2 = labelMultiply(pVec1[1],ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'A' ,[pVec2[1], ErG1.basis_element_labels[0]] , -1j*.5*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' ,[pVec2[1], ErG1.basis_element_labels[0]] , -1j*.5*wT*pVec1[0]*pVec2[0])) pVec1=acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) pVec2=labelMultiply(ErG1.basis_element_labels[0],pVec1[1]) - errorGens.append( localstimerrorgen( 'A', [ErG1.basis_element_labels[0] ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 
'A', [ErG1.basis_element_labels[0] ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) elif ErG1.getType() == 'C' and ErG2.getType() == 'S': errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) @@ -75,13 +75,13 @@ def labelMultiply(P1,P2): elif ErG1.getType() == 'S' and ErG2.getType() == 'A': pVec1 =labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) pVec2=labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( localstimerrorgen( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) pVec1 = com(ErG2.basis_element_labels[0] , ErG2.basis_element_labels[1]) pVec2 = com(ErG1.basis_element_labels[0],pVec1[1]) - errorGens.append( localstimerrorgen( 'A', [ErG1.basis_element_labels[0], pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A', [ErG1.basis_element_labels[0], pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) elif ErG1.getType() == 'A' and ErG1.getType() == 'S': errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) @@ -93,32 +93,32 @@ def labelMultiply(P1,P2): Q=ErG2.basis_element_labels[1] pVec1 = labelMultiply(A,P) pVec2 =labelMultiply(Q,B) - errorGens.append( localstimerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) pVec1 = labelMultiply(A,Q) pVec2 =labelMultiply(P,B) - errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) pVec1 = 
labelMultiply(B,P) pVec2 =labelMultiply(Q,A) - errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) pVec1 = labelMultiply(B,Q) pVec2 =labelMultiply(P,A) - errorGens.append( localstimerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) pVec1=acom(A,B) pVec2=com(P,pVec1[1]) - errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) pVec1=acom(A,B) pVec2=com(Q,pVec1[1]) - errorGens.append( localstimerrorgen( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) pVec1=acom(P,Q) pVec2=com(pVec1[1],A) - errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) pVec1=acom(P,Q) pVec2=com(pVec1[1],B) - errorGens.append( localstimerrorgen( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) pVec1=acom(A,B) pVec2=acom(P,Q) pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( localstimerrorgen( 'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) + errorGens.append( _LSE( 'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) elif ErG1.getType() == 'C' and ErG2.getType() == 'A': A=ErG1.basis_element_labels[0] @@ -127,32 +127,32 @@ def labelMultiply(P1,P2): Q=ErG2.basis_element_labels[1] pVec1 = labelMultiply(A,P) pVec2 =labelMultiply(Q,B) - errorGens.append( localstimerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) pVec1 = labelMultiply(A,Q) pVec2 =labelMultiply(P,B) - errorGens.append( 
localstimerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) pVec1 = labelMultiply(B,P) pVec2 =labelMultiply(Q,A) - errorGens.append( localstimerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) pVec1 = labelMultiply(P,A) pVec2 =labelMultiply(B,Q) - errorGens.append( localstimerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) + errorGens.append( _LSE('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) pVec1 = com(P,Q) pVec2 =com(A,pVec1[1]) - errorGens.append( localstimerrorgen('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) pVec1 = com(P,Q) pVec2 =com(B,pVec1[1]) - errorGens.append( localstimerrorgen('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) pVec1 = acom(A,B) pVec2 =com(P,pVec1[1]) - errorGens.append( localstimerrorgen('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) pVec1 = acom(A,B) pVec2 =com(Q,pVec1[1]) - errorGens.append( localstimerrorgen('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) + errorGens.append( _LSE('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) pVec1 = com(P,Q) pVec2 =acom(A,B) pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( localstimerrorgen('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + errorGens.append( _LSE('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) elif ErG1.getType() == 'A' and ErG2.getType() == 'C': errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) @@ -164,32 +164,32 @@ def labelMultiply(P1,P2): Q=ErG2.basis_element_labels[1] pVec1=labelMultiply(Q,B) pVec2=labelMultiply(A,P) - errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) + 
errorGens.append(_LSE('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) pVec1=labelMultiply(P,A) pVec2=labelMultiply(B,Q) - errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) pVec1=labelMultiply(B,P) pVec2=labelMultiply(Q,A) - errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) pVec1=labelMultiply(A,Q) pVec2=labelMultiply(P,B) - errorGens.append(localstimerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) pVec1=com(P,Q) pVec2=com(B,pVec1[1]) - errorGens.append(localstimerrorgen('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) pVec1=com(P,Q) pVec2=com(A,pVec1[1]) - errorGens.append(localstimerrorgen('C',[pVec2[1],B] ,-.5*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('C',[pVec2[1],B] ,-.5*wT*pVec1[0]*pVec2[0])) pVec1=com(A,B) pVec2=com(P,pVec1[1]) - errorGens.append(localstimerrorgen('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) pVec1=com(A,B) pVec2=com(Q,pVec1[1]) - errorGens.append(localstimerrorgen('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) + errorGens.append(_LSE('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) pVec1=com(P,Q) pVec2=com(A,B) pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( localstimerrorgen('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) + errorGens.append( _LSE('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) return errorGens \ No newline at end of file diff --git a/pygsti/extras/lfh/__init__.py b/pygsti/extras/lfh/__init__.py new file mode 100644 index 000000000..6e09a0017 --- /dev/null +++ b/pygsti/extras/lfh/__init__.py @@ -0,0 +1,9 @@ +""" Low Frequency Hamiltonian Sub-package """ 
+#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** diff --git a/pygsti/extras/lfh/lfherrorgen.py b/pygsti/extras/lfh/lfherrorgen.py index bb1d19441..2fc373b5c 100644 --- a/pygsti/extras/lfh/lfherrorgen.py +++ b/pygsti/extras/lfh/lfherrorgen.py @@ -72,7 +72,7 @@ def __init__(self, h_means, otherlindbladparams, h_devs, lindblad_basis='auto', #We'll make the keys of the dictionary the index in h_means that the deviation corresponds to. self.dev_dict = h_devs - self.devs= _np.fromiter(h_devs.values()) + self.devs= _np.fromiter(h_devs.values(), dtype = _np.double) #set the random number generator used for sampling from a normal distribution. 
if rng is not None: diff --git a/setup.py b/setup.py index 7a7a46d80..74d2baf31 100644 --- a/setup.py +++ b/setup.py @@ -164,6 +164,8 @@ def setup_with_extensions(extensions=None): 'pygsti.extras.interpygate', 'pygsti.extras.crosstalk', 'pygsti.extras.devices', + 'pygsti.extras.lfh', + 'pygsti.extras.errorgenpropagation', 'pygsti.forwardsims', 'pygsti.io', 'pygsti.layouts', From 486b690b54f4336154b666c632991ddd532cba92 Mon Sep 17 00:00:00 2001 From: ashenmill <156946147+ashenmill@users.noreply.github.com> Date: Mon, 8 Jul 2024 09:57:54 -0600 Subject: [PATCH 010/102] Analytic Propagation Added Analytic Propagation --- .../errorpropagator_dev.py | 92 ++++++++++++++++++- .../errorgenpropagation/localstimerrorgen.py | 37 +++++++- pygsti/tools/internalgates.py | 2 + 3 files changed, 123 insertions(+), 8 deletions(-) diff --git a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py index 68916d815..6a311e20b 100644 --- a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/extras/errorgenpropagation/errorpropagator_dev.py @@ -1,10 +1,74 @@ import stim -from localstimerrorgen import * +from pygsti.extras.errorgenpropagation.localstimerrorgen import * from numpy import abs,zeros, complex128 from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions -from utilserrorgenpropagation import * +from pygsti.extras.errorgenpropagation.utilserrorgenpropagation import * +import copy as _copy + +def ErrorPropagatorAnalytic(circ,errorModel,ErrorLayerDef=False,startingErrors=None): + stim_layers=circ.convert_to_stim_tableau_layers() + + if startingErrors is None: + stim_layers.pop(0) + + propagation_layers=[] + while len(stim_layers)>0: + top_layer=stim_layers.pop(0) + for layer in stim_layers: + top_layer = layer*top_layer + propagation_layers.append(top_layer) + + if not ErrorLayerDef: + 
errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) + else: + errorLayers=[[_copy.deepcopy(eg) for eg in errorModel] for i in range(circ.depth)] + + if not startingErrors is None: + errorLayers.insert(0,startingErrors) + + fully_propagated_layers=[] + for (idx,layer) in enumerate(errorLayers): + new_error_dict=dict() + if idx Date: Tue, 9 Jul 2024 14:37:31 -0600 Subject: [PATCH 011/102] Update lfhforwardsims.py --- pygsti/extras/lfh/lfhforwardsims.py | 86 +++++++++++++++++++++++++---- 1 file changed, 75 insertions(+), 11 deletions(-) diff --git a/pygsti/extras/lfh/lfhforwardsims.py b/pygsti/extras/lfh/lfhforwardsims.py index c89bd8b06..315217080 100644 --- a/pygsti/extras/lfh/lfhforwardsims.py +++ b/pygsti/extras/lfh/lfhforwardsims.py @@ -22,7 +22,7 @@ from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator from pygsti.evotypes import Evotype as _Evotype from pygsti.extras.lfh.lfherrorgen import LFHLindbladErrorgen as _LFHLindbladErrorgen - +import pygsti.tools.slicetools as _slct from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator @@ -422,8 +422,8 @@ def add_helper_sim(self): if self.model is not None: self.helper_sim = _MatrixForwardSimulator(model=self.model) - def create_layout(self, bulk_circuit_list, dataset, resource_alloc, - array_types, verbosity=1): + def create_layout(self, bulk_circuit_list, dataset=None, resource_alloc=None, + array_types=(), verbosity=1): if self.helper_sim is None: self.add_helper_sim() @@ -561,7 +561,7 @@ def sigma_points(self): if isinstance(subop, _ExpErrorgenOp): if isinstance(subop.errorgen, _LFHLindbladErrorgen): dev_values.extend(subop.errorgen.devs) - mean_values.extend(subop.errorgen.means) + mean_values.extend([subop.errorgen.means[i] for i in subop.errorgen.dev_dict.keys()]) num_deviances += len(subop.errorgen.devs) #Now construct the set of points and weights: @@ -635,7 +635,8 @@ def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None, r for 
subop in op.factorops: if isinstance(subop, _ExpErrorgenOp): if isinstance(subop.errorgen, _LFHLindbladErrorgen): - hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + hamiltonian_model_indices.extend([op.gpindices.start+i for i in subop.errorgen.dev_dict.keys()]) + #hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) orig_vec = self.model.to_vector() @@ -764,8 +765,8 @@ def add_helper_sim(self): if self.model is not None: self.helper_sim = _MatrixForwardSimulator(model=self.model) - def create_layout(self, bulk_circuit_list, dataset, resource_alloc, - array_types, verbosity=1): + def create_layout(self, bulk_circuit_list, dataset=None, resource_alloc=None, + array_types=(), verbosity=1): if self.helper_sim is None: self.add_helper_sim() @@ -787,7 +788,8 @@ def bulk_fill_probs(self, array_to_fill, layout): for subop in op.factorops: if isinstance(subop, _ExpErrorgenOp): if isinstance(subop.errorgen, _LFHLindbladErrorgen): - hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + hamiltonian_model_indices.extend([op.gpindices.start+i for i in subop.errorgen.dev_dict.keys()]) + #hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) orig_vec = self.model.to_vector() @@ -827,9 +829,7 @@ def bulk_fill_probs(self, array_to_fill, layout): array_to_fill[:]= averaged_array #return averaged_array - #Next I need a version of bulk_fill_dprobs: - def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): eps = 1e-7 # hardcoded? 
@@ -858,4 +858,68 @@ def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): self.model.from_vector(orig_vec) #print('dprobs: ', array_to_fill) - #return dprobs \ No newline at end of file + #return dprobs + + #add a version of bulk_fill_hprobs + + def bulk_fill_hprobs(self, array_to_fill, layout, + pr_array_to_fill=None, deriv1_array_to_fill=None, + deriv2_array_to_fill=None): + """ + Compute the outcome probability-Hessians for an entire list of circuits. + + Similar to `bulk_fill_probs(...)`, but fills a 3D array with + the Hessians for each circuit outcome probability. + + Parameters + ---------- + array_to_fill : numpy ndarray + an already-allocated numpy array of shape `(len(layout),M1,M2)` where + `M1` and `M2` are the number of selected model parameters (by `wrt_filter1` + and `wrt_filter2`). + + layout : CircuitOutcomeProbabilityArrayLayout + A layout for `array_to_fill`, describing what circuit outcome each + element corresponds to. Usually given by a prior call to :meth:`create_layout`. + + pr_mx_to_fill : numpy array, optional + when not None, an already-allocated length-`len(layout)` numpy array that is + filled with probabilities, just as in :meth:`bulk_fill_probs`. + + deriv1_array_to_fill : numpy array, optional + when not None, an already-allocated numpy array of shape `(len(layout),M1)` + that is filled with probability derivatives, similar to + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M1`). + + deriv2_array_to_fill : numpy array, optional + when not None, an already-allocated numpy array of shape `(len(layout),M2)` + that is filled with probability derivatives, similar to + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M2`). 
+ + Returns + ------- + None + """ + + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + if deriv1_array_to_fill is not None: + self.bulk_fill_dprobs(deriv1_array_to_fill, layout) + dprobs = deriv1_array_to_fill.copy() + if deriv2_array_to_fill is not None: + deriv2_array_to_fill[:, :] = deriv1_array_to_fill[:, :] + + eps = 1e-4 # hardcoded? + dprobs = _np.empty((len(layout), self.model.num_params), 'd') + self.bulk_fill_dprobs(dprobs, layout) + + dprobs2 = _np.empty((len(layout), self.model.num_params), 'd') + + orig_vec = self.model.to_vector().copy() + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_dprobs(dprobs2, layout) + array_to_fill[:, i, :] = (dprobs2 - dprobs) / eps + self.model.from_vector(orig_vec, close=True) From 0855e6282560627b983b38c70322540f91cd72c1 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 18 Jul 2024 13:49:09 -0600 Subject: [PATCH 012/102] Relocate error generator propagation code Relocate the error generator propagation code from extras into a proper pyGSTi submodule. 
--- pygsti/{extras => }/errorgenpropagation/__init__.py | 0 pygsti/{extras => }/errorgenpropagation/errordict_deprecated.py | 0 pygsti/{extras => }/errorgenpropagation/errorpropagator.py | 0 pygsti/{extras => }/errorgenpropagation/errorpropagator_dev.py | 0 pygsti/{extras => }/errorgenpropagation/localstimerrorgen.py | 0 pygsti/{extras => }/errorgenpropagation/propagatableerrorgen.py | 0 .../errorgenpropagation/utilserrorgenpropagation.py | 0 .../errorgenpropagation/utilspygstistimtranslator.py | 0 setup.py | 2 +- 9 files changed, 1 insertion(+), 1 deletion(-) rename pygsti/{extras => }/errorgenpropagation/__init__.py (100%) rename pygsti/{extras => }/errorgenpropagation/errordict_deprecated.py (100%) rename pygsti/{extras => }/errorgenpropagation/errorpropagator.py (100%) rename pygsti/{extras => }/errorgenpropagation/errorpropagator_dev.py (100%) rename pygsti/{extras => }/errorgenpropagation/localstimerrorgen.py (100%) rename pygsti/{extras => }/errorgenpropagation/propagatableerrorgen.py (100%) rename pygsti/{extras => }/errorgenpropagation/utilserrorgenpropagation.py (100%) rename pygsti/{extras => }/errorgenpropagation/utilspygstistimtranslator.py (100%) diff --git a/pygsti/extras/errorgenpropagation/__init__.py b/pygsti/errorgenpropagation/__init__.py similarity index 100% rename from pygsti/extras/errorgenpropagation/__init__.py rename to pygsti/errorgenpropagation/__init__.py diff --git a/pygsti/extras/errorgenpropagation/errordict_deprecated.py b/pygsti/errorgenpropagation/errordict_deprecated.py similarity index 100% rename from pygsti/extras/errorgenpropagation/errordict_deprecated.py rename to pygsti/errorgenpropagation/errordict_deprecated.py diff --git a/pygsti/extras/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py similarity index 100% rename from pygsti/extras/errorgenpropagation/errorpropagator.py rename to pygsti/errorgenpropagation/errorpropagator.py diff --git 
a/pygsti/extras/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py similarity index 100% rename from pygsti/extras/errorgenpropagation/errorpropagator_dev.py rename to pygsti/errorgenpropagation/errorpropagator_dev.py diff --git a/pygsti/extras/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py similarity index 100% rename from pygsti/extras/errorgenpropagation/localstimerrorgen.py rename to pygsti/errorgenpropagation/localstimerrorgen.py diff --git a/pygsti/extras/errorgenpropagation/propagatableerrorgen.py b/pygsti/errorgenpropagation/propagatableerrorgen.py similarity index 100% rename from pygsti/extras/errorgenpropagation/propagatableerrorgen.py rename to pygsti/errorgenpropagation/propagatableerrorgen.py diff --git a/pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py b/pygsti/errorgenpropagation/utilserrorgenpropagation.py similarity index 100% rename from pygsti/extras/errorgenpropagation/utilserrorgenpropagation.py rename to pygsti/errorgenpropagation/utilserrorgenpropagation.py diff --git a/pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py b/pygsti/errorgenpropagation/utilspygstistimtranslator.py similarity index 100% rename from pygsti/extras/errorgenpropagation/utilspygstistimtranslator.py rename to pygsti/errorgenpropagation/utilspygstistimtranslator.py diff --git a/setup.py b/setup.py index 74d2baf31..a5ad82a99 100644 --- a/setup.py +++ b/setup.py @@ -146,6 +146,7 @@ def setup_with_extensions(extensions=None): 'pygsti.circuits.circuitparser', 'pygsti.data', 'pygsti.drivers', + 'pygsti.errorgenpropagation', 'pygsti.evotypes', 'pygsti.evotypes.densitymx', 'pygsti.evotypes.densitymx_slow', @@ -165,7 +166,6 @@ def setup_with_extensions(extensions=None): 'pygsti.extras.crosstalk', 'pygsti.extras.devices', 'pygsti.extras.lfh', - 'pygsti.extras.errorgenpropagation', 'pygsti.forwardsims', 'pygsti.io', 'pygsti.layouts', From 
3eda8c0b608165d25869bb50c2aca12d4f8a24a2 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 23 Oct 2024 22:14:21 -0600 Subject: [PATCH 013/102] Initial Commit For Refactored/Reworked Error Generator Propagation First real commit updating the implementation of the error generator propagation code. Still a big mess at the moment, but there are enough folks wanting to use it even as it currently is that it makes sense to make this available while still partially broken and in development flux. --- pygsti/baseobjs/errorgenbasis.py | 669 ++++++++++-------- pygsti/baseobjs/errorgenlabel.py | 44 ++ pygsti/baseobjs/statespace.py | 54 +- .../errorpropagator_dev.py | 633 ++++++++++++++++- .../errorgenpropagation/localstimerrorgen.py | 182 +++-- .../propagatableerrorgen.py | 2 +- .../utilserrorgenpropagation.py | 13 +- pygsti/models/model.py | 2 +- pygsti/tools/errgenproptools.py | 433 ++++++++++++ pygsti/tools/jamiolkowski.py | 2 +- pygsti/tools/optools.py | 31 - 11 files changed, 1688 insertions(+), 377 deletions(-) create mode 100644 pygsti/tools/errgenproptools.py diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 8f254198e..3c29016f4 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -12,10 +12,11 @@ import numpy as _np import itertools as _itertools -import collections as _collections from pygsti.baseobjs import Basis as _Basis -from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel,\ +LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel + from pygsti.tools import optools as _ot @@ -28,50 +29,175 @@ class ElementaryErrorgenBasis(object): """ def label_indices(self, labels, ok_if_missing=False): - """ TODO: docstring """ + """ + Return a list of indices into this basis's label list + for the specifed list of 
`ElementaryErrorgenLabels`. + + Parameters + ---------- + labels : list of `ElementaryErrorgenLabel` + A list of elementary error generator labels to extract the + indices of. + + ok_if_missing : bool + If True, then returns `None` instead of an integer when the given label is not present + """ return [self.label_index(lbl, ok_if_missing) for lbl in labels] def __len__(self): - """ Number of elementary errorgen elements in this basis """ + """ + Number of elementary errorgen elements in this basis. + """ return len(self.labels) - +#helper function for checking label types. +def _all_elements_same_type(lst): + if not lst: # Check if the list is empty + return True # An empty list can be considered to have all elements of the same type + + first_type = type(lst[0]) # Get the type of the first element + for element in lst: + if type(element) != first_type: + return False + return True + +#TODO: Unit Testing class ExplicitElementaryErrorgenBasis(ElementaryErrorgenBasis): + """ + This basis object contains the information necessary for building, + storing and accessing a set of explicitly represented basis elements for a user + specified set of of elementary error generators. + """ + + def __init__(self, state_space, labels, basis_1q=None): + """ + Instantiate a new explicit elementary error generator basis. - def __init__(self, state_space, labels, basis1q=None): - # TODO: docstring - labels must be of form (sslbls, elementary_errorgen_lbl) - self._labels = tuple(labels) if not isinstance(labels, tuple) else labels - self._label_indices = _collections.OrderedDict([(lbl, i) for i, lbl in enumerate(self._labels)]) - self.basis_1q = basis1q if (basis1q is not None) else _Basis.cast('pp', 4) + Parameters + ---------- + state_space : `StateSpace` + An object describing the struture of the entire state space upon which the elements + of this error generator basis act. 
+ + labels : list or tuple of `ElementaryErrorgenLabel` + A list of elementary error generator labels for which basis elements will be + constructed. + + basis1q : `Basis` or str, optional (default None) + A `Basis` object, or str which can be cast to one + corresponding to the single-qubit basis elements which + comprise the basis element labels for the values of the + `ElementaryErrorgenLabels` in `labels`. + """ + labels = tuple(labels) + + #add an assertion that the labels are ElementaryErrorgenLabels and that all of the labels are the same type. + msg = '`labels` should be either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel objects.' + if labels: + assert isinstance(labels[0], (_GlobalElementaryErrorgenLabel, _LocalElementaryErrorgenLabel)), msg + assert _all_elements_same_type(labels), 'All of the elementary error generator labels should be of the same type.' + + self._labels = labels + self._label_indices = {lbl: i for i, lbl in enumerate(self._labels)} + + if isinstance(basis_1q, _Basis): + self._basis_1q = basis_1q + elif isinstance(basis_1q, str): + self._basis_1q = _Basis.cast(basis_1q, 4) + else: + self._basis_1q = _Basis.cast('pp', 4) self.state_space = state_space assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels self.sslbls = sslbls # the "support" of this space - the qubit labels - self._cached_elements = None + + #Caching + self._cached_matrices = None + self._cached_dual_matrices = None + self._cached_supports = None @property def labels(self): return self._labels + + @property + def elemgen_supports(self): + """ + Returns a tuple of tuples, each corresponding to the support + of the elementary error generators in this basis, returned in + the same order as they appear in `labels`. 
+ """ + if self._cached_supports is None: + if isinstance(self._labels[0], _GlobalElementaryErrorgenLabel): + self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self._labels]) + #Otherwise these are LocalElementaryErrorgenLabels + else: + #LocalElementaryErrorgenLabel doesn't have a sslbls attribute indicating + #support like GlobalElementaryErrorgenLabel does, do index into the `sslbls` + #attribute for this object. + self._cached_supports = tuple([tuple([self.sslbls[i] for i in elemgen_label.support_indices()]) + for elemgen_label in self._labels]) + return self._cached_supports + + #TODO: The implementations of some of the following properties are the same as in + #CompleteElementaryErrorgen, refactor some of this into the parent class. + @property + def elemgen_dual_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the dual elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_dual_matrices is None: + self._cached_dual_matrices = tuple([_ot.create_elementary_errorgen_nqudit_dual( + elemgen_label.errorgen_type, elemgen_label.basis_element_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True) for elemgen_label in self._labels]) + return self._cached_dual_matrices + + @property + def elemgen_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the elementary error generators + in this basis, returned in the same order as they appear in `labels`. 
+ """ + if self._cached_matrices is None: + self._cached_matrices = tuple([_ot.create_elementary_errorgen_nqudit( + elemgen_label.errorgen_type, elemgen_label.basis_element_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True) for elemgen_label in self._labels]) + return self._cached_matrices + + @property + def elemgen_supports_and_dual_matrices(self): + """ + Returns a tuple of tuples, each containing a tuple of support and a dual matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_dual_matrices)) @property def elemgen_supports_and_matrices(self): - if self._cached_elements is None: - self._cached_elements = tuple( - ((elemgen_label.sslbls, _ot.lindblad_error_generator( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self.basis_1q, normalize=True, sparse=False, tensorprod_basis=True)) - for elemgen_label in self.labels)) - return self._cached_elements + """ + Returns a tuple of tuples, each containing a tuple of support and a matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_matrices)) def label_index(self, label, ok_if_missing=False): """ - TODO: docstring + Return the index of the specified elementary error generator label + in this basis' `labels` list. Parameters ---------- - label - + label : `ElementaryErrorgenLabel` + Elementary error generator label to return index for. + ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. 
""" @@ -79,20 +205,23 @@ def label_index(self, label, ok_if_missing=False): return None return self._label_indices[label] - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - - def create_subbasis(self, must_overlap_with_these_sslbls): + def create_subbasis(self, sslbl_overlap): """ Create a sub-basis of this basis by including only the elements that overlap the given support (state space labels) + + Parameters + ---------- + sslbl_overlap : list of sslbls + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this subbasis. + """ - sub_sslbls = set(must_overlap_with_these_sslbls) + sub_sslbls = set(sslbl_overlap) def overlaps(sslbls): - ret = len(set(sslbls).intersection(must_overlap_with_these_sslbls)) > 0 + ret = len(set(sslbls).intersection(sslbl_overlap)) > 0 if ret: sub_sslbls.update(sslbls) # keep track of all overlaps return ret @@ -100,46 +229,67 @@ def overlaps(sslbls): if overlaps(lbl[0])]) sub_state_space = self.state_space.create_subspace(sub_sslbls) - return ExplicitElementaryErrorgenBasis(sub_state_space, sub_labels, self.basis_1q) + return ExplicitElementaryErrorgenBasis(sub_state_space, sub_labels, self._basis_1q) def union(self, other_basis): - present_labels = self._label_indices.copy() # an OrderedDict, indices don't matter here - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - present_labels.update(other_basis._label_indices) - else: - - for other_lbl in other_basis.labels: - if other_lbl not in present_labels: - present_labels[other_lbl] = True + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the union of + this basis with another. + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the union with. 
+ """ + #assert that these two bases have compatible label types. + msg = 'Incompatible `ElementaryErrrogenLabel` types, the two `ElementaryErrorgenBasis` should have the same label type.' + assert type(self._labels[0]) == type(other_basis.labels[0]), msg + + #Get the union of the two bases labels. + union_labels = set(self._labels) | set(other_basis.labels) union_state_space = self.state_space.union(other_basis.state_space) - return ExplicitElementaryErrorgenBasis(union_state_space, tuple(present_labels.keys()), self.basis_1q) + return ExplicitElementaryErrorgenBasis(union_state_space, union_labels, self._basis_1q) def intersection(self, other_basis): - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - common_labels = tuple((lbl for lbl in self.labels if lbl in other_basis._label_indices)) - else: - other_labels = set(other_basis.labels) - common_labels = tuple((lbl for lbl in self.labels if lbl in other_labels)) + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the intersection of + this basis with another. + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the intersection with. + """ + intersection_labels = set(self._labels) & set(other_basis.labels) intersection_state_space = self.state_space.intersection(other_basis.state_space) - return ExplicitElementaryErrorgenBasis(intersection_state_space, common_labels, self.basis_1q) + return ExplicitElementaryErrorgenBasis(intersection_state_space, intersection_labels, self._basis_1q) def difference(self, other_basis): - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - remaining_labels = tuple((lbl for lbl in self.labels if lbl not in other_basis._label_indices)) - else: - other_labels = set(other_basis.labels) - remaining_labels = tuple((lbl for lbl in self.labels if lbl not in other_labels)) + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the difference of + this basis with another. 
(i.e. A basis consisting of the labels contained in this basis + but not the other) - remaining_state_space = self.state_space # TODO: see if we can reduce this space based on remaining_labels? - return ExplicitElementaryErrorgenBasis(remaining_state_space, remaining_labels, self.basis_1q) + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the difference with. + """ + difference_labels = set(self._labels) - set(other_basis.labels) + #TODO: Making the state space equal to the true difference breaks some stuff in the FOGI code + #that relied on the old (kind of incorrect behavior). Revert back to old version temporarily. + #difference_state_space = self.state_space.difference(other_basis.state_space) + difference_state_space = self.state_space + return ExplicitElementaryErrorgenBasis(difference_state_space, difference_labels, self._basis_1q) class CompleteElementaryErrorgenBasis(ElementaryErrorgenBasis): """ - Spanned by the elementary error generators of given type(s) (e.g. "Hamiltonian" and/or "other") - and with elements corresponding to a `Basis`, usually of Paulis. + This basis object contains the information necessary for building, + storing and accessing a set of explicitly represented basis elements + for a basis of elementary error generators spanned by the elementary + error generators of given type(s) (e.g. "Hamiltonian" and/or "other"). 
""" @classmethod @@ -203,45 +353,22 @@ def _count_uptriangle_labels_for_support(cls, support, left_support, type_str, t return cnt - #UNUSED NOW - @classmethod - def _create_all_labels_for_support(cls, support, left_support, type_str, trivial_bel, nontrivial_bels): - n = len(support) # == weight - all_bels = trivial_bel + nontrivial_bels - left_weight = len(left_support) - if left_weight < n: # n1 < n - factors = [nontrivial_bels if x in left_support else trivial_bel for x in support] \ - + [all_bels if x in left_support else nontrivial_bels for x in support] - return [_GlobalElementaryErrorgenLabel(type_str, (''.join(beltup[0:n]), ''.join(beltup[n:])), support) - for beltup in _itertools.product(*factors)] - # (factors == left_factors + right_factors above) - else: # n1 == n - ret = [] - for left_beltup in _itertools.product(*([nontrivial_bels] * n)): # better itertools call here TODO - left_bel = ''.join(left_beltup) - right_it = _itertools.product(*([all_bels] * n)) # better itertools call here TODO - next(right_it) # advance past first (all I) element - assume trivial el = first!! 
- ret.extend([_GlobalElementaryErrorgenLabel(type_str, (left_bel, ''.join(right_beltup)), support) - for right_beltup in right_it]) - return ret @classmethod def _create_ordered_labels(cls, type_str, basis_1q, state_space, - max_weight=None, must_overlap_with_these_sslbls=None, + max_weight=None, sslbl_overlap=None, include_offsets=False, initial_offset=0): offsets = {'BEGIN': initial_offset} labels = [] - #labels_by_support = _collections.OrderedDict() - #all_bels = basis_1q.labels[0:] trivial_bel = [basis_1q.labels[0]] nontrivial_bels = basis_1q.labels[1:] # assume first element is identity - if must_overlap_with_these_sslbls is not None and not isinstance(must_overlap_with_these_sslbls, set): - must_overlap_with_these_sslbls = set(must_overlap_with_these_sslbls) + if sslbl_overlap is not None and not isinstance(sslbl_overlap, set): + sslbl_overlap = set(sslbl_overlap) + assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" + sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels if max_weight is None: - assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" - sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels max_weight = len(sslbls) # Let k be len(nontrivial_bels) @@ -249,8 +376,8 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, # --> for each set of n qubit labels, there are k^n Hamiltonian terms with weight n for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): # NOTE: combinations *MUST* be deterministic - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue offsets[support] = len(labels) + initial_offset labels.extend(cls._create_diag_labels_for_support(support, type_str, 
nontrivial_bels)) @@ -269,8 +396,8 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, # (see _create_ordered_label_offsets) for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue for left_weight in range(1, weight + 1): @@ -286,7 +413,7 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, @classmethod def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, - max_weight=None, must_overlap_with_these_sslbls=None, + max_weight=None, sslbl_overlap=None, return_total_support=False, initial_offset=0): """ same as _create_ordered_labels but doesn't actually create the labels - just counts them to get offsets. """ offsets = {'BEGIN': initial_offset} @@ -297,12 +424,12 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, n1Q_nontrivial_bels = n1Q_bels - 1 # assume first element is identity total_support = set() - if must_overlap_with_these_sslbls is not None and not isinstance(must_overlap_with_these_sslbls, set): - must_overlap_with_these_sslbls = set(must_overlap_with_these_sslbls) + if sslbl_overlap is not None and not isinstance(sslbl_overlap, set): + sslbl_overlap = set(sslbl_overlap) + assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" + sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels if max_weight is None: - assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" - sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels max_weight = len(sslbls) # Let k be len(nontrivial_bels) @@ -310,8 +437,8 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, # --> for 
each set of n qubit labels, there are k^n Hamiltonian terms with weight n for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): # NOTE: combinations *MUST* be deterministic - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue offsets[support] = off + initial_offset off += n1Q_nontrivial_bels**weight @@ -320,8 +447,8 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, elif type_str in ('C', 'A'): for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue total_support.update(support) @@ -337,45 +464,67 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, return (offsets, total_support) if return_total_support else offsets def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', 'C', 'A'), - max_ham_weight=None, max_other_weight=None, must_overlap_with_these_sslbls=None): - self._basis_1q = basis_1q + max_weights=None, sslbl_overlap=None): + """ + Parameters + ---------- + basis_1q : `Basis` or str, optional (default None) + A `Basis` object, or str which can be cast to one + corresponding to the single-qubit basis elements which + comprise the basis element labels for the values of the + `ElementaryErrorgenLabels` in `labels`. + + state_space : `StateSpace` + An object describing the struture of the entire state space upon which the elements + of this error generator basis act. + + elementary_errorgen_types : tuple of str, optional (default ('H', 'S', 'C', 'A')) + Tuple of strings designating elementary error generator types to include in this + basis. 
+ + max_weights : dict, optional (default None) + A dictionary containing the maximum weight for each of the different error generator + types to include in the constructed basis. If None then + there is no maximum weight. If specified, any error generator + types without entries will have no maximum weight associated + with them. + + sslbl_overlap : list of sslbls, optional (default None) + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this basis. + """ + + if isinstance(basis_1q, _Basis): + self._basis_1q = basis_1q + elif isinstance(basis_1q, str): + self._basis_1q = _Basis.cast(basis_1q, 4) + else: + self._basis_1q = _Basis.cast('pp', 4) + self._elementary_errorgen_types = tuple(elementary_errorgen_types) # so works for strings like "HSCA" - #REMOVE self._other_mode = other_mode self.state_space = state_space - self._max_ham_weight = max_ham_weight - self._max_other_weight = max_other_weight - self._must_overlap_with_these_sslbls = must_overlap_with_these_sslbls + self.max_weights = max_weights if max_weights is not None else dict() + self._sslbl_overlap = sslbl_overlap assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" assert(all([eetyp in ('H', 'S', 'C', 'A') for eetyp in elementary_errorgen_types])), \ "Invalid elementary errorgen type in %s" % str(elementary_errorgen_types) - self._offsets = _collections.OrderedDict() + self._offsets = dict() present_sslbls = set() istart = 0 for eetyp in elementary_errorgen_types: self._offsets[eetyp], sup = self._create_ordered_label_offsets( eetyp, self._basis_1q, self.state_space, - (self._max_ham_weight if eetyp == 'H' else self._max_other_weight), - self._must_overlap_with_these_sslbls, return_total_support=True, initial_offset=istart) + self.max_weights.get(eetyp, None), + self._sslbl_overlap, 
return_total_support=True, initial_offset=istart) present_sslbls = present_sslbls.union(sup) # set union istart = self._offsets[eetyp]['END'] -#TODO REMOVE -# self._h_offsets, hsup = self._create_ordered_label_offsets('H', self._basis_1q, self.state_space, -# 'diagonal', self._max_ham_weight, -# self._must_overlap_with_these_sslbls, -# return_total_support=True) -# self._hs_border = self._h_offsets['END'] -# self._s_offsets, ssup = self._create_ordered_label_offsets('S', self._basis_1q, self.state_space, -# other_mode, self._max_other_weight, -# self._must_overlap_with_these_sslbls, -# return_total_support=True) -# present_sslbls = hsup.union(ssup) # set union - #Note: state space can have additional labels that aren't in support - # (this is, I think, only true when must_overlap_with_these_sslbls != None) + # (this is, I think, only true when sslbl_overlap != None) sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels if set(sslbls) == present_sslbls: @@ -387,9 +536,10 @@ def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', ' # this should never happen - somehow the statespace doesn't have all the labels! assert(False), "Logic error! State space doesn't contain all of the present labels!!" - #FUTURE: cache these for speed? - but could just create an explicit basis which would be more transparent - #self._cached_labels = None - #self._cached_elements = None + self._cached_labels = None + self._cached_matrices = None + self._cached_dual_matrices = None + self._cached_supports = None # Notes on ordering of labels: # - let there be k nontrivial 1-qubit basis elements (usually k=3) @@ -420,49 +570,115 @@ def __len__(self): return self._offsets[self._elementary_errorgen_types[-1]]['END'] def to_explicit_basis(self): + """ + Creates a new `ExplicitElementaryErrorgenBasis` based on this Basis' elements. 
+ """ return ExplicitElementaryErrorgenBasis(self.state_space, self.labels, self._basis_1q) + #TODO: Why can't this be done at initialization time? @property def labels(self): - labels = [] - for eetype in self._elementary_errorgen_types: - labels.extend(self._create_ordered_labels(eetype, self._basis_1q, self.state_space, - self._max_ham_weight if eetype == 'H' else self._max_other_weight, - self._must_overlap_with_these_sslbls)) - return tuple(labels) + if self._cached_labels is None: + labels = [] + for eetyp in self._elementary_errorgen_types: + labels.extend(self._create_ordered_labels(eetyp, self._basis_1q, self.state_space, + self.max_weights.get(eetyp, None), + self._sslbl_overlap)) + self._cached_labels = tuple(labels) + return self._cached_labels + + def sublabels(self, errorgen_type): + """ + Return a tuple of labels within this basis for the specified error generator + type (may be empty). + + Parameters + ---------- + errorgen_type : 'H', 'S', 'C' or 'A' + String specifying the error generator type to return the labels for. + + Returns + ------- + tuple of `GlobalElementaryErrorgenLabel` + """ + return self._create_ordered_labels(errorgen_type, self._basis_1q, self.state_space, + self.max_weights.get(errorgen_type, None), + self._sslbl_overlap) + @property - def elemgen_supports_and_dual_matrices(self): - return tuple(((elemgen_label.sslbls, - _ot.create_elementary_errorgen_nqudit_dual( + def elemgen_supports(self): + """ + Returns a tuple of tuples, each corresponding to the support + of the elementary error generators in this basis, returned in + the same order as they appear in `labels`. 
+ """ + if self._cached_supports is None: + self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self.labels]) + return self._cached_supports + + @property + def elemgen_dual_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the dual elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_dual_matrices is None: + self._cached_dual_matrices = tuple([_ot.create_elementary_errorgen_nqudit_dual( elemgen_label.errorgen_type, elemgen_label.basis_element_labels, self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True)) # Note: normalize was set to True... - for elemgen_label in self.labels)) - + tensorprod_basis=True) for elemgen_label in self.labels]) + return self._cached_dual_matrices + @property - def elemgen_supports_and_matrices(self): - return tuple(((elemgen_label.sslbls, - _ot.create_elementary_errorgen_nqudit( + def elemgen_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_matrices is None: + self._cached_matrices = tuple([_ot.create_elementary_errorgen_nqudit( elemgen_label.errorgen_type, elemgen_label.basis_element_labels, self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True)) # Note: normalize was set to True... - for elemgen_label in self.labels)) + tensorprod_basis=True) for elemgen_label in self.labels]) + return self._cached_matrices + + @property + def elemgen_supports_and_dual_matrices(self): + """ + Returns a tuple of tuples, each containing a tuple of support and a dual matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. 
+ """ + return tuple(zip(self.elemgen_supports, self.elemgen_dual_matrices)) - def label_index(self, elemgen_label, ok_if_missing=False): + @property + def elemgen_supports_and_matrices(self): + """ + Returns a tuple of tuples, each containing a tuple of support and a matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_matrices)) + + def label_index(self, label, ok_if_missing=False): """ - TODO: docstring + Return the index of the specified elementary error generator label + in this basis' `labels` list. Parameters ---------- - elemgen_label + label : `ElementaryErrorgenLabel` + Elementary error generator label to return index for. ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. """ - support = elemgen_label.sslbls - eetype = elemgen_label.errorgen_type - bels = elemgen_label.basis_element_labels + support = label.sslbls + eetype = label.errorgen_type + bels = label.basis_element_labels trivial_bel = self._basis_1q.labels[0] # assumes first element is identity nontrivial_bels = self._basis_1q.labels[1:] @@ -489,26 +705,30 @@ def label_index(self, elemgen_label, ok_if_missing=False): else: raise ValueError("Invalid elementary errorgen type: %s" % str(eetype)) - return base + indices[elemgen_label] + return base + indices[label] + - #@property - #def sslbls(self): - # """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ - # return self.sslbls - - def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): + def create_subbasis(self, sslbl_overlap, retain_max_weights=True): """ Create a sub-basis of this basis by including only the elements that overlap the given support (state space labels) """ #Note: state_space is automatically reduced within __init__ when necessary, e.g., when - # 
`must_overlap_with_these_sslbls` is non-None and considerably reduces the basis. + # `sslbl_overlap` is non-None and considerably reduces the basis. return CompleteElementaryErrorgenBasis(self._basis_1q, self.state_space, self._elementary_errorgen_types, - self._max_ham_weight if retain_max_weights else None, - self._max_other_weight if retain_max_weights else None, - must_overlap_with_these_sslbls) + self.max_weights if retain_max_weights else None, + sslbl_overlap) def union(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the union of + this basis with another. + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the union with. + """ # don't convert this basis to an explicit one unless it's necessary - # if `other_basis` is already an explicit basis then let it do the work. if isinstance(other_basis, ExplicitElementaryErrorgenBasis): @@ -517,132 +737,29 @@ def union(self, other_basis): return self.to_explicit_basis().union(other_basis) def intersection(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the intersection of + this basis with another. + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the intersection with. + """ if isinstance(other_basis, ExplicitElementaryErrorgenBasis): return other_basis.intersection(self) else: return self.to_explicit_basis().intersection(other_basis) def difference(self, other_basis): - return self.to_explicit_basis().difference(other_basis) - + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the difference of + this basis with another. (i.e. A basis consisting of the labels contained in this basis + but not the other) -#OLD - maybe not needed? -#class LowWeightElementaryErrorgenBasis(ElementaryErrorgenBasis): -# """ -# Spanned by the elementary error generators of given type(s) (e.g. 
"Hamiltonian" and/or "other") -# and with elements corresponding to a `Basis`, usually of Paulis. -# """ -# -# def __init__(self, basis_1q, state_space, other_mode, max_ham_weight=None, max_other_weight=None, -# must_overlap_with_these_sslbls=None): -# self._basis_1q = basis_1q -# self._other_mode = other_mode -# self.state_space = state_space -# self._max_ham_weight = max_ham_weight -# self._max_other_weight = max_other_weight -# self._must_overlap_with_these_sslbls = must_overlap_with_these_sslbls -# -# assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" -# sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels -# self.sslbls = sslbls # the "support" of this space - the qubit labels -# -# self._cached_label_indices = None -# self._cached_labels_by_support = None -# self._cached_elements = None -# -# #Needed? -# # self.dim = len(self.labels) # TODO - update this so we don't always need to build labels -# # # (this defeats lazy building via property below) - we can just compute this, especially if -# # # not too fancy -# -# @property -# def labels(self): -# if self._cached_label_indices is None: -# -# def _basis_el_strs(possible_bels, wt): -# for els in _itertools.product(*([possible_bels] * wt)): -# yield ''.join(els) -# -# labels = {} -# all_bels = self.basis_1q.labels[1:] # assume first element is identity -# nontrivial_bels = self.basis_1q.labels[1:] # assume first element is identity -# -# max_weight = self._max_ham_weight if (self._max_ham_weight is not None) else len(self.sslbls) -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] # always True? 
-# labels[support].extend([('H', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# -# max_weight = self._max_other_weight if (self._max_other_weight is not None) else len(self.sslbls) -# if self._other_mode != "all": -# for weight in range(1, max_weight + 1): -# for support in _itertools.combinations(self.sslbls, weight): -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(support)) == 0): -# continue -# if support not in labels: labels[support] = [] -# labels[support].extend([('S', bel) for bel in _basis_el_strs(nontrivial_bels, weight)]) -# else: -# #This is messy code that relies on basis labels being single characters -- TODO improve(?) -# idle_char = self.basis_1q.labels[1:] # assume first element is identity -# assert(len(idle_char) == 1 and all([len(c) == 1 for c in nontrivial_bels])), \ -# "All basis el labels must be single chars for other_mode=='all'!" -# for support in _itertools.combinations(self.sslbls, max_weight): -# # Loop over all possible basis elements for this max-weight support, computing the actual support -# # of each one individually and appending it to the appropriate list -# for bel1 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices1 = [i for i in range(max_weight) if bel1[i] != idle_char] -# for bel2 in _basis_el_strs(all_bels, max_weight): -# nonidle_indices2 = [i for i in range(max_weight) if bel2[i] != idle_char] -# nonidle_indices = list(sorted(set(nonidle_indices1) + set(nonidle_indices2))) -# bel1 = ''.join([bel1[i] for i in nonidle_indices]) # trim to actual support -# bel2 = ''.join([bel2[i] for i in nonidle_indices]) # trim to actual support -# actual_support = tuple([support[i] for i in nonidle_indices]) -# -# if (self._must_overlap_with_these_sslbls is not None -# and len(self._must_overlap_with_these_sslbls.intersection(actual_support)) == 0): -# continue -# -# if actual_support not in labels: labels[actual_support] = [] -# 
labels[actual_support].append(('S', bel1, bel2)) -# -# self._cached_labels_by_support = labels -# self._cached_label_indices = _collections.OrderedDict(((support_lbl, i) for i, support_lbl in enumerate( -# ((support, lbl) for support, lst in labels.items() for lbl in lst)))) -# -# return tuple(self._cached_label_indices.keys()) -# -# @property -# def element_supports_and_matrices(self): -# if self._cached_elements is None: -# self._cached_elements = tuple( -# ((support, _ot.lindblad_error_generator(elemgen_label, self.basis_1q, normalize=True, sparse=False)) -# for support, elemgen_label in self.labels)) -# return self._cached_elements -# -# def element_index(self, label): -# """ -# TODO: docstring -# """ -# if self._cached_label_indices is None: -# self.labels # triggers building of labels -# return self._cached_label_indices[label] -# -# @property -# def sslbls(self): -# """ The support of this errorgen space, e.g., the qubits where its elements may be nontrivial """ -# return self.sslbls -# -# def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): -# """ -# Create a sub-basis of this basis by including only the elements -# that overlap the given support (state space labels) -# """ -# #Note: can we reduce self.state_space? -# return CompleteErrorgenBasis(self._basis_1q, self.state_space, self._other_mode, -# self._max_ham_weight if retain_max_weights else None, -# self._max_other_weight if retain_max_weights else None, -# self._must_overlap_with_these_sslbls) + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the difference with. 
+ """ + return self.to_explicit_basis().difference(other_basis) diff --git a/pygsti/baseobjs/errorgenlabel.py b/pygsti/baseobjs/errorgenlabel.py index c83781907..19d0cff49 100644 --- a/pygsti/baseobjs/errorgenlabel.py +++ b/pygsti/baseobjs/errorgenlabel.py @@ -65,6 +65,19 @@ def cast(cls, obj, sslbls=None, identity_label='I'): raise ValueError("Cannot convert %s to a local elementary errorgen label!" % str(obj)) def __init__(self, errorgen_type, basis_element_labels): + """ + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. + + basis_element_labels : tuple or list + A list or tuple of strings labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. + """ + self.errorgen_type = str(errorgen_type) self.basis_element_labels = tuple(basis_element_labels) @@ -80,6 +93,16 @@ def __str__(self): def __repr__(self): return str((self.errorgen_type, self.basis_element_labels)) + + def support_indices(self, identity_label='I'): + """ + Returns a sorted tuple of the elements of indices of the nontrivial basis + element label entries for this label. + """ + nonidentity_indices = [i for i in range(len(self.basis_element_labels[0])) + if any([bel[i] != identity_label for bel in self.basis_element_labels])] + + return tuple(nonidentity_indices) class GlobalElementaryErrorgenLabel(ElementaryErrorgenLabel): @@ -132,6 +155,27 @@ def cast(cls, obj, sslbls=None, identity_label='I'): raise ValueError("Cannot convert %s to a global elementary errorgen label!" % str(obj)) def __init__(self, errorgen_type, basis_element_labels, sslbls, sort=True): + """ + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. 
+ + basis_element_labels : tuple or list + A list or tuple of strings labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. + + sslbls : tuple or list + A tuple or list of state space labels corresponding to the qudits upon which this error generator + is supported. + + sort : bool, optional (default True) + If True then the input state space labels are first sorted, and then the used basis element labels + are sorted to match the order to the newly sorted state space labels. + """ + if sort: sorted_indices, sslbls = zip(*sorted(enumerate(sslbls), key=lambda x: x[1])) basis_element_labels = [''.join([bel[i] for i in sorted_indices]) for bel in basis_element_labels] diff --git a/pygsti/baseobjs/statespace.py b/pygsti/baseobjs/statespace.py index 4a358a35f..a4cbe676e 100644 --- a/pygsti/baseobjs/statespace.py +++ b/pygsti/baseobjs/statespace.py @@ -469,7 +469,7 @@ def intersection(self, other_state_space): other_udim = other_state_space.label_udimension(lbl) other_typ = other_state_space.label_type(lbl) if other_iTPB != iTPB or other_udim != udim or other_typ != typ: - raise ValueError(("Cannot take state space union: repeated label '%s' has inconsistent index," + raise ValueError(("Cannot take state space intersection: repeated label '%s' has inconsistent index," " dim, or type!") % str(lbl)) ret_lbls.append(lbl) ret_udims.append(udim) @@ -534,6 +534,58 @@ def union(self, other_state_space): ret_tpb_types[iTPB].append(typ) return ExplicitStateSpace(ret_tpb_labels, ret_tpb_udims, ret_tpb_types) + + + def difference(self, other_state_space): + """ + Create a state space whose labels are the difference of the labels of this space and one other. + I.e. a state space containing the labels of this space which don't appear in the other. + + Dimensions associated with the labels are preserved, as is the tensor product block index. 
+ If the two spaces have the same label, but their dimensions or indices do not agree, an + error is raised. + + Parameters + ---------- + other_state_space : StateSpace + The other state space. + + Returns + ------- + StateSpace + """ + ret_tpb_labels = [] + ret_tpb_udims = [] + ret_tpb_types = [] + + for iTPB, (lbls, udims, typs) in enumerate(zip(self.tensor_product_blocks_labels, + self.tensor_product_blocks_udimensions, + self.tensor_product_blocks_types)): + ret_lbls = []; ret_udims = []; ret_types = [] + for lbl, udim, typ in zip(lbls, udims, typs): + #If the label does appear in the other state space, verify that the + #properties of the label are consistently defined accross the two state spaces + #otherwise raise an error. + if other_state_space.contains_label(lbl): + other_iTPB = other_state_space.label_tensor_product_block_index(lbl) + other_udim = other_state_space.label_udimension(lbl) + other_typ = other_state_space.label_type(lbl) + if other_iTPB != iTPB or other_udim != udim or other_typ != typ: + raise ValueError(("Cannot take state space difference: repeated label '%s' has inconsistent index," + " dim, or type!") % str(lbl)) + continue + #Otherwise add this to the state space. 
+ else: + ret_lbls.append(lbl) + ret_udims.append(udim) + ret_types.append(typ) + + if len(ret_lbls) > 0: + ret_tpb_labels.append(ret_lbls) + ret_tpb_udims.append(ret_udims) + ret_tpb_types.append(ret_types) + + return ExplicitStateSpace(ret_tpb_labels, ret_tpb_udims, ret_tpb_types) def create_stencil_subspace(self, labels): """ diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 92c2bd880..09538dbc9 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -1,27 +1,636 @@ import stim -from pygsti.extras.errorgenpropagation.localstimerrorgen import * +import numpy as _np +import scipy.linalg as _spl +from .localstimerrorgen import LocalStimErrorgenLabel as _LSE from numpy import abs,zeros, complex128 from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions -from pygsti.extras.errorgenpropagation.utilserrorgenpropagation import * +from .utilserrorgenpropagation import * import copy as _copy +from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis +import pygsti.tools.errgenproptools as _eprop +import pygsti.tools.basistools as _bt class ErrorGeneratorPropagator: def __init__(self, model, multi_gate_dict=None, bch_order=1, - bch_layerwise=False, nonmarkovian=False, multi_gate=False, - error_layer_def=False): + bch_layerwise=False, nonmarkovian=False, multi_gate=False): self.model = model self.bch_order = bch_order - self.bch_layerwise = bch_layerwise + self.bch_layerwise = bch_layerwise - def propagate_errorgen_bch(circuit, bch_order, bch_layerwise): - pass + def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, use_bch=False, + bch_kwargs=None, mx_basis='pp'): + """ + Propagate all of the error generators for each circuit layer to the end of the circuit + and return the result of 
exponentiating these error generators, and if necessary taking + their products, to return the end of circuit error channel. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + multi_gate_dict : dict, optional (default None) + An optional dictionary mapping between gate name aliases and their + standard name counterparts. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + + use_bch : bool, optional (default False) + If True use the BCH approximation as part of the propagation algorithm. + + bch_kwarg : dict, optional (default None) + Only used is `use_bch` is True, this dictionary contains a set of + BCH-specific kwargs which are passed to `propagate_errorgens_bch`. + + mx_basis : Basis or str, optional (default 'pp') + Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + basis in which to return the process matrix for the error channel. + + Returns + ------- + eoc_error_channel : numpy.ndarray + A numpy array corresponding to the end-of-circuit error channel resulting + from the propagated error generators. This is + """ + + if use_bch: + raise NotImplementedError('Still under development.') + propagated_error_generators = self.propagate_errorgens_bch(circuit, multi_gate_dict=multi_gate_dict, + *bch_kwargs) + + else: + propagated_error_generators = self.propagate_errorgens(circuit, multi_gate_dict, include_spam) + #loop though the propagated error generator layers and construct their error generators. + #Then exponentiate + exp_error_generators = [] + for err_gen_layer_list in propagated_error_generators: + if err_gen_layer_list: #if not empty. Should be length one if not empty. + #Keep the error generator in the standard basis until after the end-of-circuit + #channel is constructed so we can reduce the overhead of changing basis. 
+ exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='pp'))) + #Next take the product of these exponentiated error generators. + #These are in circuit ordering, so reverse for matmul. + exp_error_generators.reverse() + #print(exp_error_generators) + eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + #eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') + + return eoc_error_channel + + def averaged_eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, mx_basis='pp'): + """ + Propagate all of the error generators for each circuit layer to the end of the circuit, + then apply a second order cumulant expansion to approximate the average of the end of circuit + error channel over the values error generator rates that are stochastic processes. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + multi_gate_dict : dict, optional (default None) + An optional dictionary mapping between gate name aliases and their + standard name counterparts. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + + mx_basis : Basis or str, optional (default 'pp') + Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + basis in which to return the process matrix for the error channel. + + Returns + ------- + avg_eoc_error_channel : numpy.ndarray + A numpy array corresponding to the end-of-circuit error channel resulting + from the propagated error generators and averaging over the stochastic processes + for the error generator rates using a second order cumulant approximation. 
+ """ + + #propagate_errorgens_nonmarkovian returns a list of list of + propagated_error_generators = self.propagate_errorgens_nonmarkovian(circuit, multi_gate_dict, include_spam) + + #construct the nonmarkovian propagators + for i in range(len(propagated_error_generators)): + for j in range(i+1): + if i==j: + # term: + + prop_contrib = amam + else: + pass + + + + + + #loop though the propagated error generator layers and construct their error generators. + #Then exponentiate + exp_error_generators = [] + for err_gen_layer_list in propagated_error_generators: + if err_gen_layer_list: #if not empty. Should be length one if not empty. + #Keep the error generator in the standard basis until after the end-of-circuit + #channel is constructed so we can reduce the overhead of changing basis. + exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='std'))) + #Next take the product of these exponentiated error generators. + #These are in circuit ordering, so reverse for matmul. + exp_error_generators.reverse() + eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') + + return eoc_error_channel + + + def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): + """ + Propagate all of the error generators for each circuit layer to the end without + any recombinations or averaging. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + multi_gate_dict : dict, optional (default None) + An optional dictionary mapping between gate name aliases and their + standard name counterparts. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. 
+ + Returns + ------- + propagated_errorgen_layers : list of lists of dictionaries + A list of lists of dictionaries, each corresponding to the result of propagating + an error generator layer through to the end of the circuit. + """ + #TODO: Check for proper handling of empty circuit and length 1 circuits. + + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. + stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer = not include_spam) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. + propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) + + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. + propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) + + return propagated_errorgen_layers + + + def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, multi_gate_dict=None): + """ + Propagate all of the error generators for each circuit to the end, + performing approximation/recombination either along the way (layerwise) + or at the very end using the BCH approximation. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. 
+ + bch_order : int, optional (default 1) + Order of the BCH approximation to use. When bch_layerwise is True + this can take the values of either 1 or 2. Otherwise only + a value of 1 is currently implemented. + + bch_layerwise : bool, optional (default False) + If True perform the BCH approximation incrementally, performing the + approximate recombination layer-by-layer during the course of error + generator propagation. If False (the default) then the BCH approximation + is only applied at the very end after all of the error generators have + been propagated to the end. + + multi_gate_dict : dict, optional (default None) + An optional dictionary mapping between gate name aliases and their + standard name counterparts. + """ + + msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ + +' Otherwise only a value of 1 is currently implemented.' + if not bch_layerwise: + assert bch_order==1, msg + else: + msg1 = 'When bch_layerwise is False only bch_order values of 1 and 2 are currently'\ + + ' supported.' + assert bch_order==1 or bch_order==2, msg1 + + #if not doing layerwise BCH then we can re-use `propagate_errorgens` fully. + if not bch_layerwise: + propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict) + #otherwise we need to do the error generator layer propagation slightly + #differently. + else: + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. + stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer=True) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. 
+ propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels)) + + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. + propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers) + + return propagated_errorgen_layers + + + def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, include_spam=True): + """ + Propagate all of the error generators for each circuit layer to the end without + any recombinations or averaging. This version also only track the overall modifier/weighting + factor picked up by each of the final error generators over the course of the optimization, + with the actual rates introduced in subsequent stages. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + multi_gate_dict : dict, optional (default None) + An optional dictionary mapping between gate name aliases and their + standard name counterparts. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + + Returns + ------- + propagated_errorgen_layers : list of lists of dictionaries + A list of lists of dictionaries, each corresponding to the result of propagating + an error generator layer through to the end of the circuit. + + """ + + #TODO: Check for proper handling of empty circuit and length 1 circuits. + + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. 
+ stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer = not include_spam) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. + propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, + include_circuit_time=True, fixed_rate=1) - def propagate_errorgen_nonmarkovian(circuit, multi_gate_dict): + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. + propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) + + #in the context of doing propagation for nonmarkovianity we won't be using BCH, so do a partial flattening + #of this data structure. + propagated_errorgen_layers = [errorgen_layers[0] for errorgen_layers in propagated_errorgen_layers] + + return propagated_errorgen_layers + + + + def propagate_errorgens_analytic(self, circuit): pass + def construct_stim_layers(self, circuit, multi_gate_dict=None, drop_first_layer=True): + """ + Converts a `Circuit` to a list of stim Tableau objects corresponding to each + gate layer. + + TODO: Move to a tools module? Locality of behavior considerations. + + Parameters + ---------- + circuit : `Circuit` + Circuit to convert. 
+ multi_gate_dict : dict, optional (default None) + If specified this augments the standard dictionary for conversion between + pygsti gate labels and stim (found in `pygsti.tools.internalgates.standard_gatenames_stim_conversions`) + with additional entries corresponding to aliases for the entries of the standard dictionary. + This is presently used in the context of non-Markovian applications where tracking + circuit time for gate labels is required. + drop_first_layer : bool, optional (default True) + If True the first Tableau for the first gate layer is dropped in the returned output. + This default setting is what is primarily used in the context of error generator + propagation. + + Returns + ------- + stim_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to the ideal Clifford operation + for each layer of the input pygsti `Circuit`, with the first layer optionally dropped. + """ + + stim_dict=standard_gatenames_stim_conversions() + if multi_gate_dict is not None: + for key in multi_gate_dict: + stim_dict[key]=stim_dict[multi_gate_dict[key]] + stim_layers=circuit.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) + if drop_first_layer and len(stim_layers)>0: + stim_layers = stim_layers[1:] + return stim_layers + + def construct_propagation_layers(self, stim_layers): + """ + Construct a list of stim Tableau objects corresponding to the Clifford + operation each error generator will be propagated through. This corresponds + to a list of cumulative products of the ideal operations, but in reverse. + I.e. the initial entry corresponds to the product (in matrix multiplication order) + of all elements of `stim_layers`, the second entry is the product of the elements of + `stim_layers[1:]`, then `stim_layers[2:]` and so on until the last entry which is + `stim_layers[-1]`. 
+ + Parameters + ---------- + stim_layers : list of stim.Tableau + The list of stim.Tableau objects corresponding to a set of ideal Clifford + operation for each circuit layer through which we will be propagating error + generators. + + Returns + ------- + propagation_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to a cumulative product of + ideal Clifford operations for a set of circuit layers, each corresponding to a layer + of operations which we will be propagating error generators through. + """ + if len(stim_layers) > 1: + propagation_layers = [0]*len(stim_layers) + #if propagation_layers is empty that means that stim_layers was empty + #final propagation layer is the final stim layer for the circuit + propagation_layers[-1] = stim_layers[-1] + for layer_idx in reversed(range(len(stim_layers)-1)): + propagation_layers[layer_idx] = propagation_layers[layer_idx+1]*stim_layers[layer_idx] + elif len(stim_layers) == 1: + propagation_layers = stim_layers + else: + propagation_layers = [] + return propagation_layers + + def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, include_circuit_time=False, fixed_rate=None): + """ + Construct a nested list of lists of dictionaries corresponding to the error generators for each circuit layer. + This is currently (as implemented) only well defined for `ExplicitOpModels` where each layer corresponds + to a single 'gate'. This should also in principle work for crosstalk-free `ImplicitOpModels`, but is not + configured to do so just yet. The entries of the top-level list correspond to circuit layers, while the entries + of the second level (i.e. the dictionaries at each layer) correspond to different orders of the BCH approximation. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct the error generator layers for. + + num_qubits : int + Total number of qubits, used for padding out error generator coefficient labels. 
+ + include_spam : bool, optional (default True) + If True then include the error generators for state preparation and measurement. + + include_circuit_time : bool, optional (default False) + If True then include as part of the error generator coefficient labels the circuit + time from which that error generator arose. + + fixed_rate : float, optional (default None) + If specified this rate is used for all of the error generator coefficients, overriding the + value currently found in the model. + Returns + ------- + List of lists of dictionaries, each one containing the error generator coefficients and rates for a circuit layer, + with the error generator coefficients now represented using LocalStimErrorgenLabel. + + """ + #If including spam then start by completing the circuit (i.e. adding in the explicit SPAM labels). + if include_spam: + circuit = self.model.complete_circuit(circuit) + + #TODO: Infer the number of qubits from the model and/or the circuit somehow. + #Pull out the error generator dictionaries for each operation (may need to generalize this for implicit models): + model_error_generator_dict = dict() #key will be a label and value the lindblad error generator dictionary. + for op_lbl, op in self.model.operations.items(): + #TODO add assertion that the operation is a lindblad error generator type modelmember. + model_error_generator_dict[op_lbl] = op.errorgen_coefficients() + #add in the error generators for the prep and measurement if needed. + if include_spam: + for prep_lbl, prep in self.model.preps.items(): + model_error_generator_dict[prep_lbl] = prep.errorgen_coefficients() + for povm_lbl, povm in self.model.povms.items(): + model_error_generator_dict[povm_lbl] = povm.errorgen_coefficients() + + #TODO: Generalize circuit time to not be in one-to-one correspondence with the layer index. 
+ error_gen_dicts_by_layer = [] + for j in range(len(circuit)): + circuit_layer = circuit[j] # get the layer + #can probably relax this if we detect that the model is a crosstalk free model. + assert isinstance(circuit_layer, Label), 'Correct support for parallel gates is still under development.' + errorgen_layer = dict() + layer_errorgen_coeff_dict = model_error_generator_dict[circuit_layer] #get the errors for the gate + for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary + #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` + paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) + if include_circuit_time: + #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j)] = rate if fixed_rate is None else fixed_rate + else: + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis)] = rate if fixed_rate is None else fixed_rate + error_gen_dicts_by_layer.append([errorgen_layer]) + return error_gen_dicts_by_layer + + def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, include_spam=True): + """ + Propagates the error generator layers through each of the corresponding propagation layers + (i.e. the clifford operations for the remainder of the circuit). This results in a list of + lists of dictionaries, where each sublist corresponds to an order of the BCH approximation + (when not using the BCH approximation this list will be length 1), and the dictionaries + correspond to end of circuit error generators and rates. + + Parameters + ---------- + errorgen_layers : list of lists of dicts + Each sublist corresponds to a circuit layer, with these sublists containing dictionaries + of the error generator coefficients and rates for a circuit layer. 
Each dictionary corresponds + to a different order of the BCH approximation (when not using the BCH approximation this list will + be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. + + propagation_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to a cumulative product of + ideal Clifford operations for a set of circuit layers, each corresponding to a layer + of operations which we will be propagating error generators through. + + include_spam : bool, optional (default True) + If True then include the error generators for state preparation and measurement + are included in errogen_layers, and the state preparation error generator should + be propagated through (the measurement one is simply appended at the end). + + Returns + ------- + fully_propagated_layers : list of lists of dicts + A list of list of dicts with the same structure as errorgen_layers corresponding + to the results of having propagated each of the error generator layers through + the circuit to the end. + """ + + #the stopping index in errorgen_layers will depend on whether the measurement error + #generator is included or not. + if include_spam: + stopping_idx = len(errorgen_layers)-2 + else: + stopping_idx = len(errorgen_layers)-1 + + fully_propagated_layers = [] + for i in range(stopping_idx): + err_layer = errorgen_layers[i] + prop_layer = propagation_layers[i] + new_err_layer = [] + #err_layer should be length 1 if using this method + for bch_level_list in err_layer: + new_error_dict=dict() + #iterate through dictionary of error generator coefficients and propagate each one. 
+ for errgen_coeff_lbl in bch_level_list: + propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, bch_level_list[errgen_coeff_lbl]) + new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] + new_err_layer.append(new_error_dict) + fully_propagated_layers.append(new_err_layer) + #add the final layers which didn't require actual propagation (since they were already at the end). + fully_propagated_layers.extend(errorgen_layers[stopping_idx:]) + return fully_propagated_layers + + #TODO: Add an option to return the results with the different BCH order combined. + def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1): + """ + Propagates the error generator layers through each of the corresponding propagation layers + (i.e. the clifford operations for the remainder of the circuit). In this version we + perform a layerwise application of the BCH approximation following each propagation to + recombine the propaged error generator layer with the layer proceeding it before each + successive propagation step. + + Parameters + ---------- + errorgen_layers : list of lists of dicts + Each sublist corresponds to a circuit layer, with these sublists containing dictionaries + of the error generator coefficients and rates for a circuit layer. Each dictionary corresponds + to a different order of the BCH approximation (when not using the BCH approximation this list will + be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. + + propagation_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to a cumulative product of + ideal Clifford operations for a set of circuit layers, each corresponding to a layer + of operations which we will be propagating error generators through. + + bch_order : int, optional (default 1) + Order of the BCH approximation to use. 
+ + Returns + ------- + + """ + + #Add temporary errors when trying to do BCH beyond 1st order while the details of the 2nd order + #approximation's implementation are sorted out. + if bch_order != 1: + msg = 'The implementation of the 2nd order BCH approx is still under development. For now only 1st order is supported.' + raise NotImplementedError(msg) + assert all([len(layer)==1 for layer in errorgen_layers]), msg + + fully_propagated_layers=[] + #initialize a variable as temporary storage of the result + #of performing BCH on pairwise between a propagater errorgen + #layer and an unpropagated layer for layerwise BCH. + if len(errorgen_layers)>0: + combined_err_layer = errorgen_layers[0] + + for i in range(len(errorgen_layers)-1): + #err_layer = errorgen_layers[i] + prop_layer = propagation_layers[i] + new_err_layer = [] + #err_layer should be length 1 if using this method + for bch_level_dict in combined_err_layer: + new_error_dict = dict() + #iterate through dictionary of error generator coefficients and propagate each one. + for errgen_coeff_lbl in bch_level_dict: + propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, bch_level_dict[errgen_coeff_lbl]) + new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] + new_err_layer.append(new_error_dict) + #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] + combined_err_layer = _eprop.bch_approximation(new_err_layer, errorgen_layers[i+1], bch_order=1) + + fully_propagated_layers.append(combined_err_layer) + return fully_propagated_layers + + def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): + """ + Helper method for converting from an error generator dictionary in the format + utilized in the `errorgenpropagation` module into a numpy array. 
+ + Parameters + ---------- + errorgen_layer : dict + A dictionary containing the error generator coefficients and rates for a circuit layer, + with the error generator coefficients labels represented using `LocalStimErrorgenLabel`. + + mx_basis : Basis or str, optional (default 'pp') + Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + basis in which to return the error generator. + Returns + ------- + errorgen : numpy.ndarray + Error generator corresponding to input `errorgen_layer` dictionary as a numpy array. + """ + + #Use the keys of errorgen_layer to construct a new `ExplicitErrorgenBasis` with + #the elements necessary for the construction of the error generator matrix. + + #Construct a list of new errorgen coefficients by looping through the keys of errorgen_layer + #and converting them to LocalElementaryErrorgenLabels. + local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()] + + errorgen_basis = _ExplicitElementaryErrorgenBasis(self.model.state_space, local_errorgen_coeffs, basis_1q='PP') + + #Stack the arrays and then use broadcasting to weight them according to the rates + elemgen_matrices_array = _np.stack(errorgen_basis.elemgen_matrices, axis=-1) + weighted_elemgen_matrices_array = _np.fromiter(errorgen_layer.values(), dtype=_np.double)*elemgen_matrices_array + + #The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis. + errorgen = _np.sum(weighted_elemgen_matrices_array, axis = 2) + + #finally need to change from the standard basis (which is what the error generator is currently in) + #to the pauli basis. 
+ errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis) + + return errorgen + + def ErrorPropagatorAnalytic(circ,errorModel,ErrorLayerDef=False,startingErrors=None): stim_layers=circ.convert_to_stim_tableau_layers() @@ -57,7 +666,6 @@ def ErrorPropagatorAnalytic(circ,errorModel,ErrorLayerDef=False,startingErrors=N new_error_dict[error]=(error,1) fully_propagated_layers.append(new_error_dict) - #print(len(fully_propagated_layers)) return fully_propagated_layers def InverseErrorMap(errorMap): @@ -109,7 +717,7 @@ def ErrorPropagator(circ,errorModel,multi_gate_dict=None,bch_order=1,bch_layerwi propagation_layers = stim_layers if not error_layer_def: - errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) + errorLayers=buildErrorlayers(circ,errorModel, len(circ.line_labels)) else: errorLayers=[[[_copy.deepcopy(eg) for eg in errorModel]] for i in range(circ.depth)] @@ -128,12 +736,14 @@ def ErrorPropagator(circ,errorModel,multi_gate_dict=None,bch_order=1,bch_layerwi new_error_layer.append(new_error_dict) if bch_layerwise and not nonmarkovian: following_layer = errorLayers.pop(0) - new_errors=BCH_Handler(err_layer,following_layer,bch_order) + new_errors=BCH_Handler(err_layer,following_layer,bch_order) #This should be new_error_layer as the first arg. 
errorLayers.insert(new_errors,0) else: fully_propagated_layers.append(new_error_layer) fully_propagated_layers.append(errorLayers.pop(0)) + + if bch_layerwise and not nonmarkovian: final_error=dict() for order in errorLayers[0]: @@ -256,7 +866,6 @@ def error_stitcher(first_error,second_error): link_dict=second_error.pop(0) new_errors=[] for layer in first_error: - #print(len(layer)) new_layer=dict() for key in layer: if layer[key][0] in link_dict: diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index aec1b28c2..b8f585906 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -1,14 +1,18 @@ -from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel -from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * +from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel as _ElementaryErrorgenLabel, GlobalElementaryErrorgenLabel as _GEEL,\ +LocalElementaryErrorgenLabel as _LEEL +from .utilspygstistimtranslator import * import stim -from numpy import array,kron +import numpy as _np from pygsti.tools import change_basis from pygsti.tools.lindbladtools import create_elementary_errorgen -class LocalStimErrorgenLabel(ElementaryErrorgenLabel): +class LocalStimErrorgenLabel(_ElementaryErrorgenLabel): - ''' - Initiates the errorgen object + """ + `LocalStimErrorgenLabel` is a specialized `ElementaryErrorgenLabel` + designed to manage the propagation of error generator using Stim primitives for fast Pauli and + Clifford operations, storing propagation related metadata, and storing metadata relevant to the + evaluation of non-Markovian error propagators using cumulant expansion based techniques. 
Inputs: ______ errorgen_type: characture can be set to 'H' Hamiltonian, 'S' Stochastic, 'C' Correlated or 'A' active following the conventions @@ -21,51 +25,93 @@ class LocalStimErrorgenLabel(ElementaryErrorgenLabel): Outputs: Null - ''' - def __init__(self,errorgen_type: str ,basis_element_labels: list, label=None): + """ + + def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initial_label=None, + label=None): + """ + Create a new instance of `LocalStimErrorgenLabel` + + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. + + basis_element_labels : tuple or list + A list or tuple of strings labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. + + circuit_time : float, optional (default None) + An optional value which associates this error generator with a particular circuit time at + which it arose. This is primarily utilized in the context of non-Markovian simulations and + estimation where an error generator may notionally be associated with a stochastic process. + + initial_label : `ElementaryErrorgenLabel`, optional (default None) + If not None, then this `ElementaryErrorgenLabel` is stored within this label and is interpreted + as being the 'initial' value of this error generator, prior to any propagation or transformation + during the course of its use. If None, then this is initialized to a `LocalElementaryErrorgenLabel` + matching the `errorgen_type` and `basis_element_labels` of this label. + + label : str, optional (default None) + An optional label string which is included when printing the string representation of this + label. 
+ + """ self.errorgen_type=str(errorgen_type) self.basis_element_labels=tuple(basis_element_labels) self.label=label + self.circuit_time = circuit_time + + #additionally store a copy of the value of the original error generator label which will remain unchanged + #during the course of propagation for later bookkeeping purposes. + if initial_label is not None: + self.initial_label = initial_label + else: + self.initial_label = self.to_local_eel() + + #TODO: Update various methods to account for additional metadata that has been added. - ''' - hashes the error gen object - ''' def __hash__(self): - pauli_hashable=[] - for pauli in self.basis_element_labels: - pauli_hashable.append(str(pauli)) - return hash((self.errorgen_type,tuple(pauli_hashable))) + pauli_hashable = [str(pauli) for pauli in self.basis_element_labels] + return hash((self.errorgen_type, tuple(pauli_hashable))) - def labels_to_strings(self): - strings=[] - for paulistring in self.basis_element_labels: - strings.append(str(paulistring)[1:].replace('_',"I")) - return tuple(strings) + def bel_to_strings(self): + """ + Convert the elements of `basis_element_labels` to python strings + (from stim.PauliString(s)) and return as a tuple. + """ + return tuple([str(ps)[1:].replace('_',"I") for ps in self.basis_element_labels]) - ''' - checks and if two error gens have the same type and labels - ''' def __eq__(self, other): - return (self.errorgen_type == other.errorgen_type - and self.basis_element_labels == other.basis_element_labels) + """ + Performs equality check by seeing if the two error gen labels have the same `errorgen_type` + and `basis_element_labels`. 
+ """ + return isinstance(other, LocalStimErrorgenLabel) and self.errorgen_type == other.errorgen_type \ + and self.basis_element_labels == other.basis_element_labels - ''' - displays the errorgens as strings - ''' + def __str__(self): if self.label is None: return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")" else: - return self.errorgen_type +" "+str(self.label)+" "+ "(" + ",".join(map(str, self.basis_element_labels)) + ")" - - + return self.errorgen_type + " " + str(self.label)+ " " + "(" \ + + ",".join(map(str, self.basis_element_labels)) + ")" def __repr__(self): if self.label is None: - return str((self.errorgen_type, self.basis_element_labels)) + if self.circuit_time is not None: + return f'({self.errorgen_type}, {self.basis_element_labels}, time={self.circuit_time})' + else: + return f'({self.errorgen_type}, {self.basis_element_labels})' else: - return str((self.errorgen_type, self.label, self.basis_element_labels)) + if self.circuit_time is not None: + return f'({self.errorgen_type}, {self.label}, {self.basis_element_labels}, time={self.circuit_time})' + else: + return f'({self.errorgen_type}, {self.label}, {self.basis_element_labels})' def reduce_label(self,labels): @@ -77,21 +123,21 @@ def reduce_label(self,labels): for idx in labels: pauli=pauli[:idx]+'I'+pauli[(idx+1):] new_labels.append(stim.PauliString(pauli)) - return localstimerrorgen(self.errorgen_type,tuple(new_labels)) + return LocalStimErrorgenLabel(self.errorgen_type,tuple(new_labels)) ''' - Returns the errorbasis matrix for the associated errorgenerator mulitplied by its error rate + Returns the errorbasis matrix for the associated error generator mulitplied by its error rate input: A pygsti defined matrix basis by default can be pauli-product, gellmann 'gm' or then pygsti standard basis 'std' functions defaults to pauli product if not specified ''' - def toWeightedErrorBasisMatrix(self,weight=1.0,matrix_basis='pp'): + def toWeightedErrorBasisMatrix(self, 
weight=1.0, matrix_basis='pp'): PauliDict={ - 'I' : array([[1.0,0.0],[0.0,1.0]]), - 'X' : array([[0.0j, 1.0+0.0j], [1.0+0.0j, 0.0j]]), - 'Y' : array([[0.0, -1.0j], [1.0j, 0.0]]), - 'Z' : array([[1.0, 0.0j], [0.0j, -1.0]]) + 'I' : _np.array([[1.0,0.0],[0.0,1.0]]), + 'X' : _np.array([[0.0j, 1.0+0.0j], [1.0+0.0j, 0.0j]]), + 'Y' : _np.array([[0.0, -1.0j], [1.0j, 0.0]]), + 'Z' : _np.array([[1.0, 0.0j], [0.0j, -1.0]]) } paulis=[] for paulistring in self.basis_element_labels: @@ -99,26 +145,66 @@ def toWeightedErrorBasisMatrix(self,weight=1.0,matrix_basis='pp'): if idx == 0: pauliMat = PauliDict[pauli] else: - pauliMat=kron(pauliMat,PauliDict[pauli]) + pauliMat=_np.kron(pauliMat,PauliDict[pauli]) paulis.append(pauliMat) if self.errorgen_type in 'HS': - return weight*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0]),'std',matrix_basis) + return weight*change_basis(create_elementary_errorgen(self.errorgen_type, paulis[0]), 'std', matrix_basis) else: - return weight*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0],paulis[1]),'std',matrix_basis) + return weight*change_basis(create_elementary_errorgen(self.errorgen_type, paulis[0], paulis[1]),'std', matrix_basis) - def propagate_error_gen_tableau(self, slayer,weight): + #TODO: Rework this to not directly modify the weights, and only return the sign modifier. 
+ def propagate_error_gen_tableau(self, slayer, weight): + """ + Parameters + ---------- + slayer : + + weight : float + + """ if self.errorgen_type =='Z' or self.errorgen_type=='I': - return (self,weight) + return (self, weight) new_basis_labels = [] weightmod = 1 for pauli in self.basis_element_labels: temp = slayer(pauli) - weightmod=weightmod*temp.sign + weightmod=_np.real(temp.sign) * weightmod temp=temp*temp.sign new_basis_labels.append(temp) if self.errorgen_type =='S': weightmod=1.0 - - return (LocalStimErrorgenLabel(self.errorgen_type,new_basis_labels),weightmod*weight) + return (LocalStimErrorgenLabel(self.errorgen_type, new_basis_labels, initial_label=self.initial_label, circuit_time=self.circuit_time), + weightmod*weight) + + def to_global_eel(self, sslbls = None): + """ + Returns a `GlobalElementaryErrorgenLabel` equivalent to this `LocalStimErrorgenLabel`. + + sslbls : list (optional, default None) + A list of state space labels corresponding to the qubits corresponding to each + of the paulis in the local basis element label. If None this defaults a list of integers + ranging from 0 to N where N is the number of paulis in the basis element labels. + """ + + #first get the pauli strings corresponding to the stim.PauliString object(s) that are the + #basis_element_labels. + pauli_strings = self.bel_to_strings() + if sslbls is None: + sslbls = list(range(len(pauli_strings[0]))) #The two pauli strings should be the same length, so take the first. + #GlobalElementaryErrorgenLabel should have built-in support for casting from a tuple of the error gen type + #and the paulis for the basis element labels, so long as it is given appropriate sslbls to use. + return _GEEL.cast((self.errorgen_type,) + pauli_strings, sslbls= sslbls) + + + def to_local_eel(self): + """ + Returns a `LocalElementaryErrorgenLabel` equivalent to this `LocalStimErrorgenLabel`. 
+ + Returns + ------- + `LocalElementaryErrorgenLabel` + """ + return _LEEL(self.errorgen_type, self.bel_to_strings()) + diff --git a/pygsti/errorgenpropagation/propagatableerrorgen.py b/pygsti/errorgenpropagation/propagatableerrorgen.py index 3fbaa9e33..239446d0e 100644 --- a/pygsti/errorgenpropagation/propagatableerrorgen.py +++ b/pygsti/errorgenpropagation/propagatableerrorgen.py @@ -1,5 +1,5 @@ from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel -from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * +from .utilspygstistimtranslator import * import stim from numpy import array,kron from pygsti.tools import change_basis diff --git a/pygsti/errorgenpropagation/utilserrorgenpropagation.py b/pygsti/errorgenpropagation/utilserrorgenpropagation.py index 9d0eab4db..0ad96e529 100644 --- a/pygsti/errorgenpropagation/utilserrorgenpropagation.py +++ b/pygsti/errorgenpropagation/utilserrorgenpropagation.py @@ -1,5 +1,5 @@ -from pygsti.extras.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from .localstimerrorgen import LocalStimErrorgenLabel as _LSE from numpy import conjugate ''' @@ -8,15 +8,15 @@ def commute_errors(ErG1,ErG2, weightFlip=1.0, BCHweight=1.0): def com(P1,P2): P3=P1*P2-P2*P1 - return (P3.weight,P3*conjugate(P3.weight)) - + return (P3.weight, P3*conjugate(P3.weight)) + # returns (sign def acom(P1,P2): P3=P1*P2+P2*P1 - return (P3.weight,P3*conjugate(P3.weight)) + return (P3.weight, P3*conjugate(P3.weight)) def labelMultiply(P1,P2): P3=P1*P2 - return (P3.weight,P3*conjugate(P3.weight)) + return (P3.weight, P3*conjugate(P3.weight)) errorGens=[] @@ -24,7 +24,8 @@ def labelMultiply(P1,P2): if ErG1.getType()=='H' and ErG2.getType()=='H': pVec=com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - errorGens.append( _LSE( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) + if pVec[0] != 0: + errorGens.append( _LSE( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) elif ErG1.getType()=='H' and ErG2.getType()=='S': 
pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) diff --git a/pygsti/models/model.py b/pygsti/models/model.py index b5298c98d..b82c89722 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -1897,7 +1897,7 @@ def _format_gauge_action_matrix(self, mx, op, reduce_to_model_space, row_basis, if reduce_to_model_space: allowed_lbls = op.errorgen_coefficient_labels() allowed_lbls_set = set(allowed_lbls) - allowed_row_basis = _ExplicitElementaryErrorgenBasis(self.state_space, allowed_lbls, basis1q=None) + allowed_row_basis = _ExplicitElementaryErrorgenBasis(self.state_space, allowed_lbls, basis_1q=None) disallowed_indices = [i for i, lbl in enumerate(row_basis.labels) if lbl not in allowed_lbls_set] if len(disallowed_indices) > 0: diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py new file mode 100644 index 000000000..956f4f600 --- /dev/null +++ b/pygsti/tools/errgenproptools.py @@ -0,0 +1,433 @@ +""" +Tools for the propagation of error generators through circuits. +""" +#*************************************************************************************************** +# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + +import stim +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen +from numpy import conjugate + +def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): + """ + Converts an input `GlobalElementaryErrorgenLabel` to a tuple of stim.PauliString + objects, padded with an appropriate number of identities. + + Parameters + ---------- + err_gen_coeff_label : `GlobalElementaryErrorgenLabel` + The error generator coefficient label to construct the tuple of pauli + strings for. + + num_qubits : int + Number of total qubits to use for the Pauli strings. Used to determine + the number of identities added when padding. + + Returns + ------- + tuple of stim.PauliString + A tuple of either length 1 (for H and S) or length 2 (for C and A) + whose entries are stim.PauliString representations of the indices for the + input error generator label, padded with an appropriate number of identities + given the support of the error generator label. + + """ + assert isinstance(err_gen_coeff_label, _GEEL), 'Only `GlobalElementaryErrorgenLabel is currently supported.' + + #the coefficient label is a tuple with 3 elements. + #The first element is the error generator type. + #the second element is a tuple of paulis either of length 1 or 2 depending on the error gen type. + #the third element is a tuple of subsystem labels. + errorgen_typ = err_gen_coeff_label.errorgen_type + pauli_lbls = err_gen_coeff_label.basis_element_labels + sslbls = err_gen_coeff_label.support + + #double check that the number of qubits specified is greater than or equal to the length of the + #basis element labels. 
+ #assert len(pauli_lbls) >= num_qubits, 'Specified `num_qubits` is less than the length of the basis element labels.' + + if errorgen_typ == 'H' or errorgen_typ == 'S': + pauli_string = num_qubits*['I'] + pauli_lbl = pauli_lbls[0] + for i, sslbl in enumerate(sslbls): + pauli_string[sslbl] = pauli_lbl[i] + pauli_string = stim.PauliString(''.join(pauli_string)) + return (pauli_string,) + elif errorgen_typ == 'C' or errorgen_typ == 'A': + pauli_strings = [] + for pauli_lbl in pauli_lbls: #iterate through both pauli labels + pauli_string = num_qubits*['I'] + for i, sslbl in enumerate(sslbls): + pauli_string[sslbl] = pauli_lbl[i] + pauli_strings.append(stim.PauliString(''.join(pauli_string))) + return tuple(pauli_strings) + else: + raise ValueError(f'Unsupported error generator type {errorgen_typ}') + + +def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): + """ + Apply the BCH approximation at the given order to combine the input dictionaries + of error generator rates. + + Parameters + ---------- + errgen_layer_1 : list of dicts + Each lists contains dictionaries of the error generator coefficients and rates for a circuit layer. + Each dictionary corresponds to a different order of the BCH approximation. + The error generator coefficients are represented using LocalStimErrorgenLabel. + + errgen_layer_2 : list of dicts + See errgen_layer_1. + + Returns + ------- + combined_errgen_layer : list of dicts? + A list with the same general structure as `errgen_layer_1` and `errgen_layer_2`, but with the + rates combined according to the selected order of the BCH approximation. + + """ + if bch_order != 1: + msg = 'The implementation of the 2nd order BCH approx is still under development. For now only 1st order is supported.' 
+ raise NotImplementedError(msg) + + new_errorgen_layer=[] + for curr_order in range(0,bch_order): + working_order_dict = dict() + #add first order terms into new layer + if curr_order == 0: + #get the dictionaries of error generator coefficient labels and rates + #for the current working BCH order. + current_errgen_dict_1 = errgen_layer_1[curr_order] + current_errgen_dict_2 = errgen_layer_2[curr_order] + #Get a combined set of error generator coefficient labels for these two + #dictionaries. + current_combined_coeff_lbls = set(current_errgen_dict_1.keys()) | set(current_errgen_dict_2.keys()) + + #loop through the combined set of coefficient labels and add them to the new dictionary for the current BCH + #approximation order. If present in both we sum the rates. + for coeff_lbl in current_combined_coeff_lbls: + working_order_dict[coeff_lbl] = current_errgen_dict_1.get(coeff_lbl, 0) + current_errgen_dict_2.get(coeff_lbl, 0) + new_errorgen_layer.append(working_order_dict) + #second order BCH terms. + elif curr_order == 1: + current_errgen_dict_1 = errgen_layer_1[curr_order-1] + current_errgen_dict_2 = errgen_layer_2[curr_order-1] + #calculate the pairwise commutators between each of the error generators in current_errgen_dict_1 and + #current_errgen_dict_2. + for error1 in current_errgen_dict_1.keys(): + for error2 in current_errgen_dict_2.keys(): + #get the list of error generator labels + commuted_errgen_list = commute_error_generators(error1, error2, + weight=1/2*current_errgen_dict_1[error1]*current_errgen_dict_1[error2]) + print(commuted_errgen_list) + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. 
+ for error_tuple in commuted_errgen_list: + working_order_dict[error_tuple[0]]=error_tuple[1] + + + if len(errgen_layer_1)==2: + for error_key in errgen_layer_1[1]: + working_order_dict[error_key]=errgen_layer_1[1][error_key] + if len(errgen_layer_2)==2: + for error_key in errgen_layer_2[1]: + working_order_dict[error_key]=errgen_layer_2[1][error_key] + new_errorgen_layer.append(working_order_dict) + + else: + raise ValueError("Higher Orders are not Implemented Yet") + return new_errorgen_layer + + +def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1.0): + """ + Returns the commutator of two error generators. I.e. [errorgen_1, errorgen_2]. + + Parameters + ---------- + errorgen1 : `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `LocalStimErrorgenLabel` + Second error generator + + flip_weight : bool, optional (default False) + If True flip the sign of the input value of weight kwarg. + + weight : float, optional (default 1.0) + An optional weighting value to apply to the value of the commutator. + + Returns + ------- + list of `LocalStimErrorgenLabel`s corresponding to the commutator of the two input error generators, + weighted by the specified value of `weight`. + """ + + errorGens=[] + + if flip_weight: + w= -weight + else: + w = weight + + errorgen_1_type = errorgen_1.errorgen_type + errorgen_2_type = errorgen_2.errorgen_type + + #The first basis element label is always well defined, + #the second we'll define only of the error generator is C or A type. + errorgen_1_bel_0 = errorgen_1.basis_element_labels[0] + errorgen_2_bel_0 = errorgen_2.basis_element_labels[0] + + if errorgen_1_type == 'C' or errorgen_1_type == 'A': + errorgen_1_bel_1 = errorgen_1.basis_element_labels[1] + if errorgen_2_type == 'C' or errorgen_2_type == 'A': + errorgen_2_bel_1 = errorgen_2.basis_element_labels[1] + + #create the identity stim.PauliString for later comparisons. 
+ identity = stim.PauliString('I'*len(errorgen_1_bel_0)) + + if errorgen_1_type=='H' and errorgen_2_type=='H': + ptup = com(errorgen_1_bel_0 , errorgen_2_bel_0) + if ptup is not None: + errorGens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0]) ) + + elif errorgen_1_type=='H' and errorgen_2_type=='S': + ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) + if ptup is not None: + if errorgen_2_bel_0 == ptup[1]: + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup[0]) ) + else: + errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), 1j*w*ptup[0]) ) + + elif errorgen_1_type=='S' and errorgen_2_type=='H': + ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) + if ptup is not None: + if errorgen_2_bel_0 == ptup[1]: + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), -1j*w*ptup[0]) ) + else: + errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), -1j*w*ptup[0]) ) + + + elif errorgen_1_type=='H' and errorgen_2_type=='C': + ptup1 = com(errorgen_2_bel_0 , errorgen_1_bel_0) + ptup2 = com(errorgen_2_bel_1 , errorgen_1_bel_0) + if ptup1 is not None: + if ptup1[1] == errorgen_2_bel_1: + errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]) ) + else: + errorGens.append((_LSE('C', [ptup1[1], errorgen_2_bel_1]), 1j*w*ptup1[0]) ) + if ptup2 is not None: + if ptup2[1] == errorgen_2_bel_0: + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup2[0]) ) + else: + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0]) ) + + elif errorgen_1_type=='C' and errorgen_2_type=='H': + errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type=='H' and errorgen_2_type=='A': + ptup1 = com(errorgen_1_bel_0 , errorgen_2_bel_0) + ptup2 = com(errorgen_1_bel_0 , errorgen_2_bel_1) + if ptup1 is not None: + if ptup1[1] != errorgen_2_bel_1: + errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0]) ) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + 
errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0]) ) + + elif errorgen_1_type=='A' and errorgen_2_type=='H': + errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type=='S' and errorgen_2_type=='S': + #Commutator of S with S is zero. + pass + + elif errorgen_1_type=='S' and errorgen_2_type=='C': + ptup1 = product(errorgen_1_bel_0 , errorgen_2_bel_0) + ptup2 = product(errorgen_2_bel_1 , errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append(( _LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + elif ptup1[1] == identity: + errorGens.append(( _LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + else: #ptup2[1] == identity + errorGens.append(( _LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = product(ptup1[1], errorgen_1_bel_0) + #it shouldn't be possible for ptup2[1] to equal ptup1[1], + #as that would imply that errorgen_1_bel_0 was the identity. + if ptup2[1] == identity: + errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0]) ) + else: + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) + + #ptup3 is just the product from ptup2 in reverse, so this can be done + #more efficiently, but I'm not going to do that at present... 
+ ptup3 = product(errorgen_1_bel_0, ptup1[1]) + if ptup3[1] == identity: + errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup3[0]) ) + else: + errorGens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup3[0])) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'S': + errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + #07/29/24 : I've completed up to this point. + + elif errorgen_1_type == 'S' and errorgen_2_type == 'A': + ptup1 =product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2=product(errorgen_2_bel_1, errorgen_1_bel_0) + errorGens.append( _LSE( 'C', [ptup1[1], ptup2[1]] ,1j*w*ptup1[0]*ptup2[0] )) + ptup1=product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2=product(errorgen_2_bel_0, errorgen_1_bel_0) + errorGens.append( _LSE( 'C', [ptup1[1], ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + errorGens.append( _LSE( 'A', [errorgen_1_bel_0, ptup2[1]] ,-.5*w*ptup1[0]*ptup2[0])) + + elif errorgen_1_type == 'A' and errorgen_1_type == 'S': + errorGens = commute_error_generators(errorgen_2,errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'C': + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 =product(errorgen_2_bel_1, errorgen_1_bel_1) + errorGens.append( _LSE( 'A', [ptup1[1], ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 =product(errorgen_2_bel_0, errorgen_1_bel_1) + errorGens.append( _LSE( 'A', [ptup1[1] , ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) + ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_0) + errorGens.append( _LSE( 'A', [ptup1[1] , ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_1) + ptup2 =product(errorgen_2_bel_0, errorgen_1_bel_0) + errorGens.append( _LSE( 'A' , [ptup1[1] , 
ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) + ptup1=acom(errorgen_1_bel_0, errorgen_1_bel_1) + ptup2=com(errorgen_2_bel_0, ptup1[1]) + errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_2_bel_1 ], -.5*1j*w*ptup1[0]*ptup2[0])) + ptup1=acom(errorgen_1_bel_0,errorgen_1_bel_1) + ptup2=com(errorgen_2_bel_1,ptup1[1]) + errorGens.append( _LSE( 'A' , [ptup2[1], errorgen_2_bel_0] , -.5*1j*w*ptup1[0]*ptup2[0])) + ptup1=acom(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2=com(ptup1[1],errorgen_1_bel_0) + errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_1_bel_1] , -.5*1j*w*ptup1[0]*ptup2[0])) + ptup1=acom(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2=com(ptup1[1],errorgen_1_bel_1) + errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_1_bel_0 ] , -.5*1j*w*ptup1[0]*ptup2[0])) + ptup1=acom(errorgen_1_bel_0,errorgen_1_bel_1) + ptup2=acom(errorgen_2_bel_0,errorgen_2_bel_1) + ptup3=com(ptup1[1],ptup2[1]) + errorGens.append( _LSE( 'H', [ptup3[1]] ,.25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'A': + ptup1 = product(errorgen_1_bel_0,errorgen_2_bel_0) + ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_1) + errorGens.append( _LSE('C' , [ptup1[1],ptup2[1]] , 1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_0,errorgen_2_bel_1) + ptup2 =product(errorgen_2_bel_0,errorgen_1_bel_1) + errorGens.append( _LSE('C' ,[ptup1[1],ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) + ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_0) + errorGens.append( _LSE('C' , [ptup1[1],ptup2[1]] , 1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_2_bel_0,errorgen_1_bel_0) + ptup2 =product(errorgen_1_bel_1,errorgen_2_bel_1) + errorGens.append( _LSE('C' ,[ptup1[1],ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) + ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2 =com(errorgen_1_bel_0,ptup1[1]) + errorGens.append( _LSE('A' , [ptup2[1] , errorgen_1_bel_1] , .5*w*ptup1[0]*ptup2[0] )) + ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2 
=com(errorgen_1_bel_1,ptup1[1]) + errorGens.append( _LSE('A' , [ptup2[1], errorgen_1_bel_0 ], .5*w*ptup1[0]*ptup2[0] )) + ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) + ptup2 =com(errorgen_2_bel_0,ptup1[1]) + errorGens.append( _LSE('C', [ptup2[1] , errorgen_2_bel_1 ], .5*1j*w*ptup1[0]*ptup2[0] )) + ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) + ptup2 =com(errorgen_2_bel_1,ptup1[1]) + errorGens.append( _LSE('C',[ptup2[1],errorgen_2_bel_0 ],-.5*1j*w*ptup1[0]*ptup2[0] )) + ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2 =acom(errorgen_1_bel_0,errorgen_1_bel_1) + ptup3=com(ptup1[1],ptup2[1]) + errorGens.append( _LSE('H',[ptup3[1]],-.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'C': + errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'A': + ptup1=product(errorgen_2_bel_1,errorgen_1_bel_1) + ptup2=product(errorgen_1_bel_0,errorgen_2_bel_0) + errorGens.append(_LSE('A',[ptup1[1],ptup2[1]] ,-1j*w*ptup1[0]*ptup2[0])) + ptup1=product(errorgen_2_bel_0,errorgen_1_bel_0) + ptup2=product(errorgen_1_bel_1,errorgen_2_bel_1) + errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) + ptup1=product(errorgen_1_bel_1,errorgen_2_bel_0) + ptup2=product(errorgen_2_bel_1,errorgen_1_bel_0) + errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) + ptup1=product(errorgen_1_bel_0,errorgen_2_bel_1) + ptup2=product(errorgen_2_bel_0,errorgen_1_bel_1) + errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) + ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2=com(errorgen_1_bel_1,ptup1[1]) + errorGens.append(_LSE('C',[ptup2[1],errorgen_1_bel_0],.5*w*ptup1[0]*ptup2[0])) + ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2=com(errorgen_1_bel_0,ptup1[1]) + errorGens.append(_LSE('C',[ptup2[1],errorgen_1_bel_1] ,-.5*w*ptup1[0]*ptup2[0])) + ptup1=com(errorgen_1_bel_0,errorgen_1_bel_1) + 
ptup2=com(errorgen_2_bel_0,ptup1[1]) + errorGens.append(_LSE('C', [ptup2[1],errorgen_2_bel_1] ,.5*w*ptup1[0]*ptup2[0])) + ptup1=com(errorgen_1_bel_0,errorgen_1_bel_1) + ptup2=com(errorgen_2_bel_1,ptup1[1]) + errorGens.append(_LSE('C', [ptup2[1],errorgen_2_bel_0] ,-.5*w*ptup1[0]*ptup2[0])) + ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) + ptup2=com(errorgen_1_bel_0,errorgen_1_bel_1) + ptup3=com(ptup1[1],ptup2[1]) + errorGens.append( _LSE('H',[ptup3[1]] ,.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + + return errorGens + +def com(P1, P2): + #P1 and P2 either commute or anticommute. + if P1.commutes(P2): + P3 = 0 + return None + else: + P3 = P1*P2 + return (P3.sign*2, P3 / P3.sign) + #return (sign(P3) * 2 if P1 and P2 anticommute, 0 o.w., + # unsigned P3) + +def acom(P1, P2): + #P1 and P2 either commute or anticommute. + if P1.commutes(P2): + P3 = P1*P2 + return (P3.sign*2, P3 / P3.sign) + else: + return None + + #return (sign(P3) * 2 if P1 and P2 commute, 0 o.w., + # unsigned P3) + +def product(P1, P2): + P3 = P1*P2 + return (P3.sign, P3 / P3.sign) + #return (sign(P3), + # unsigned P3) \ No newline at end of file diff --git a/pygsti/tools/jamiolkowski.py b/pygsti/tools/jamiolkowski.py index e204c5206..42cedbdf4 100644 --- a/pygsti/tools/jamiolkowski.py +++ b/pygsti/tools/jamiolkowski.py @@ -327,7 +327,7 @@ def sums_of_negative_choi_eigenvalues(model): """ ret = [] for (_, gate) in model.operations.items(): - J = fast_jamiolkowski_iso_std(gate, model.basis) # Choi mx basis doesn't matter + J = fast_jamiolkowski_iso_std(gate.to_dense(), model.basis) # Choi mx basis doesn't matter evals = _np.linalg.eigvals(J) # could use eigvalsh, but wary of this since eigh can be wrong... 
sumOfNeg = 0.0 for ev in evals: diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index 6b894e939..e194c8f5b 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1979,16 +1979,6 @@ def project_model(model, target_model, basis = model.basis proj_basis = basis # just use the same basis here (could make an arg later?) - #OLD REMOVE - ##The projection basis needs to be a basis for density matrices - ## (i.e. 2x2 mxs in 1Q case) rather than superoperators (4x4 mxs - ## in 1Q case) - whcih is what model.basis is. So, we just extract - ## a builtin basis name for the projection basis. - #if basis.name in ('pp', 'gm', 'std', 'qt'): - # proj_basis_name = basis.name - #else: - # proj_basis_name = 'pp' # model.basis is weird so just use paulis as projection basis - if basis.name != target_model.basis.name: raise ValueError("Basis mismatch between model (%s) and target (%s)!" % (model.basis.name, target_model.basis.name)) @@ -2029,8 +2019,6 @@ def project_model(model, target_model, otherGens = otherBlk.create_lindblad_term_superoperators(mx_basis=basis) #Note: return values *can* be None if an empty/None basis is given - #lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \ - # _np.einsum('ij,ijkl', OProj, OGens) lnd_error_gen = _np.tensordot(HBlk.block_data, HGens, (0, 0)) + \ _np.tensordot(otherBlk.block_data, otherGens, ((0, 1), (0, 1))) @@ -2061,32 +2049,13 @@ def project_model(model, target_model, pos_evals = evals.clip(0, 1e100) # clip negative eigenvalues to 0 OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U))) #OProj_cp is now a pos-def matrix - #lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \ - # _np.einsum('ij,ijkl', OProj_cp, OGens) lnd_error_gen_cp = _np.tensordot(HBlk.block_data, HGens, (0, 0)) + \ _np.tensordot(OProj_cp, otherGens, ((0, 1), (0, 1))) - #lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis) gsDict['LND'].operations[gl] = operation_from_error_generator( lnd_error_gen_cp, 
targetOp, basis, gen_type) NpDict['LND'] += HBlk.block_data.size + otherBlk.block_data.size - #Removed attempt to contract H+S to CPTP by removing positive stochastic projections, - # but this doesn't always return the gate to being CPTP (maybe b/c of normalization)... - #sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens) - # # (only negative stochastic projections OK) - #sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp) - #gsHSCP.operations[gl] = _tools.operation_from_error_generator( - # ham_error_gen, targetOp, gen_type) #+sto_error_gen_cp - - #DEBUG!!! - #print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_eigenvalues(model)) - #print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_eigenvalues(gsDict['LND'])) - - #Check for CPTP where expected - #assert(_tools.sum_of_negative_choi_eigenvalues(gsHSCP) < 1e-6) - #assert(_tools.sum_of_negative_choi_eigenvalues(gsDict['LND']) < 1e-6) - #Collect and return requrested results: ret_gs = [gsDict[p] for p in projectiontypes] ret_Nps = [NpDict[p] for p in projectiontypes] From ad85a98c6ad638f5294d3cda23f085579e6dfc9a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 23 Oct 2024 22:53:40 -0600 Subject: [PATCH 014/102] Patch broken BCH code path I hadn't yet updated that codepath after adding in the 'include_spam' kwarg so needed to patch this in. 
--- .../errorpropagator_dev.py | 33 +++++++++++++++---- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 09538dbc9..1060a599e 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -196,7 +196,8 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): return propagated_errorgen_layers - def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, multi_gate_dict=None): + def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, multi_gate_dict=None, + include_spam=True): """ Propagate all of the error generators for each circuit to the end, performing approximation/recombination either along the way (layerwise) @@ -222,6 +223,10 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul multi_gate_dict : dict, optional (default None) An optional dictionary mapping between gate name aliases and their standard name counterparts. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. """ msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ @@ -235,13 +240,14 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul #if not doing layerwise BCH then we can re-use `propagate_errorgens` fully. if not bch_layerwise: - propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict) + propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict, + include_spam=include_spam) #otherwise we need to do the error generator layer propagation slightly #differently. else: #start by converting the input circuit into a list of stim Tableaus with the #first element dropped. 
- stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer=True) + stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer= not include_spam) #We next want to construct a new set of Tableaus corresponding to the cumulative products #of each of the circuit layers with those that follow. These Tableaus correspond to the @@ -253,11 +259,12 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul #to the error generators for a particular gate layer. #TODO: Add proper inferencing for number of qubits: assert circuit.line_labels is not None and circuit.line_labels != ('*',) - errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels)) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) #propagate the errorgen_layers through the propagation_layers to get a list #of end of circuit error generator dictionaries. - propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers) + propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers, + include_spam = include_spam) return propagated_errorgen_layers @@ -527,7 +534,7 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ return fully_propagated_layers #TODO: Add an option to return the results with the different BCH order combined. - def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1): + def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True): """ Propagates the error generator layers through each of the corresponding propagation layers (i.e. the clifford operations for the remainder of the circuit). 
In this version we @@ -551,6 +558,11 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc bch_order : int, optional (default 1) Order of the BCH approximation to use. + include_spam : bool, optional (default True) + If True then include the error generators for state preparation and measurement + are included in errogen_layers, and the state preparation error generator should + be propagated through (the measurement one is simply appended at the end). + Returns ------- @@ -570,7 +582,14 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc if len(errorgen_layers)>0: combined_err_layer = errorgen_layers[0] - for i in range(len(errorgen_layers)-1): + #the stopping index in errorgen_layers will depend on whether the measurement error + #generator is included or not. + if include_spam: + stopping_idx = len(errorgen_layers)-2 + else: + stopping_idx = len(errorgen_layers)-1 + + for i in range(stopping_idx): #err_layer = errorgen_layers[i] prop_layer = propagation_layers[i] new_err_layer = [] From 75612776aefebc6c5811121cbae06dba7710a622 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 26 Oct 2024 18:44:52 -0600 Subject: [PATCH 015/102] Fix BCH bug Fixes an error in the first order BCH logic that meant that when including SPAM the final measurement layer's error generator wasn't getting combined in. 
--- pygsti/errorgenpropagation/errorpropagator_dev.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 1060a599e..617e3fb72 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -604,6 +604,11 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] combined_err_layer = _eprop.bch_approximation(new_err_layer, errorgen_layers[i+1], bch_order=1) + #If we are including spam then there will be one last error generator which we don't propagate + #through which needs to be combined using BCH. + if include_spam: + combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=1) + fully_propagated_layers.append(combined_err_layer) return fully_propagated_layers From 896bb9e3bde6fdfab979086189a44b6dd0123222 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 27 Oct 2024 22:44:56 -0600 Subject: [PATCH 016/102] Attempt at completed error generator commutators My initial (complete) attempt at updating the implementation of the error generator commutator psuedocode to get it working. Next up is testing. 
--- .../errorpropagator_dev.py | 11 +- pygsti/tools/errgenproptools.py | 395 +++++++++++++----- 2 files changed, 299 insertions(+), 107 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 617e3fb72..cc347e809 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -565,7 +565,12 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc Returns ------- - + fully_propagated_layers : list of lists of dicts + A list of list of dicts with the same structure as errorgen_layers corresponding + to the results of having propagated each of the error generator layers through + the circuit to the end while combining the layers in a layerwise fashion using the + BCH approximation. As a result of this combination, this list should have a length + of one. """ #Add temporary errors when trying to do BCH beyond 1st order while the details of the 2nd order @@ -604,8 +609,8 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] combined_err_layer = _eprop.bch_approximation(new_err_layer, errorgen_layers[i+1], bch_order=1) - #If we are including spam then there will be one last error generator which we don't propagate - #through which needs to be combined using BCH. + #If we are including spam then there will be one last error generator which we doesn't have an associated propagation + #which needs to be combined using BCH. 
if include_spam: combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=1) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 956f4f600..4cf0fbf52 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -198,23 +198,23 @@ def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1 if errorgen_1_type=='H' and errorgen_2_type=='H': ptup = com(errorgen_1_bel_0 , errorgen_2_bel_0) if ptup is not None: - errorGens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0]) ) + errorGens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0])) elif errorgen_1_type=='H' and errorgen_2_type=='S': ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) if ptup is not None: if errorgen_2_bel_0 == ptup[1]: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup[0]) ) + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup[0])) else: - errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), 1j*w*ptup[0]) ) + errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), 1j*w*ptup[0])) elif errorgen_1_type=='S' and errorgen_2_type=='H': ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) if ptup is not None: if errorgen_2_bel_0 == ptup[1]: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), -1j*w*ptup[0]) ) + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), -1j*w*ptup[0])) else: - errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), -1j*w*ptup[0]) ) + errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), -1j*w*ptup[0])) elif errorgen_1_type=='H' and errorgen_2_type=='C': @@ -222,14 +222,14 @@ def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1 ptup2 = com(errorgen_2_bel_1 , errorgen_1_bel_0) if ptup1 is not None: if ptup1[1] == errorgen_2_bel_1: - errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]) ) + errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0])) else: - errorGens.append((_LSE('C', [ptup1[1], 
errorgen_2_bel_1]), 1j*w*ptup1[0]) ) + errorGens.append((_LSE('C', [ptup1[1], errorgen_2_bel_1]), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] == errorgen_2_bel_0: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup2[0]) ) + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup2[0])) else: - errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0]) ) + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) elif errorgen_1_type=='C' and errorgen_2_type=='H': errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) @@ -239,10 +239,10 @@ def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1 ptup2 = com(errorgen_1_bel_0 , errorgen_2_bel_1) if ptup1 is not None: if ptup1[1] != errorgen_2_bel_1: - errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0]) ) + errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] != errorgen_2_bel_0: - errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0]) ) + errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) elif errorgen_1_type=='A' and errorgen_2_type=='H': errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) @@ -275,10 +275,10 @@ def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1 ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: ptup2 = product(ptup1[1], errorgen_1_bel_0) - #it shouldn't be possible for ptup2[1] to equal ptup1[1], + #it shouldn't be possible for ptup2[1] to equal errorgen_1_bel_0, #as that would imply that errorgen_1_bel_0 was the identity. 
if ptup2[1] == identity: - errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0]) ) + errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0])) else: errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) @@ -288,119 +288,306 @@ def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1 if ptup3[1] == identity: errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup3[0]) ) else: - errorGens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup3[0])) + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) elif errorgen_1_type == 'C' and errorgen_2_type == 'S': errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) - - #07/29/24 : I've completed up to this point. elif errorgen_1_type == 'S' and errorgen_2_type == 'A': - ptup1 =product(errorgen_1_bel_0, errorgen_2_bel_0) - ptup2=product(errorgen_2_bel_1, errorgen_1_bel_0) - errorGens.append( _LSE( 'C', [ptup1[1], ptup2[1]] ,1j*w*ptup1[0]*ptup2[0] )) - ptup1=product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2=product(errorgen_2_bel_0, errorgen_1_bel_0) - errorGens.append( _LSE( 'C', [ptup1[1], ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + else: + if ptup[1] != identity: + errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + if ptup[1] != identity: + 
errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) - ptup2 = com(errorgen_1_bel_0, ptup1[1]) - errorGens.append( _LSE( 'A', [errorgen_1_bel_0, ptup2[1]] ,-.5*w*ptup1[0]*ptup2[0])) - + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + #it shouldn't be possible for errorgen_1_bel_0 to be equal to ptup2, + #since that would imply + #com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) == errorgen_1_bel_0 + #Which I don't think is possible when these come from valid error genator indices. + #errorgen_1_bel_0 can't be the identity, + #And com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be by the same + #argument that it can't be errorgen_1_bel_0 + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -1j*.5*w*ptup1[0]*ptup2[0])) + elif errorgen_1_type == 'A' and errorgen_1_type == 'S': errorGens = commute_error_generators(errorgen_2,errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'C' and errorgen_2_type == 'C': ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) - ptup2 =product(errorgen_2_bel_1, errorgen_1_bel_1) - errorGens.append( _LSE( 'A', [ptup1[1], ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 =product(errorgen_2_bel_0, errorgen_1_bel_1) - errorGens.append( _LSE( 'A', [ptup1[1] , ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != 
identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) - ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_0) - errorGens.append( _LSE( 'A', [ptup1[1] , ptup2[1]] , -1j*w*ptup1[0]*ptup2[0] )) + ptup2 = product(errorgen_2_bel_1,errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_1) - ptup2 =product(errorgen_2_bel_0, errorgen_1_bel_0) - errorGens.append( _LSE( 'A' , [ptup1[1] , ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) - ptup1=acom(errorgen_1_bel_0, errorgen_1_bel_1) - ptup2=com(errorgen_2_bel_0, ptup1[1]) - errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_2_bel_1 ], -.5*1j*w*ptup1[0]*ptup2[0])) - ptup1=acom(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2=com(errorgen_2_bel_1,ptup1[1]) - errorGens.append( _LSE( 'A' , [ptup2[1], errorgen_2_bel_0] , -.5*1j*w*ptup1[0]*ptup2[0])) - ptup1=acom(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2=com(ptup1[1],errorgen_1_bel_0) - errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_1_bel_1] , -.5*1j*w*ptup1[0]*ptup2[0])) - ptup1=acom(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2=com(ptup1[1],errorgen_1_bel_1) - errorGens.append( _LSE( 'A' , [ptup2[1] , errorgen_1_bel_0 ] , -.5*1j*w*ptup1[0]*ptup2[0])) - ptup1=acom(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2=acom(errorgen_2_bel_0,errorgen_2_bel_1) - ptup3=com(ptup1[1],ptup2[1]) - errorGens.append( _LSE( 'H', [ptup3[1]] 
,.25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity, + #And com(errorgen_2_bel_0, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #And com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(ptup1[1], errorgen_1_bel_0) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_0) can't be either + errorGens.append((_LSE('A', [ptup2[1] , errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(ptup1[1], errorgen_1_bel_1) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. 
+ #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_1) can't be either + errorGens.append((_LSE('A', [ptup2[1] , errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup2 is not None: + ptup3 = com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #It shouldn't be possible for ptup3 to be the identity given valid error generator indices. + errorGens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) elif errorgen_1_type == 'C' and errorgen_2_type == 'A': - ptup1 = product(errorgen_1_bel_0,errorgen_2_bel_0) - ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_1) - errorGens.append( _LSE('C' , [ptup1[1],ptup2[1]] , 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0,errorgen_2_bel_1) - ptup2 =product(errorgen_2_bel_0,errorgen_1_bel_1) - errorGens.append( _LSE('C' ,[ptup1[1],ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) - ptup2 =product(errorgen_2_bel_1,errorgen_1_bel_0) - errorGens.append( _LSE('C' , [ptup1[1],ptup2[1]] , 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_2_bel_0,errorgen_1_bel_0) - ptup2 =product(errorgen_1_bel_1,errorgen_2_bel_1) - errorGens.append( _LSE('C' ,[ptup1[1],ptup2[1]] , -1j*w*ptup1[0]*ptup2[0])) - ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2 =com(errorgen_1_bel_0,ptup1[1]) - errorGens.append( _LSE('A' , [ptup2[1] , errorgen_1_bel_1] , .5*w*ptup1[0]*ptup2[0] )) - ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2 =com(errorgen_1_bel_1,ptup1[1]) - errorGens.append( _LSE('A' , [ptup2[1], errorgen_1_bel_0 ], .5*w*ptup1[0]*ptup2[0] )) - ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2 =com(errorgen_2_bel_0,ptup1[1]) - errorGens.append( _LSE('C', [ptup2[1] , errorgen_2_bel_1 ], .5*1j*w*ptup1[0]*ptup2[0] )) + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) + if 
ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
+ errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. + #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity. + #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_1]), .5*1j*w*ptup1[0]*ptup2[0])) + ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2 =com(errorgen_2_bel_1,ptup1[1]) - errorGens.append( _LSE('C',[ptup2[1],errorgen_2_bel_0 ],-.5*1j*w*ptup1[0]*ptup2[0] )) - ptup1 = com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2 =acom(errorgen_1_bel_0,errorgen_1_bel_1) - ptup3=com(ptup1[1],ptup2[1]) - errorGens.append( _LSE('H',[ptup3[1]],-.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup2 is not None: + ptup3= com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #it shouldn't be possible for ptup3 to be identity given valid error generator + #indices. 
+ errorGens.append((_LSE('H', [ptup3[1]]), -.25*w*ptup1[0]*ptup2[0]*ptup3[0])) elif errorgen_1_type == 'A' and errorgen_2_type == 'C': errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'A' and errorgen_2_type == 'A': - ptup1=product(errorgen_2_bel_1,errorgen_1_bel_1) - ptup2=product(errorgen_1_bel_0,errorgen_2_bel_0) - errorGens.append(_LSE('A',[ptup1[1],ptup2[1]] ,-1j*w*ptup1[0]*ptup2[0])) - ptup1=product(errorgen_2_bel_0,errorgen_1_bel_0) - ptup2=product(errorgen_1_bel_1,errorgen_2_bel_1) - errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) - ptup1=product(errorgen_1_bel_1,errorgen_2_bel_0) - ptup2=product(errorgen_2_bel_1,errorgen_1_bel_0) - errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) - ptup1=product(errorgen_1_bel_0,errorgen_2_bel_1) - ptup2=product(errorgen_2_bel_0,errorgen_1_bel_1) - errorGens.append(_LSE('A',[ptup1[1],ptup2[1]],-1j*w*ptup1[0]*ptup2[0])) - ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2=com(errorgen_1_bel_1,ptup1[1]) - errorGens.append(_LSE('C',[ptup2[1],errorgen_1_bel_0],.5*w*ptup1[0]*ptup2[0])) - ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2=com(errorgen_1_bel_0,ptup1[1]) - errorGens.append(_LSE('C',[ptup2[1],errorgen_1_bel_1] ,-.5*w*ptup1[0]*ptup2[0])) - ptup1=com(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2=com(errorgen_2_bel_0,ptup1[1]) - errorGens.append(_LSE('C', [ptup2[1],errorgen_2_bel_1] ,.5*w*ptup1[0]*ptup2[0])) - ptup1=com(errorgen_1_bel_0,errorgen_1_bel_1) - ptup2=com(errorgen_2_bel_1,ptup1[1]) - errorGens.append(_LSE('C', [ptup2[1],errorgen_2_bel_0] ,-.5*w*ptup1[0]*ptup2[0])) - ptup1=com(errorgen_2_bel_0,errorgen_2_bel_1) - ptup2=com(errorgen_1_bel_0,errorgen_1_bel_1) - ptup3=com(ptup1[1],ptup2[1]) - errorGens.append( _LSE('H',[ptup3[1]] ,.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + ptup1 = product(errorgen_2_bel_1, errorgen_1_bel_1) + ptup2 = product(errorgen_1_bel_0, errorgen_2_bel_0) + + if ptup1[1] != 
ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. 
+ #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. + errorGens.append((_LSE('C', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. + errorGens.append((_LSE('C', [ptup2[1], errorgen_1_bel_1]), -.5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity. + #com(errorgen_2_bel_0, com(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_1]), .5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #com(errorgen_2_bel_1, com(errorgen_1_bel_0,errorgen_1_bel_1)) can't be either. + errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), -.5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup2 is not None: + ptup3 = com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #it shouldn't be possible for ptup3 to be identity given valid error generator + #indices. 
+ errorGens.append((_LSE('H', [ptup3[1]]), .25*w*ptup1[0]*ptup2[0]*ptup3[0])) return errorGens From 3d0764f470bd5877e802f2fe6c1d19482c147134 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 28 Oct 2024 19:28:18 -0600 Subject: [PATCH 017/102] Generalize CompleteElementaryErrorgenBasis Generalize the implementation of CompleteElementaryErrorgenBasis to add support for local error generator label types. This enables constructing bases with element matrices that are given on the full space, rather than restricted to the nontrivial subspace, which is useful for some applications. Also add a new casting method for LocalStimErrorgenLabel to allow conversion from other label types/label-like objects. --- pygsti/baseobjs/errorgenbasis.py | 73 +++++++++++++++---- pygsti/baseobjs/errorgenlabel.py | 20 +++++ .../errorgenpropagation/localstimerrorgen.py | 69 ++++++++++++++++++ pygsti/tools/errgenproptools.py | 2 +- 4 files changed, 150 insertions(+), 14 deletions(-) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 3c29016f4..086e6d4b4 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -283,7 +283,6 @@ def difference(self, other_basis): difference_state_space = self.state_space return ExplicitElementaryErrorgenBasis(difference_state_space, difference_labels, self._basis_1q) - class CompleteElementaryErrorgenBasis(ElementaryErrorgenBasis): """ This basis object contains the information necessary for building, @@ -464,11 +463,11 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, return (offsets, total_support) if return_total_support else offsets def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', 'C', 'A'), - max_weights=None, sslbl_overlap=None): + max_weights=None, sslbl_overlap=None, default_label_type='global'): """ Parameters ---------- - basis_1q : `Basis` or str, optional (default None) + basis_1q : `Basis` or str A `Basis` object, or str 
which can be cast to one corresponding to the single-qubit basis elements which comprise the basis element labels for the values of the @@ -493,6 +492,14 @@ def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', ' A list of state space labels corresponding to qudits the support of an error generator must overlap with (i.e. the support must include at least one of these qudits) in order to be included in this basis. + + default_label_type : str, optional (default 'global') + String specifying the type of error generator label to use by default. + i.e. the type of label returned by `labels`. This also impacts the + construction of the error generator matrices. + Supported options are 'global' or 'local', which correspond to + `GlobalElementaryErrorgenLabel` and `LocalElementaryErrorgenLabel`, + respectively. """ if isinstance(basis_1q, _Basis): @@ -506,6 +513,7 @@ def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', ' self.state_space = state_space self.max_weights = max_weights if max_weights is not None else dict() self._sslbl_overlap = sslbl_overlap + self._default_lbl_typ = default_label_type assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" assert(all([eetyp in ('H', 'S', 'C', 'A') for eetyp in elementary_errorgen_types])), \ @@ -536,7 +544,8 @@ def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', ' # this should never happen - somehow the statespace doesn't have all the labels! assert(False), "Logic error! State space doesn't contain all of the present labels!!" - self._cached_labels = None + self._cached_global_labels = None + self._cached_local_labels = None self._cached_matrices = None self._cached_dual_matrices = None self._cached_supports = None @@ -578,14 +587,44 @@ def to_explicit_basis(self): #TODO: Why can't this be done at initialization time? 
@property def labels(self): - if self._cached_labels is None: + """ + Tuple of either `GlobalElementaryErrorgenLabel` or `LocalElementaryErrorgenLabel` objects + for this basis, with which one determined by the `default_label_type` specified on basis + construction. + + For specific label types see the `global_labels` and `local_labels` methods. + """ + + if self._default_lbl_typ == 'global': + return self.global_labels() + else: + return self.local_labels() + + def global_labels(self): + """ + Return a list of labels for this basis as `GlobalElementaryErrorgenLabel` + objects. + """ + if self._cached_global_labels is None: labels = [] for eetyp in self._elementary_errorgen_types: labels.extend(self._create_ordered_labels(eetyp, self._basis_1q, self.state_space, self.max_weights.get(eetyp, None), self._sslbl_overlap)) - self._cached_labels = tuple(labels) - return self._cached_labels + + self._cached_global_labels = tuple(labels) + return self._cached_global_labels + + def local_labels(self): + """ + Return a list of labels for this basis as `LocalElementaryErrorgenLabel` + objects. + """ + if self._cached_local_labels is None: + if self._cached_global_labels is None: + self._cached_global_labels = self.global_labels() + self._cached_local_labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl) for lbl in self._cached_global_labels]) + return self._cached_local_labels def sublabels(self, errorgen_type): """ @@ -599,22 +638,26 @@ def sublabels(self, errorgen_type): Returns ------- - tuple of `GlobalElementaryErrorgenLabel` + tuple of either `GlobalElementaryErrorgenLabels` or `LocalElementaryErrorgenLabels` """ - - return self._create_ordered_labels(errorgen_type, self._basis_1q, self.state_space, + #TODO: It should be possible to do this much faster than regenerating these from scratch. + #Perhaps by caching the error generators by type at construction time. 
+ labels = self._create_ordered_labels(errorgen_type, self._basis_1q, self.state_space, self.max_weights.get(errorgen_type, None), self._sslbl_overlap) + if self._default_lbl_typ == 'local': + labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl) for lbl in labels]) + return labels @property - def elemgen_supports(self): + def elemgen_supports(self, identity_label='I'): """ Returns a tuple of tuples, each corresponding to the support of the elementary error generators in this basis, returned in the same order as they appear in `labels`. """ if self._cached_supports is None: - self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self.labels]) + self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self.global_labels()]) return self._cached_supports @property @@ -676,6 +719,11 @@ def label_index(self, label, ok_if_missing=False): ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. """ + #CIO: I don't entirely understand the intention behind this method, so rather than trying to make it work + #using `LocalElementaryErrorgenLabel` I'll just assert it is a global one for now... 
+ if isinstance(label, _LocalElementaryErrorgenLabel): + raise NotImplementedError('This method is not currently implemented for `LocalElementaryErrorgenLabel` inputs.') + support = label.sslbls eetype = label.errorgen_type bels = label.basis_element_labels @@ -706,7 +754,6 @@ def label_index(self, label, ok_if_missing=False): raise ValueError("Invalid elementary errorgen type: %s" % str(eetype)) return base + indices[label] - def create_subbasis(self, sslbl_overlap, retain_max_weights=True): """ diff --git a/pygsti/baseobjs/errorgenlabel.py b/pygsti/baseobjs/errorgenlabel.py index 19d0cff49..a900be7da 100644 --- a/pygsti/baseobjs/errorgenlabel.py +++ b/pygsti/baseobjs/errorgenlabel.py @@ -29,6 +29,26 @@ class LocalElementaryErrorgenLabel(ElementaryErrorgenLabel): """ @classmethod def cast(cls, obj, sslbls=None, identity_label='I'): + """ + Method for casting an object to an instance of LocalElementaryErrorgenLabel + + Parameters + ---------- + obj : `LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, tuple or list + Object to cast. + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a GlobalElementaryErrorgenLabel + or from a tuple of length 3 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). + + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. 
+ + Returns + ------- + LocalElementaryErrorgenLabel + """ if isinstance(obj, LocalElementaryErrorgenLabel): return obj elif isinstance(obj, GlobalElementaryErrorgenLabel): diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index b8f585906..bd191a004 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -27,6 +27,75 @@ class LocalStimErrorgenLabel(_ElementaryErrorgenLabel): Null """ + @classmethod + def cast(cls, obj, sslbls=None): + """ + Method for casting objects to instances of LocalStimErrorgenLabel. + + Parameters + ---------- + obj : `LocalStimErrorgenLabel`, ``LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, tuple or list + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a GlobalElementaryErrorgenLabel + or from a tuple of length 3 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). + + Returns + ------- + `LocalStimErrorgenLabel` + """ + if isinstance(obj, LocalStimErrorgenLabel): + return obj + + if isinstance(obj, _GEEL): + #convert to a tuple representation + assert sslbls is not None, 'Must specify sslbls when casting from `GlobalElementaryErrorgenLabel`.' + obj = (obj.errorgen_type, obj.basis_element_labels, obj.sslbls) + + if isinstance(obj, _LEEL): + #convert to a tuple representation + obj = (obj.errorgen_type, obj.basis_element_labels) + + if isinstance(obj, (tuple, list)): + #In this case assert that the first element of the tuple is a string corresponding to the + #error generator type. + errorgen_type = obj[0] + + #two elements for a local label and three for a global one + #second element should have the basis element labels + assert len(obj)==2 or len(obj)==3 and isinstance(obj[1], (tuple, list)) + + #if a global label tuple the third element should be a tuple or list. 
+ if len(obj)==3: + assert isinstance(obj[2], (tuple, list)) + assert sslbls is not None, 'Must specify sslbls when casting from a tuple or list of length 3. See docstring.' + #convert to local-style bels. + indices_to_replace = [sslbls.index(sslbl) for sslbl in obj[2]] + local_bels = [] + for global_lbl in obj[1]: + #start by figure out which initialization to use, either stim + #or a string. + local_bel = stim.PauliString('I'*len(sslbls)) + for kk, k in enumerate(indices_to_replace): + local_bel[k] = global_lbl[kk] + local_bels.append(local_bel) + else: + local_bels = obj[1] + + #now build the LocalStimErrorgenLabel + stim_bels = [] + for bel in local_bels: + if isinstance(bel, str): + stim_bels.append(stim.PauliString(bel)) + elif isinstance(bel, stim.PauliString): + stim_bels.append(bel) + else: + raise ValueError('Only str and `stim.PauliString` basis element labels are supported presently.') + + return cls(errorgen_type, stim_bels) + + def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initial_label=None, label=None): """ diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 4cf0fbf52..983e5a89f 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -148,7 +148,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): return new_errorgen_layer -def commute_error_generators(errorgen_1, errorgen_2, flip_weight=False, weight=1.0): +def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0): """ Returns the commutator of two error generators. I.e. [errorgen_1, errorgen_2]. From 27a47e94abe2dc7d6edc05bfd4e0d45526273aa8 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 4 Nov 2024 22:30:33 -0700 Subject: [PATCH 018/102] Address basis element label ordering First pass at trying to resolve the fact that we typically only store have the C and A generators due to symmetry, meaning there is a preferred basis label ordering. 
--- pygsti/baseobjs/errorgenbasis.py | 6 +- pygsti/tools/errgenproptools.py | 207 ++++++++++++++++++++++--------- 2 files changed, 154 insertions(+), 59 deletions(-) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 086e6d4b4..99ec98a64 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -623,7 +623,7 @@ def local_labels(self): if self._cached_local_labels is None: if self._cached_global_labels is None: self._cached_global_labels = self.global_labels() - self._cached_local_labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl) for lbl in self._cached_global_labels]) + self._cached_local_labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl, sslbls=self.sslbls) for lbl in self._cached_global_labels]) return self._cached_local_labels def sublabels(self, errorgen_type): @@ -646,11 +646,11 @@ def sublabels(self, errorgen_type): self.max_weights.get(errorgen_type, None), self._sslbl_overlap) if self._default_lbl_typ == 'local': - labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl) for lbl in labels]) + labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl, sslbls=self.sslbls) for lbl in labels]) return labels @property - def elemgen_supports(self, identity_label='I'): + def elemgen_supports(self): """ Returns a tuple of tuples, each corresponding to the support of the elementary error generators in this basis, returned in diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 983e5a89f..dbf655ea4 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -126,7 +126,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): for error1 in current_errgen_dict_1.keys(): for error2 in current_errgen_dict_2.keys(): #get the list of error generator labels - commuted_errgen_list = commute_error_generators(error1, error2, + commuted_errgen_list = error_generator_commutator(error1, error2, 
weight=1/2*current_errgen_dict_1[error1]*current_errgen_dict_1[error2]) print(commuted_errgen_list) #Add all of these error generators to the working dictionary of updated error generators and weights. @@ -206,16 +206,12 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if errorgen_2_bel_0 == ptup[1]: errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup[0])) else: - errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), 1j*w*ptup[0])) - - elif errorgen_1_type=='S' and errorgen_2_type=='H': - ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) - if ptup is not None: - if errorgen_2_bel_0 == ptup[1]: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), -1j*w*ptup[0])) - else: - errorGens.append(( _LSE('C', [errorgen_2_bel_0, ptup[1]]), -1j*w*ptup[0])) + new_bels = [errorgen_2_bel_0, ptup[1]] if stim_pauli_string_less_than(errorgen_2_bel_0, ptup[1])\ + else [ptup[1], errorgen_2_bel_0] + errorGens.append(( _LSE('C', new_bels), 1j*w*ptup[0])) + elif errorgen_1_type=='S' and errorgen_2_type=='H': + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='H' and errorgen_2_type=='C': ptup1 = com(errorgen_2_bel_0 , errorgen_1_bel_0) @@ -224,28 +220,38 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup1[1] == errorgen_2_bel_1: errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0])) else: - errorGens.append((_LSE('C', [ptup1[1], errorgen_2_bel_1]), 1j*w*ptup1[0])) + new_bels = [ptup1[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1)\ + else [errorgen_2_bel_1, ptup1[1]] + errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] == errorgen_2_bel_0: errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup2[0])) else: - errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) + new_bels = [ptup2[1], errorgen_2_bel_0] if 
stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0)\ + else [errorgen_2_bel_0, ptup2[1]] + errorGens.append((_LSE('C', new_bels), 1j*w*ptup2[0])) elif errorgen_1_type=='C' and errorgen_2_type=='H': - errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='H' and errorgen_2_type=='A': ptup1 = com(errorgen_1_bel_0 , errorgen_2_bel_0) ptup2 = com(errorgen_1_bel_0 , errorgen_2_bel_1) if ptup1 is not None: if ptup1[1] != errorgen_2_bel_1: - errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) + if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1): + errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) + else: + errorGens.append((_LSE('A', [errorgen_2_bel_1, ptup1[1]]), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] != errorgen_2_bel_0: - errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) + if stim_pauli_string_less_than(errorgen_2_bel_0, ptup2[1]): + errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) elif errorgen_1_type=='A' and errorgen_2_type=='H': - errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='S' and errorgen_2_type=='S': #Commutator of S with S is zero. 
@@ -256,21 +262,27 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_1 , errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append(( _LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append(( _LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append(( _LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0]) ) + errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -280,7 +292,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] == identity: errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', 
[ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]) , 1j*.5*w*ptup1[0]*ptup2[0])) #ptup3 is just the product from ptup2 in reverse, so this can be done #more efficiently, but I'm not going to do that at present... @@ -288,17 +303,21 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup3[1] == identity: errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup3[0]) ) else: - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) + if stim_pauli_string_less_than(errorgen_1_bel_0, ptup3[1]): + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) + else: + errorGens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , 1j*.5*w*ptup1[0]*ptup3[0])) elif errorgen_1_type == 'C' and errorgen_2_type == 'S': - errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'S' and errorgen_2_type == 'A': ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) @@ -307,7 +326,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) if 
ptup[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) @@ -322,19 +342,25 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #Which I don't think is possible when these come from valid error genator indices. #errorgen_1_bel_0 can't be the identity, #And com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be by the same - #argument that it can't be errorgen_1_bel_0 - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -1j*.5*w*ptup1[0]*ptup2[0])) + #argument that it can't be errorgen_1_bel_0 + if stim_pauli_string_less_than(errorgen_1_bel_0, ptup2[1]): + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -1j*.5*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup2[0])) elif errorgen_1_type == 'A' and errorgen_1_type == 'S': - errorGens = commute_error_generators(errorgen_2,errorgen_1, flip_weight=True, weight=weight) + errorGens = error_generator_commutator(errorgen_2,errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'C' and errorgen_2_type == 'C': ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) - elif ptup1[1] == identity: + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif 
ptup1[1] == identity: #Are there any reordering concerns here? errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) @@ -343,7 +369,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -353,7 +382,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_1,errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -363,7 +395,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + 
errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -376,7 +411,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_1: #errorgen_2_bel_1 can't be the identity, #And com(errorgen_2_bel_0, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. - errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1): + errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_2_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -385,7 +423,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_0: #errorgen_2_bel_0 can't be the identity. #And com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. - errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0): + errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -394,7 +435,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_1: #errorgen_1_bel_1 can't be the identity. 
#And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_0) can't be either - errorGens.append((_LSE('A', [ptup2[1] , errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -403,7 +447,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_0: #errorgen_1_bel_0 can't be the identity. #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_1) can't be either - errorGens.append((_LSE('A', [ptup2[1] , errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -419,7 +466,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: - errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) @@ -428,7 +476,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: - 
errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) @@ -437,7 +486,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: - errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), 1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) @@ -446,7 +496,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: - errorGens.append((_LSE('C', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) @@ -459,7 +510,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_1: #errorgen_1_bel_1 can't be the identity. #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
- errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -468,7 +522,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_0: #errorgen_1_bel_0 can't be the identity. #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -477,7 +534,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_1: #errorgen_2_bel_1 can't be the identity. #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either - errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_1]), .5*1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] + errorGens.append((_LSE('C', new_bels), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) if ptup1 is not None: @@ -486,7 +544,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_0: #errorgen_2_bel_0 can't be the identity. 
#com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either - errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] + errorGens.append((_LSE('C', new_bels), -.5*1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -499,7 +558,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('H', [ptup3[1]]), -.25*w*ptup1[0]*ptup2[0]*ptup3[0])) elif errorgen_1_type == 'A' and errorgen_2_type == 'C': - errorGens = commute_error_generators(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'A' and errorgen_2_type == 'A': ptup1 = product(errorgen_2_bel_1, errorgen_1_bel_1) @@ -507,7 +566,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -517,7 +579,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], 
ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -527,7 +592,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -537,7 +605,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity @@ -550,6 +621,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_0: #errorgen_1_bel_0 can't be the identity. #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
+ new_bels = [ptup2[1], errorgen_1_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0) else [errorgen_1_bel_0, ptup2[1]] errorGens.append((_LSE('C', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) @@ -559,7 +631,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_1_bel_1: #errorgen_1_bel_1 can't be the identity. #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. - errorGens.append((_LSE('C', [ptup2[1], errorgen_1_bel_1]), -.5*w*ptup1[0]*ptup2[0])) + new_bels = [ptup2[1], errorgen_1_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1) else [errorgen_1_bel_1, ptup2[1]] + errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -568,7 +641,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_1: #errorgen_2_bel_1 can't be the identity. #com(errorgen_2_bel_0, com(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. - errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_1]), .5*w*ptup1[0]*ptup2[0])) + new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] + errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -577,7 +651,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup2[1] != errorgen_2_bel_0: #errorgen_2_bel_0 can't be the identity. #com(errorgen_2_bel_1, com(errorgen_1_bel_0,errorgen_1_bel_1)) can't be either. 
- errorGens.append((_LSE('C', [ptup2[1], errorgen_2_bel_0]), -.5*w*ptup1[0]*ptup2[0])) + new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] + errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -617,4 +692,24 @@ def product(P1, P2): P3 = P1*P2 return (P3.sign, P3 / P3.sign) #return (sign(P3), - # unsigned P3) \ No newline at end of file + # unsigned P3) + +def stim_pauli_string_less_than(pauli1, pauli2): + """ + Returns true if pauli1 is less than pauli2 lexicographically. + + Parameters + ---------- + pauli1, pauli2 : stim.PauliString + Paulis to compare. + """ + + #remove the signs. + unsigned_pauli1 = pauli1/pauli1.sign + unsigned_pauli2 = pauli2/pauli2.sign + + unsigned_pauli1_str = str(unsigned_pauli1)[1:].replace('_', 'I') + unsigned_pauli2_str = str(unsigned_pauli2)[1:].replace('_', 'I') + + return unsigned_pauli1_str < unsigned_pauli2_str + From d5f6a82397b2e0cab884b8d5d9e09d66c043a695 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 8 Nov 2024 21:54:41 -0700 Subject: [PATCH 019/102] Bunch of fixes for error generator commutators This fixes a bunch of issues with the error generator commutators. Particularly tracking down some missing factors of 2 and -1 and i. 
--- pygsti/tools/errgenproptools.py | 57 ++++++++++++++++----------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index dbf655ea4..c3fd95caf 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -204,7 +204,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) if ptup is not None: if errorgen_2_bel_0 == ptup[1]: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup[0])) + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup[0])) else: new_bels = [errorgen_2_bel_0, ptup[1]] if stim_pauli_string_less_than(errorgen_2_bel_0, ptup[1])\ else [ptup[1], errorgen_2_bel_0] @@ -218,14 +218,14 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup2 = com(errorgen_2_bel_1 , errorgen_1_bel_0) if ptup1 is not None: if ptup1[1] == errorgen_2_bel_1: - errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0])) + errorGens.append((_LSE('S', [errorgen_2_bel_1]), 2*1j*w*ptup1[0])) else: new_bels = [ptup1[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1)\ else [errorgen_2_bel_1, ptup1[1]] errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] == errorgen_2_bel_0: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 1j*w*ptup2[0])) + errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup2[0])) else: new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0)\ else [errorgen_2_bel_0, ptup2[1]] @@ -269,7 +269,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 
1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) @@ -282,7 +282,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -301,7 +301,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #more efficiently, but I'm not going to do that at present... ptup3 = product(errorgen_1_bel_0, ptup1[1]) if ptup3[1] == identity: - errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup3[0]) ) + errorGens.append((_LSE('H', [errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup3[0]) ) else: if stim_pauli_string_less_than(errorgen_1_bel_0, ptup3[1]): errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) @@ -320,17 +320,17 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) - if ptup[1] != ptup2[1]: + if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) 
ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -344,12 +344,12 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #And com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be by the same #argument that it can't be errorgen_1_bel_0 if stim_pauli_string_less_than(errorgen_1_bel_0, ptup2[1]): - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -1j*.5*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) - elif errorgen_1_type == 'A' and errorgen_1_type == 'S': - errorGens = error_generator_commutator(errorgen_2,errorgen_1, flip_weight=True, weight=weight) + elif errorgen_1_type == 'A' and errorgen_2_type == 'S': + errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'C' and errorgen_2_type == 'C': ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) @@ -360,10 +360,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - elif ptup1[1] == identity: #Are there any reordering concerns here? 
+ elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) @@ -376,7 +376,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) ptup2 = product(errorgen_2_bel_1,errorgen_1_bel_0) @@ -389,7 +389,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) @@ -402,7 +402,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -470,7 +470,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 
1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) @@ -480,7 +480,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) @@ -490,7 +490,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) @@ -500,8 +500,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) - + errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -573,7 +572,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) ptup2 = product(errorgen_1_bel_1, 
errorgen_2_bel_1) @@ -586,7 +585,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) @@ -599,7 +598,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) @@ -612,7 +611,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight elif ptup1[1] == identity: errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -622,7 +621,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_0 can't be the identity. #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
new_bels = [ptup2[1], errorgen_1_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0) else [errorgen_1_bel_0, ptup2[1]] - errorGens.append((_LSE('C', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -662,7 +661,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup3 is not None: #it shouldn't be possible for ptup3 to be identity given valid error generator #indices. - errorGens.append((_LSE('H', [ptup3[1]]), .25*w*ptup1[0]*ptup2[0]*ptup3[0])) + errorGens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) return errorGens From ac56635a7f26b0d0773c853d280629f30d7fc8fe Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 8 Nov 2024 23:23:39 -0700 Subject: [PATCH 020/102] Fix more errors in commutator code Fix some errors in the commutator code that meant some of the S terms were not being calculated properly for the C/A and A/A commutators. --- pygsti/tools/errgenproptools.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c3fd95caf..8fbb5c151 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -535,6 +535,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] errorGens.append((_LSE('C', new_bels), .5*1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_1, don't need to check that errorgen_2_bel_1 isn't identity. 
+ errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]*ptup2[0])) + ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) if ptup1 is not None: @@ -545,6 +548,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] errorGens.append((_LSE('C', new_bels), -.5*1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_0, don't need to check that errorgen_2_bel_0 isn't identity. + errorGens.append((_LSE('S', [errorgen_2_bel_0]), -1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -622,7 +627,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. new_bels = [ptup2[1], errorgen_1_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0) else [errorgen_1_bel_0, ptup2[1]] errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) - + else: #ptup2[1] == errorgen_1_bel_0 + errorGens.append((_LSE('S', [errorgen_1_bel_0]), w*ptup1[0]*ptup2[0])) + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: ptup2 = com(errorgen_1_bel_0, ptup1[1]) @@ -632,6 +639,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
new_bels = [ptup2[1], errorgen_1_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1) else [errorgen_1_bel_1, ptup2[1]] errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_1_bel_1 + errorGens.append((_LSE('S', [errorgen_1_bel_1]), -1*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -642,7 +651,10 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_2_bel_0, com(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) - + else: #ptup2[1] == errorgen_2_bel_1 + errorGens.append((_LSE('S', [errorgen_2_bel_1]), w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: ptup2 = com(errorgen_2_bel_1, ptup1[1]) @@ -652,6 +664,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #com(errorgen_2_bel_1, com(errorgen_1_bel_0,errorgen_1_bel_1)) can't be either. new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_0 + errorGens.append((_LSE('S', [errorgen_2_bel_0]), -1*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: From c87500a8ceb68cae81e09d2be698c523d3a4ab43 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 11 Nov 2024 21:24:34 -0700 Subject: [PATCH 021/102] Random CPTP error generators This adds a stab at functionality for random CP constrained error generators. 
In addition to enforcing the CP constraint, this also allows features like: specifying a target error rate, fixing particular error generator rates (while selecting the rest randomly), setting weights for the H and S sectors, and specifying error generator type/weight/sslbl overlap constraints. --- pygsti/tools/lindbladtools.py | 260 +++++++++++++++++++++++++++++++++- 1 file changed, 259 insertions(+), 1 deletion(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 9b24a9688..e25f64be3 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -14,7 +14,10 @@ import scipy.sparse as _sps from pygsti.tools.basistools import basis_matrices - +import pygsti.baseobjs as _bo +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel +from pygsti.baseobjs.statespace import QubitSpace as _QubitSpace +import warnings as _warnings def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_factor='auto'): """ @@ -229,3 +232,258 @@ def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N8 if sparse: lind_errgen = lind_errgen.tocsr() return lind_errgen + + +def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, + H_params=(0.,.01), SCA_params=(0.,.01), error_metric=None, error_metric_value=None, + relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, seed = None): + """ + Function for generating a random set of CPTP error generator rates. + + Parameters + ---------- + num_qubits : int + Number of qubits the error generator acts upon. + + errorgen_types : tuple of str, optional (default('H', 'S', 'C', 'A')) + Tuple of strings designating elementary error generator types to include in this + basis. Note that due to the CP constraint, certain values are not allowed, + and any tuple containing 'C' or 'A' terms must also include 'S'. 
+ + max_weights : dict, optional (default None) + An optional dictionary specifying the maximum weight + for each of the elementary error generator types, with keys + given by the strings 'H', 'S', 'C' and 'A'. If None then + there is no maximum weight. If specified, any error generator + types without entries will have no maximum weight associated + with them. + + H_params : tuple of floats, optional (default (0.,.01)) + Mean and standard deviation parameters for a normal distribution + from which the H rates will be sampled. Note that specifying a non-zero + value for the mean with generator_infidelity set to a non-trivial value + is not supported, and will raise an error. + + SCA_params : tuple of floats, optional (default (0.,.01)) + Mean and standard deviation parameters for a normal distribution + from which the entries of the matrix used in the construction of the S, C and A rates + will be sampled. Note that specifying a non-zero + value for the mean with generator_infidelity set to a non-trivial value + is not supported, and will raise an error. + + error_metric : str, optional (default None) + An optional string, used in conjunction with the error_metric_value + kwarg which specifies which metric to use in setting the sampled + channel's overall error rate. If None, no target value for the channel's + overall error rate is used. Currently supported options include: + + - 'generator_infidelity' + - 'total_generator_error' + + error_metric_value : float, optional (default None) + A float between 0 and 1 which gives the target value of the + error metric specified in 'error_metric' for the channel induced by + the randomly produced error generator. If None + then no target value is used and the returned error generator + will have a random generator infidelity. 
+ + relative_HS_contribution : tuple, optional (default None) + An optional tuple, used in conjunction with the `generator_infidelity` kwarg, + specifying the relative contributions of the H and S error generators to the + generator infidelity. The values in this tuple should sum to 1. The first entry + corresponds to the H sector, and the second the S sector. + + sslbl_overlap : list of sslbls, optional (default None) + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this basis. + + fixed_errorgen_rates : dict, optional (default None) + An optional dictionary whose keys are `GlobalElementaryErrorgenLabel` + objects, and whose values are error generator rates. When specified, the + rates in this dictionary will override any randomly selected values in the + final returned error generator rate dictionary. The inclusion of these + rates is performed independently of any of the kwargs which otherwise + control the weight and allowed types of the error generators in this + model. If specifying fixed C and A rates it is possible for the final + error generator to be non-CP. + + seed : int, optional (default None) + An optional integer used in seeding the RNG. + + Returns + ------- + Dictionary of error generator coefficient labels and rates + """ + + #Add various assertions + if fixed_errorgen_rates is None: + fixed_errorgen_rates = dict() + + if error_metric is not None: + assert H_params[0] == 0. and SCA_params[0] == 0., 'Specifying non-zero HSCA means together with a target error metric is not supported.' + if error_metric not in ('generator_infidelity', 'total_generator_error'): + raise ValueError('Unsupported error metric type. Currently supported options are generator_infidelity and total_generator_error') + #Add a check that the desired error metric value is attainable given the values of fixed_errorgen_rates. 
+ if fixed_errorgen_rates: + #verify that all of the keys are GlobalElementaryErrorgenLabel objects. + msg = 'All keys of fixed_errorgen_rates must be GlobalElementaryErrorgenLabel.' + assert all([isinstance(key, _GlobalElementaryErrorgenLabel) for key in fixed_errorgen_rates.keys()]), msg + + #get the H and S rates from the dictionary. + fixed_H_rates = _np.array([val for key, val in fixed_errorgen_rates.items() if key.errorgen_type == 'H']) + fixed_S_rates = _np.array([val for key, val in fixed_errorgen_rates.items() if key.errorgen_type == 'S']) + fixed_S_contribution = _np.sum(fixed_S_rates) + if error_metric == 'generator_infidelity': + fixed_H_contribution = _np.sum(fixed_H_rates**2) + fixed_error_metric_value = fixed_S_contribution + fixed_H_contribution + elif error_metric == 'total_generator_error': + fixed_H_contribution = _np.sum(_np.abs(fixed_H_rates)) + fixed_error_metric_value = fixed_S_contribution + fixed_H_contribution + msg = f'Incompatible values of error_metric_value and fixed_errorgen_rates. The value of {error_metric}={error_metric_value}'\ + + f' is less than the value of {fixed_error_metric_value} corresponding to the given fixed_errorgen_rates_dict.' + assert fixed_error_metric_value < error_metric_value, msg + + if relative_HS_contribution is not None: + msg_H = f'Fixed H contribution to {error_metric} of {fixed_H_contribution} exceeds overall H contribution target value of {relative_HS_contribution[0]*error_metric_value}.' + msg_S = f'Fixed S contribution to {error_metric} of {fixed_S_contribution} exceeds overall S contribution target value of {relative_HS_contribution[1]*error_metric_value}.' 
+ assert fixed_H_contribution < relative_HS_contribution[0]*error_metric_value, msg_H + assert fixed_S_contribution < relative_HS_contribution[1]*error_metric_value, msg_S + else: + fixed_H_contribution = 0 + fixed_S_contribution = 0 + + + if relative_HS_contribution is not None: + assert ('H' in errorgen_types and 'S' in errorgen_types), 'Invalid relative_HS_contribution, one of either H or S is not in errorgen_types.' + if error_metric is None: + _warnings.warn('The relative_HS_contribution kwarg is only utilized when error_metric is not None, the specified value is ignored otherwise.') + else: + assert abs(1-sum(relative_HS_contribution))<=1e-7, 'The relative_HS_contribution should sum to 1.' + + if max_weights is not None: + assert max_weights['C'] <= max_weights['S'] and max_weights['A'] <= max_weights['S'], 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' + + rng = _np.random.default_rng(seed) + + #create a state space with this dimension. + state_space = _QubitSpace.cast(num_qubits) + + #create an error generator basis according the our weight specs + errorgen_basis = _bo.CompleteElementaryErrorgenBasis('pp', state_space, elementary_errorgen_types=errorgen_types, + max_weights=max_weights, sslbl_overlap=sslbl_overlap) + + #Get the labels, broken out by sector, of each of the error generators in this basis. + errgen_labels_H = errorgen_basis.sublabels('H') + errgen_labels_S = errorgen_basis.sublabels('S') + errgen_labels_C = errorgen_basis.sublabels('C') + errgen_labels_A = errorgen_basis.sublabels('A') + + #Get the number of H and S error generators. 
These are stored in HSCA order in the labels + num_H_rates = len(errgen_labels_H) + num_S_rates = len(errgen_labels_S) + + random_rates_dicts = dict() + #Generate random H rates + random_rates_dicts['H'] = {lbl: val for lbl,val in zip(errgen_labels_H, rng.normal(loc=H_params[0], scale=H_params[1], size = num_H_rates))} + + #Create a random matrix with complex gaussian entries which will be used to generator a PSD matrix for the SCA rates. + random_SCA_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + \ + 1j* rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + random_SCA_mat = random_SCA_gen_mat @ random_SCA_gen_mat.conj().T + #The random S rates are just the diagonal of random_SCA_mat. + random_rates_dicts['S'] = {lbl: val for lbl,val in zip(errgen_labels_S, _np.real(_np.diag(random_SCA_mat)).copy())} + + #The random C rates are the real part of the off diagonal entries, and the A rates the imaginary part. + random_rates_dicts['C'] = {lbl: val for lbl,val in zip(errgen_labels_C, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].real)} + random_rates_dicts['A'] = {lbl: val for lbl,val in zip(errgen_labels_A, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].imag)} + + #Add in/override the fixed rates for each of the sectors. 
+ H_fixed_keys = [] + S_fixed_keys = [] + C_fixed_keys = [] + A_fixed_keys = [] + for key in fixed_errorgen_rates: + if key.errorgen_type == 'H': + H_fixed_keys.append(key) + elif key.errorgen_type == 'S': + S_fixed_keys.append(key) + elif key.errorgen_type == 'C': + C_fixed_keys.append(key) + else: + A_fixed_keys.append(key) + + random_rates_dicts['H'].update({key:fixed_errorgen_rates[key] for key in H_fixed_keys}) + random_rates_dicts['S'].update({key:fixed_errorgen_rates[key] for key in S_fixed_keys}) + random_rates_dicts['C'].update({key:fixed_errorgen_rates[key] for key in C_fixed_keys}) + random_rates_dicts['A'].update({key:fixed_errorgen_rates[key] for key in A_fixed_keys}) + + #For each sector construct a complementary structure of the free(ish) parameters error generator parameters for + #that sector. + H_free_keys = [key for key in errgen_labels_H if key not in fixed_errorgen_rates] #membership checking is (often) faster with dicts + S_free_keys = [key for key in errgen_labels_S if key not in fixed_errorgen_rates] + C_free_keys = [key for key in errgen_labels_C if key not in fixed_errorgen_rates] + A_free_keys = [key for key in errgen_labels_A if key not in fixed_errorgen_rates] + + #Now it is time to apply the various normalizations necessary to get the desired target + #generator infidelity and sector weights. + if error_metric is not None: + #Get the free parameter's For both generator infidelity we use the sum of the S rates + current_S_sum_free = _np.sum([random_rates_dicts['S'][key] for key in S_free_keys]) + if error_metric == 'generator_infidelity': + #for generator infidelity we use the sum of the squared H rates. + current_H_sum_free = _np.sum([random_rates_dicts['H'][key]**2 for key in H_free_keys]) + elif error_metric == 'total_generator_error': + #for total generator error we use the sum of the H rates directly. 
+ current_H_sum_free = _np.sum([abs(random_rates_dicts['H'][key]) for key in H_free_keys]) + + total_H_sum = current_H_sum_free + fixed_H_contribution + total_S_sum = current_S_sum_free + fixed_S_contribution + + if relative_HS_contribution is not None: + #calculate the target values of the H and S contributions to the error metric + #given the specified contributions + req_H_sum = relative_HS_contribution[0]*error_metric_value + req_S_sum = relative_HS_contribution[1]*error_metric_value + + #If we haven't specified a relative contribution for H and S then we will scale these + #to give the correct generator infidelity while preserving whatever relative contribution + #to the generator infidelity they were randomly sampled to have. + else: + #Get the current relative contributions. + current_H_contribution = total_H_sum/(total_H_sum+total_S_sum) + current_S_contribution = 1-current_H_contribution + req_H_sum = current_H_contribution*error_metric_value + req_S_sum = current_S_contribution*error_metric_value + + #this is how much we still need to be contributed by the free parameters + needed_H_free = req_H_sum - fixed_H_contribution + needed_S_free = req_S_sum - fixed_S_contribution + + if error_metric == 'generator_infidelity': + #The scale factor for the H rates is sqrt(req_squared_H_sum/current_squared_H_sum) + H_scale_factor = _np.sqrt(needed_H_free/current_H_sum_free) + elif error_metric == 'total_generator_error': + #The scale factor for the S rates is req_S_sum/current_S_sum + H_scale_factor = needed_H_free/current_H_sum_free + #The scale factor for the S rates is req_S_sum/current_S_sum + S_scale_factor = needed_S_free/current_S_sum_free + + #Rescale the free random rates, note that the free SCA terms will all be scaled by the S_scale_factor + #to preserve PSD. 
+ for key in H_free_keys: + random_rates_dicts['H'][key]*=H_scale_factor + for key in S_free_keys: + random_rates_dicts['S'][key]*=S_scale_factor + for key in C_free_keys: + random_rates_dicts['C'][key]*=S_scale_factor + for key in A_free_keys: + random_rates_dicts['A'][key]*=S_scale_factor + + #Now turn this into a rates dict + errorgen_rates_dict = dict() + for errgen_type in errorgen_types: + errorgen_rates_dict.update(random_rates_dicts[errgen_type]) + + return errorgen_rates_dict + From 396c91b9873512a1237ae931cb099b9f3bf3c0b4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 11 Nov 2024 22:05:16 -0700 Subject: [PATCH 022/102] Higher-order BCH Implementation Add in support for higher-order BCH up to fourth order. --- .../errorpropagator_dev.py | 101 ++++++------- pygsti/tools/errgenproptools.py | 142 +++++++++++++----- 2 files changed, 147 insertions(+), 96 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index cc347e809..2936ac622 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -59,17 +59,19 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us """ if use_bch: - raise NotImplementedError('Still under development.') - propagated_error_generators = self.propagate_errorgens_bch(circuit, multi_gate_dict=multi_gate_dict, - *bch_kwargs) + #should return a single dictionary of error generator rates + propagated_error_generator = self.propagate_errorgens_bch(circuit, multi_gate_dict=multi_gate_dict, + **bch_kwargs) + #convert this to a process matrix + return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp')) else: propagated_error_generators = self.propagate_errorgens(circuit, multi_gate_dict, include_spam) #loop though the propagated error generator layers and construct their error generators. 
#Then exponentiate exp_error_generators = [] - for err_gen_layer_list in propagated_error_generators: - if err_gen_layer_list: #if not empty. Should be length one if not empty. + for err_gen_layer in propagated_error_generators: + if err_gen_layer: #if not empty. #Keep the error generator in the standard basis until after the end-of-circuit #channel is constructed so we can reduce the overhead of changing basis. exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='pp'))) @@ -229,14 +231,14 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul with state preparation and measurement. """ - msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ - +' Otherwise only a value of 1 is currently implemented.' - if not bch_layerwise: - assert bch_order==1, msg - else: - msg1 = 'When bch_layerwise is False only bch_order values of 1 and 2 are currently'\ - + ' supported.' - assert bch_order==1 or bch_order==2, msg1 + #msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ + # +' Otherwise only a value of 1 is currently implemented.' + #if not bch_layerwise: + # assert bch_order==1, msg + #else: + # msg1 = 'When bch_layerwise is False only bch_order values of 1 and 2 are currently'\ + # + ' supported.' + # assert bch_order==1 or bch_order==2, msg1 #if not doing layerwise BCH then we can re-use `propagate_errorgens` fully. if not bch_layerwise: @@ -261,11 +263,11 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul assert circuit.line_labels is not None and circuit.line_labels != ('*',) errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) - #propagate the errorgen_layers through the propagation_layers to get a list - #of end of circuit error generator dictionaries. 
- propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers, + #propagate the errorgen_layers through the propagation_layers to get the + #end of circuit error generator dictionary. + propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers, + bch_order=bch_order, include_spam = include_spam) - return propagated_errorgen_layers @@ -434,7 +436,7 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl value currently found in the model. Returns ------- - List of lists of dictionaries, each one containing the error generator coefficients and rates for a circuit layer, + List of dictionaries, each one containing the error generator coefficients and rates for a circuit layer, with the error generator coefficients now represented using LocalStimErrorgenLabel. """ @@ -471,7 +473,7 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j)] = rate if fixed_rate is None else fixed_rate else: errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis)] = rate if fixed_rate is None else fixed_rate - error_gen_dicts_by_layer.append([errorgen_layer]) + error_gen_dicts_by_layer.append(errorgen_layer) return error_gen_dicts_by_layer def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, include_spam=True): @@ -519,16 +521,12 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ for i in range(stopping_idx): err_layer = errorgen_layers[i] prop_layer = propagation_layers[i] - new_err_layer = [] - #err_layer should be length 1 if using this method - for bch_level_list in err_layer: - new_error_dict=dict() - #iterate through dictionary of error generator coefficients and propagate each one. 
- for errgen_coeff_lbl in bch_level_list: - propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, bch_level_list[errgen_coeff_lbl]) - new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] - new_err_layer.append(new_error_dict) - fully_propagated_layers.append(new_err_layer) + new_error_dict=dict() + #iterate through dictionary of error generator coefficients and propagate each one. + for errgen_coeff_lbl in err_layer: + propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, err_layer[errgen_coeff_lbl]) + new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] + fully_propagated_layers.append(new_error_dict) #add the final layers which didn't require actual propagation (since they were already at the end). fully_propagated_layers.extend(errorgen_layers[stopping_idx:]) return fully_propagated_layers @@ -565,24 +563,15 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc Returns ------- - fully_propagated_layers : list of lists of dicts - A list of list of dicts with the same structure as errorgen_layers corresponding - to the results of having propagated each of the error generator layers through - the circuit to the end while combining the layers in a layerwise fashion using the - BCH approximation. As a result of this combination, this list should have a length - of one. + fully_propagated_layer : dict + Dictionart corresponding to the results of having propagated each of the error generator + layers through the circuit to the end while combining the layers in a layerwise fashion + using the BCH approximation. """ - - #Add temporary errors when trying to do BCH beyond 1st order while the details of the 2nd order - #approximation's implementation are sorted out. - if bch_order != 1: - msg = 'The implementation of the 2nd order BCH approx is still under development. For now only 1st order is supported.' 
- raise NotImplementedError(msg) - assert all([len(layer)==1 for layer in errorgen_layers]), msg - - fully_propagated_layers=[] + #TODO: Refactor this and _propagate_errorgen_layers to reduce code repetition as their current + #implementations are very close to each other. #initialize a variable as temporary storage of the result - #of performing BCH on pairwise between a propagater errorgen + #of performing BCH on pairwise between a propagated errorgen #layer and an unpropagated layer for layerwise BCH. if len(errorgen_layers)>0: combined_err_layer = errorgen_layers[0] @@ -597,25 +586,19 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc for i in range(stopping_idx): #err_layer = errorgen_layers[i] prop_layer = propagation_layers[i] - new_err_layer = [] - #err_layer should be length 1 if using this method - for bch_level_dict in combined_err_layer: - new_error_dict = dict() - #iterate through dictionary of error generator coefficients and propagate each one. - for errgen_coeff_lbl in bch_level_dict: - propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, bch_level_dict[errgen_coeff_lbl]) - new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] - new_err_layer.append(new_error_dict) + new_error_dict = dict() + #iterate through dictionary of error generator coefficients and propagate each one. 
+ for errgen_coeff_lbl in combined_err_layer: + propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, combined_err_layer[errgen_coeff_lbl]) + new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] - combined_err_layer = _eprop.bch_approximation(new_err_layer, errorgen_layers[i+1], bch_order=1) - + combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order) #If we are including spam then there will be one last error generator which we doesn't have an associated propagation #which needs to be combined using BCH. if include_spam: - combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=1) + combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order) - fully_propagated_layers.append(combined_err_layer) - return fully_propagated_layers + return combined_err_layer def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): """ diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 8fbb5c151..a178d3405 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -15,6 +15,7 @@ from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen from numpy import conjugate +from functools import reduce def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): """ @@ -80,9 +81,8 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): Parameters ---------- - errgen_layer_1 : list of dicts - Each lists contains dictionaries of the error generator coefficients and rates for a circuit layer. - Each dictionary corresponds to a different order of the BCH approximation. 
+ errgen_layer_1 : dict + Dictionary of the error generator coefficients and rates for a circuit layer. The error generator coefficients are represented using LocalStimErrorgenLabel. errgen_layer_2 : list of dicts @@ -90,62 +90,130 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): Returns ------- - combined_errgen_layer : list of dicts? - A list with the same general structure as `errgen_layer_1` and `errgen_layer_2`, but with the + combined_errgen_layer : dict + A dictionary with the same general structure as `errgen_layer_1` and `errgen_layer_2`, but with the rates combined according to the selected order of the BCH approximation. """ - if bch_order != 1: - msg = 'The implementation of the 2nd order BCH approx is still under development. For now only 1st order is supported.' - raise NotImplementedError(msg) - new_errorgen_layer=[] for curr_order in range(0,bch_order): - working_order_dict = dict() #add first order terms into new layer if curr_order == 0: - #get the dictionaries of error generator coefficient labels and rates - #for the current working BCH order. - current_errgen_dict_1 = errgen_layer_1[curr_order] - current_errgen_dict_2 = errgen_layer_2[curr_order] #Get a combined set of error generator coefficient labels for these two #dictionaries. - current_combined_coeff_lbls = set(current_errgen_dict_1.keys()) | set(current_errgen_dict_2.keys()) + current_combined_coeff_lbls = set(errgen_layer_1.keys()) | set(errgen_layer_2.keys()) + first_order_dict = dict() #loop through the combined set of coefficient labels and add them to the new dictionary for the current BCH #approximation order. If present in both we sum the rates. 
for coeff_lbl in current_combined_coeff_lbls: - working_order_dict[coeff_lbl] = current_errgen_dict_1.get(coeff_lbl, 0) + current_errgen_dict_2.get(coeff_lbl, 0) - new_errorgen_layer.append(working_order_dict) + first_order_dict[coeff_lbl] = errgen_layer_1.get(coeff_lbl, 0) + errgen_layer_2.get(coeff_lbl, 0) + + #allow short circuiting to avoid an expensive bunch of recombination logic when only using first order BCH + #which will likely be a common use case. + if bch_order==1: + return first_order_dict + new_errorgen_layer.append(first_order_dict) + #second order BCH terms. + # (1/2)*[X,Y] elif curr_order == 1: - current_errgen_dict_1 = errgen_layer_1[curr_order-1] - current_errgen_dict_2 = errgen_layer_2[curr_order-1] #calculate the pairwise commutators between each of the error generators in current_errgen_dict_1 and #current_errgen_dict_2. - for error1 in current_errgen_dict_1.keys(): - for error2 in current_errgen_dict_2.keys(): + commuted_errgen_list = [] + for error1 in errgen_layer_1.keys(): + for error2 in errgen_layer_2.keys(): #get the list of error generator labels - commuted_errgen_list = error_generator_commutator(error1, error2, - weight=1/2*current_errgen_dict_1[error1]*current_errgen_dict_1[error2]) - print(commuted_errgen_list) - #Add all of these error generators to the working dictionary of updated error generators and weights. - #There may be duplicates, which should be summed together. - for error_tuple in commuted_errgen_list: - working_order_dict[error_tuple[0]]=error_tuple[1] + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight= .5*errgen_layer_1[error1]*errgen_layer_2[error2]) + commuted_errgen_list.extend(commuted_errgen_sublist) + #print(f'{commuted_errgen_list=}') + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. 
+ second_order_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list: + second_order_comm_dict[error_tuple[0]] += error_tuple[1] + new_errorgen_layer.append(second_order_comm_dict) + #third order BCH terms + # (1/12)*([X,[X,Y]] - [Y,[X,Y]]) + elif curr_order == 2: + #we've already calculated (1/2)*[X,Y] in the previous order, so reuse this result. + #two different lists for the two different commutators so that we can more easily reuse + #this at higher order if needed. + commuted_errgen_list_1 = [] + commuted_errgen_list_2 = [] + first_order_comm = new_errorgen_layer[1] + + for error1a, error1b in zip(errgen_layer_1.keys(), errgen_layer_2.keys()): + for error2 in first_order_comm: + first_order_comm_rate = first_order_comm[error2] + #only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator + commuted_errgen_sublist = error_generator_commutator(error1a, error2, + weight=(1/6)*errgen_layer_1[error1a]*first_order_comm_rate) + commuted_errgen_list_1.extend(commuted_errgen_sublist) + + #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator + commuted_errgen_sublist = error_generator_commutator(error1b, error2, + weight=-(1/6)*errgen_layer_2[error1b]*first_order_comm_rate) + commuted_errgen_list_2.extend(commuted_errgen_sublist) + + #turn the two new commuted error generator lists into dictionaries. + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. 
+ third_order_comm_dict_1 = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_1} + third_order_comm_dict_2 = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_2} - if len(errgen_layer_1)==2: - for error_key in errgen_layer_1[1]: - working_order_dict[error_key]=errgen_layer_1[1][error_key] - if len(errgen_layer_2)==2: - for error_key in errgen_layer_2[1]: - working_order_dict[error_key]=errgen_layer_2[1][error_key] - new_errorgen_layer.append(working_order_dict) + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list_1: + third_order_comm_dict_1[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_2: + third_order_comm_dict_2[error_tuple[0]] += error_tuple[1] + + #finally sum these two dictionaries + third_order_comm_dict = {key: third_order_comm_dict_1.get(key, 0) + third_order_comm_dict_2.get(key, 0) + for key in set(third_order_comm_dict_1) | set(third_order_comm_dict_2)} + new_errorgen_layer.append(third_order_comm_dict) + + #fourth order BCH terms + # -(1/24)*[Y,[X,[X,Y]]] + elif curr_order == 3: + #we've already calculated (1/12)*[X,[X,Y]] so reuse this result. + #this is stored in third_order_comm_dict_1 + commuted_errgen_list = [] + for error1 in errgen_layer_2.keys(): + for error2 in third_order_comm_dict_1.keys(): + #only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight= -.5*errgen_layer_2[error1]*third_order_comm_dict_1[error2]) + commuted_errgen_list.extend(commuted_errgen_sublist) + + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. 
+ fourth_order_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list: + fourth_order_comm_dict[error_tuple[0]] += error_tuple[1] + new_errorgen_layer.append(fourth_order_comm_dict) else: - raise ValueError("Higher Orders are not Implemented Yet") - return new_errorgen_layer + raise NotImplementedError("Higher orders beyond fourth order are not implemented yet.") + + #Finally accumulate all of the dictionaries in new_errorgen_layer into a single one, summing overlapping terms. + errorgen_labels_by_order = [set(order_dict) for order_dict in new_errorgen_layer] + complete_errorgen_labels = reduce(lambda a, b: a|b, errorgen_labels_by_order) + + #initialize a dictionary with requisite keys + new_errorgen_layer_dict = {lbl: 0 for lbl in complete_errorgen_labels} + + for order_dict in new_errorgen_layer: + for lbl, rate in order_dict.items(): + new_errorgen_layer_dict[lbl] += rate + + return new_errorgen_layer_dict def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0): From 2424691e0913078dfd6c8c0d7469773a1235b8d1 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 12 Nov 2024 17:16:37 -0700 Subject: [PATCH 023/102] Support for implicit models Add in support for implicit models. The main change here is a switch to using the model's circuit_layer_operator method to grab the requisite error generators for each circuit layer, rather than grabbing these from the model's member dictionaries directly as works for explicit models. 
--- .../errorpropagator_dev.py | 79 ++++++++++++------- 1 file changed, 51 insertions(+), 28 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 2936ac622..795bcde9e 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -11,6 +11,8 @@ from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis import pygsti.tools.errgenproptools as _eprop import pygsti.tools.basistools as _bt +import pygsti.tools.matrixtools as _mt +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen class ErrorGeneratorPropagator: @@ -74,13 +76,17 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us if err_gen_layer: #if not empty. #Keep the error generator in the standard basis until after the end-of-circuit #channel is constructed so we can reduce the overhead of changing basis. - exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='pp'))) + exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp'))) #Next take the product of these exponentiated error generators. #These are in circuit ordering, so reverse for matmul. 
exp_error_generators.reverse() - #print(exp_error_generators) - eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) - #eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') + if len(exp_error_generators)>1: + eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + else: + eoc_error_channel = exp_error_generators[0] + + if mx_basis != 'pp': + eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='pp', to_basis=mx_basis) return eoc_error_channel @@ -446,25 +452,25 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl #TODO: Infer the number of qubits from the model and/or the circuit somehow. #Pull out the error generator dictionaries for each operation (may need to generalize this for implicit models): - model_error_generator_dict = dict() #key will be a label and value the lindblad error generator dictionary. - for op_lbl, op in self.model.operations.items(): - #TODO add assertion that the operation is a lindblad error generator type modelmember. - model_error_generator_dict[op_lbl] = op.errorgen_coefficients() + #model_error_generator_dict = dict() #key will be a label and value the lindblad error generator dictionary. + #for op_lbl, op in self.model.operations.items(): + # #TODO add assertion that the operation is a lindblad error generator type modelmember. + # model_error_generator_dict[op_lbl] = op.errorgen_coefficients() #add in the error generators for the prep and measurement if needed. 
- if include_spam: - for prep_lbl, prep in self.model.preps.items(): - model_error_generator_dict[prep_lbl] = prep.errorgen_coefficients() - for povm_lbl, povm in self.model.povms.items(): - model_error_generator_dict[povm_lbl] = povm.errorgen_coefficients() + #if include_spam: + # for prep_lbl, prep in self.model.preps.items(): + # model_error_generator_dict[prep_lbl] = prep.errorgen_coefficients() + # for povm_lbl, povm in self.model.povms.items(): + # model_error_generator_dict[povm_lbl] = povm.errorgen_coefficients() #TODO: Generalize circuit time to not be in one-to-one correspondence with the layer index. error_gen_dicts_by_layer = [] for j in range(len(circuit)): circuit_layer = circuit[j] # get the layer #can probably relax this if we detect that the model is a crosstalk free model. - assert isinstance(circuit_layer, Label), 'Correct support for parallel gates is still under development.' + #assert isinstance(circuit_layer, Label), 'Correct support for parallel gates is still under development.' errorgen_layer = dict() - layer_errorgen_coeff_dict = model_error_generator_dict[circuit_layer] #get the errors for the gate + layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients() #get the errors for the gate for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) @@ -625,22 +631,39 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): #Construct a list of new errorgen coefficients by looping through the keys of errorgen_layer #and converting them to LocalElementaryErrorgenLabels. 
- local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()] - - errorgen_basis = _ExplicitElementaryErrorgenBasis(self.model.state_space, local_errorgen_coeffs, basis_1q='PP') - #Stack the arrays and then use broadcasting to weight them according to the rates - elemgen_matrices_array = _np.stack(errorgen_basis.elemgen_matrices, axis=-1) - weighted_elemgen_matrices_array = _np.fromiter(errorgen_layer.values(), dtype=_np.double)*elemgen_matrices_array - - #The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis. - errorgen = _np.sum(weighted_elemgen_matrices_array, axis = 2) + #TODO: Debug this implementation, something weird is going on with the basis management and is only + #getting picked up for two or more qubits. + #local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()] + # + #errorgen_basis = _ExplicitElementaryErrorgenBasis(self.model.state_space, local_errorgen_coeffs, basis_1q='PP', elemgen_basis='pp') + #print(f'{errorgen_basis.elemgen_matrices=}') + ##Stack the arrays and then use broadcasting to weight them according to the rates + #elemgen_matrices_array = _np.stack(errorgen_basis.elemgen_matrices, axis=-1) + #weighted_elemgen_matrices_array = _np.fromiter(errorgen_layer.values(), dtype=_np.double)*elemgen_matrices_array + #weighted_elemgen_matrices_array = _np.real_if_close(weighted_elemgen_matrices_array) + ##The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis. + #errorgen = _np.sum(weighted_elemgen_matrices_array, axis = 2) + ##print(f'{errorgen=}') + # + ##finally need to change from the standard basis (which is what the error generator is currently in) + ##to the pauli basis. 
+ #try: + # errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis)#, expect_real=False) + #except ValueError as err: + # print(f'{local_errorgen_coeffs=}') + # print(f'{errorgen_basis.labels=}') + # print(f'{_mt.is_hermitian(errorgen)=}') + # print(f'{errorgen_layer=}') + # _mt.print_mx(errorgen) + # raise err + + global_errorgen_coeffs = [coeff_lbl.to_global_eel() for coeff_lbl in errorgen_layer.keys()] + coeff_dict = {lbl:val for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} - #finally need to change from the standard basis (which is what the error generator is currently in) - #to the pauli basis. - errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis) + errorgen = _LindbladErrorgen.from_elementary_errorgens(coeff_dict, parameterization='GLND', state_space=self.model.state_space) - return errorgen + return errorgen.to_dense() From a4e811ef4ec08e295c7e5961b65b97554d6e1f7c Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 12 Nov 2024 17:19:00 -0700 Subject: [PATCH 024/102] Menagerie of changes Miscellaneous minor updates including: 1. Option for setting error generator label types in the new random rates function. 2. Option to seed the create_random_circuit function with an integer seed. 3. Additions to the stim conversion library 4. Flag for controlling whether the basis change function should expect the output to be real. 
--- pygsti/algorithms/randomcircuit.py | 7 +++++-- pygsti/tools/basistools.py | 8 ++++++-- pygsti/tools/internalgates.py | 3 ++- pygsti/tools/lindbladtools.py | 20 +++++++++++++++++--- 4 files changed, 30 insertions(+), 8 deletions(-) diff --git a/pygsti/algorithms/randomcircuit.py b/pygsti/algorithms/randomcircuit.py index ee7140e01..23d4f27df 100644 --- a/pygsti/algorithms/randomcircuit.py +++ b/pygsti/algorithms/randomcircuit.py @@ -844,8 +844,9 @@ def create_random_circuit(pspec, length, qubit_labels=None, sampler='Qeliminatio 1-element list consisting of a list of the relevant gate names (e.g., `lsargs` = ['Gi, 'Gxpi, 'Gypi', 'Gzpi']). - rand_state: RandomState, optional - A np.random.RandomState object for seeding RNG + rand_state: RandomState or int, optional (default None) + A np.random.RandomState object for seeding RNG. If an integer is passed in + this is used to set the seed for a newly constructed RNG. Returns ------- @@ -859,6 +860,8 @@ def create_random_circuit(pspec, length, qubit_labels=None, sampler='Qeliminatio lsargs = [] if rand_state is None: rand_state = _np.random.RandomState() + if isinstance(rand_state, int): + rand_state = _np.random.RandomState(rand_state) if isinstance(sampler, str): diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index 95471181b..835fdd264 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -118,7 +118,7 @@ def is_sparse_basis(name_or_basis): return False -def change_basis(mx, from_basis, to_basis): +def change_basis(mx, from_basis, to_basis, expect_real=True): """ Convert a operation matrix from one basis of a density matrix space to another. @@ -134,6 +134,10 @@ def change_basis(mx, from_basis, to_basis): to_basis : {'std', 'gm', 'pp', 'qt'} or Basis object The destination basis. Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt) (or a custom basis object). 
+ + expect_real : bool, optional (default True) + Optional flag specifying whether it is expected that the returned + array in the new basis is real valued. Default is True. Returns ------- @@ -196,7 +200,7 @@ def change_basis(mx, from_basis, to_basis): if not to_basis.real: return ret - if _mt.safe_norm(ret, 'imag') > 1e-8: + if expect_real and _mt.safe_norm(ret, 'imag') > 1e-8: raise ValueError("Array has non-zero imaginary part (%g) after basis change (%s to %s)!\n%s" % (_mt.safe_norm(ret, 'imag'), from_basis, to_basis, ret)) return _mt.safe_real(ret) diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index 951194984..9f2a4a9d4 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -402,7 +402,8 @@ def standard_gatenames_stim_conversions(): 'Gxx' : stim.Tableau.from_named_gate('SQRT_XX'), 'Gzz' : stim.Tableau.from_named_gate('SQRT_ZZ'), 'Gcnot' : stim.Tableau.from_named_gate('CNOT'), - 'Gswap' : stim.Tableau.from_named_gate('SWAP') + 'Gswap' : stim.Tableau.from_named_gate('SWAP'), + 'Gcphase' : stim.Tableau.from_named_gate('CZ') } return pyGSTi_to_stim_GateDict diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index e25f64be3..30d953800 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -15,7 +15,8 @@ from pygsti.tools.basistools import basis_matrices import pygsti.baseobjs as _bo -from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, \ + LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.baseobjs.statespace import QubitSpace as _QubitSpace import warnings as _warnings @@ -236,7 +237,8 @@ def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N8 def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, 
H_params=(0.,.01), SCA_params=(0.,.01), error_metric=None, error_metric_value=None, - relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, seed = None): + relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, + label_type='global', seed = None): """ Function for generating a random set of CPTP error generator rates. @@ -307,7 +309,12 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') control the weight and allowed types of the error generators in this model. If specifying fixed C and A rates it is possible for the final error generator to be non-CP. - + + label_type : str, optional (default 'global') + String which can be either 'global' or 'local', indicating whether to + return a dictionary with keys which are `GlobalElementaryErrorgenLabel` + or `LocalElementaryErrorgenLabel` objects respectively. + seed : int, optional (default None) An optional integer used in seeding the RNG. @@ -485,5 +492,12 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') for errgen_type in errorgen_types: errorgen_rates_dict.update(random_rates_dicts[errgen_type]) + if label_type not in ['global', 'local']: + raise ValueError('Unsupported label type {label_type}.') + + if label_type == 'local': + errorgen_rates_dict = {_LocalElementaryErrorgenLabel.cast(lbl, state_space.state_space_labels): val + for lbl, val in errorgen_rates_dict.items()} + return errorgen_rates_dict From 3c68f94186372c09d37c10cc9c9862554769fd94 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 12 Nov 2024 23:07:23 -0700 Subject: [PATCH 025/102] Add the ability to truncate small terms in BCH Add an option for setting a truncation threshold which is used during the BCH approximation calculation to remove sufficiently small terms. 
--- .../errorpropagator_dev.py | 40 ++++++--- pygsti/tools/errgenproptools.py | 83 ++++++++++++++----- 2 files changed, 89 insertions(+), 34 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 795bcde9e..e62edf620 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -65,7 +65,7 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us propagated_error_generator = self.propagate_errorgens_bch(circuit, multi_gate_dict=multi_gate_dict, **bch_kwargs) #convert this to a process matrix - return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp')) + return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp', return_dense=True)) else: propagated_error_generators = self.propagate_errorgens(circuit, multi_gate_dict, include_spam) @@ -76,7 +76,7 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us if err_gen_layer: #if not empty. #Keep the error generator in the standard basis until after the end-of-circuit #channel is constructed so we can reduce the overhead of changing basis. - exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp'))) + exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp', return_dense=True))) #Next take the product of these exponentiated error generators. #These are in circuit ordering, so reverse for matmul. 
exp_error_generators.reverse() @@ -205,7 +205,7 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, multi_gate_dict=None, - include_spam=True): + include_spam=True, truncation_threshold=1e-14): """ Propagate all of the error generators for each circuit to the end, performing approximation/recombination either along the way (layerwise) @@ -235,6 +235,10 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul include_spam : bool, optional (default True) If True then we include in the propagation the error generators associated with state preparation and measurement. + + truncation_threshold : float, optional (default 1e-14) + Threshold below which any error generators with magnitudes below this value + are truncated during the BCH approximation. """ #msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ @@ -248,6 +252,7 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul #if not doing layerwise BCH then we can re-use `propagate_errorgens` fully. if not bch_layerwise: + raise NotImplementedError('Still under development.') propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict, include_spam=include_spam) #otherwise we need to do the error generator layer propagation slightly @@ -273,7 +278,8 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul #end of circuit error generator dictionary. 
propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers, bch_order=bch_order, - include_spam = include_spam) + include_spam = include_spam, + truncation_threshold=truncation_threshold) return propagated_errorgen_layers @@ -538,7 +544,7 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ return fully_propagated_layers #TODO: Add an option to return the results with the different BCH order combined. - def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True): + def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True, truncation_threshold=1e-14): """ Propagates the error generator layers through each of the corresponding propagation layers (i.e. the clifford operations for the remainder of the circuit). In this version we @@ -566,6 +572,10 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc If True then include the error generators for state preparation and measurement are included in errogen_layers, and the state preparation error generator should be propagated through (the measurement one is simply appended at the end). + + truncation_threshold : float, optional (default 1e-14) + Threshold below which any error generators with magnitudes below this value + are truncated during the BCH approximation. 
Returns ------- @@ -598,15 +608,15 @@ def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bc propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, combined_err_layer[errgen_coeff_lbl]) new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] - combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order) + combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order, truncation_threshold=truncation_threshold) #If we are including spam then there will be one last error generator which we doesn't have an associated propagation #which needs to be combined using BCH. if include_spam: - combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order) + combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order, truncation_threshold=truncation_threshold) return combined_err_layer - def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): + def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_dense=False): """ Helper method for converting from an error generator dictionary in the format utilized in the `errorgenpropagation` module into a numpy array. @@ -620,6 +630,10 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): mx_basis : Basis or str, optional (default 'pp') Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the basis in which to return the error generator. + + return_dense : bool, optional (default False) + If True return the error generator as a dense numpy array. 
+ Returns ------- errorgen : numpy.ndarray @@ -659,11 +673,15 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): # raise err global_errorgen_coeffs = [coeff_lbl.to_global_eel() for coeff_lbl in errorgen_layer.keys()] - coeff_dict = {lbl:val for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} + coeff_dict = {lbl:_np.real_if_close(val) for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} - errorgen = _LindbladErrorgen.from_elementary_errorgens(coeff_dict, parameterization='GLND', state_space=self.model.state_space) + errorgen = _LindbladErrorgen.from_elementary_errorgens(coeff_dict, parameterization='GLND', state_space=self.model.state_space, + mx_basis=mx_basis) - return errorgen.to_dense() + if return_dense: + return errorgen.to_dense() + else: + return errorgen diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index a178d3405..26b6a7239 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -74,7 +74,7 @@ def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): raise ValueError(f'Unsupported error generator type {errorgen_typ}') -def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): +def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_threshold=1e-14): """ Apply the BCH approximation at the given order to combine the input dictionaries of error generator rates. @@ -87,6 +87,10 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): errgen_layer_2 : list of dicts See errgen_layer_1. + + truncation_threshold : float, optional (default 1e-14) + Threshold for which any error generators with magnitudes below this value + are truncated. 
Returns ------- @@ -96,7 +100,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): """ new_errorgen_layer=[] - for curr_order in range(0,bch_order): + for curr_order in range(0, bch_order): #add first order terms into new layer if curr_order == 0: #Get a combined set of error generator coefficient labels for these two @@ -107,7 +111,10 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): #loop through the combined set of coefficient labels and add them to the new dictionary for the current BCH #approximation order. If present in both we sum the rates. for coeff_lbl in current_combined_coeff_lbls: - first_order_dict[coeff_lbl] = errgen_layer_1.get(coeff_lbl, 0) + errgen_layer_2.get(coeff_lbl, 0) + #only add to the first order dictionary if the coefficient exceeds the truncation threshold. + first_order_rate = errgen_layer_1.get(coeff_lbl, 0) + errgen_layer_2.get(coeff_lbl, 0) + if abs(first_order_rate) > truncation_threshold: + first_order_dict[coeff_lbl] = first_order_rate #allow short circuiting to avoid an expensive bunch of recombination logic when only using first order BCH #which will likely be a common use case. @@ -123,20 +130,31 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): commuted_errgen_list = [] for error1 in errgen_layer_1.keys(): for error2 in errgen_layer_2.keys(): - #get the list of error generator labels + #get the list of error generator labels + weight = .5*errgen_layer_1[error1]*errgen_layer_2[error2] + #I *think* you can pick up at most around a factor of 8 from the commutator + #itself. Someone should validate that. Set this conservatively, but also + #avoid computing commutators which will be effectively zero. 
+ if abs(weight) < 10*truncation_threshold: + continue commuted_errgen_sublist = error_generator_commutator(error1, error2, - weight= .5*errgen_layer_1[error1]*errgen_layer_2[error2]) + weight= weight) commuted_errgen_list.extend(commuted_errgen_sublist) #print(f'{commuted_errgen_list=}') #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. - second_order_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list} + second_order_comm_dict = {error_tuple[0]: 0 for error_tuple in commuted_errgen_list} #Add all of these error generators to the working dictionary of updated error generators and weights. #There may be duplicates, which should be summed together. for error_tuple in commuted_errgen_list: second_order_comm_dict[error_tuple[0]] += error_tuple[1] - new_errorgen_layer.append(second_order_comm_dict) + #truncate any terms which are below the truncation threshold following + #aggregation. + second_order_comm_dict = {key: val for key, val in second_order_comm_dict.items() if abs(val)>truncation_threshold} + + new_errorgen_layer.append(second_order_comm_dict) + #third order BCH terms # (1/12)*([X,[X,Y]] - [Y,[X,Y]]) elif curr_order == 2: @@ -145,20 +163,26 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): #this at higher order if needed. commuted_errgen_list_1 = [] commuted_errgen_list_2 = [] - first_order_comm = new_errorgen_layer[1] - for error1a, error1b in zip(errgen_layer_1.keys(), errgen_layer_2.keys()): - for error2 in first_order_comm: - first_order_comm_rate = first_order_comm[error2] + for error2 in second_order_comm_dict: + second_order_comm_rate = second_order_comm_dict[error2] + #I *think* you can pick up at most around a factor of 8 from the commutator + #itself. Someone should validate that. Set this conservatively, but also + #avoid computing commutators which will be effectively zero. 
#only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator - commuted_errgen_sublist = error_generator_commutator(error1a, error2, - weight=(1/6)*errgen_layer_1[error1a]*first_order_comm_rate) - commuted_errgen_list_1.extend(commuted_errgen_sublist) + weighta = (1/6)*errgen_layer_1[error1a]*second_order_comm_rate + if not abs(weighta) < 10*truncation_threshold: + commuted_errgen_sublist = error_generator_commutator(error1a, error2, + weight=weighta) + commuted_errgen_list_1.extend(commuted_errgen_sublist) + #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator - commuted_errgen_sublist = error_generator_commutator(error1b, error2, - weight=-(1/6)*errgen_layer_2[error1b]*first_order_comm_rate) - commuted_errgen_list_2.extend(commuted_errgen_sublist) + weightb = -(1/6)*errgen_layer_2[error1b]*second_order_comm_rate + if not abs(weightb) < 10*truncation_threshold: + commuted_errgen_sublist = error_generator_commutator(error1b, error2, + weight=weightb) + commuted_errgen_list_2.extend(commuted_errgen_sublist) #turn the two new commuted error generator lists into dictionaries. #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. @@ -172,10 +196,12 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): for error_tuple in commuted_errgen_list_2: third_order_comm_dict_2[error_tuple[0]] += error_tuple[1] - #finally sum these two dictionaries - third_order_comm_dict = {key: third_order_comm_dict_1.get(key, 0) + third_order_comm_dict_2.get(key, 0) - for key in set(third_order_comm_dict_1) | set(third_order_comm_dict_2)} - + #finally sum these two dictionaries, keeping only terms which are greater than the threshold. 
+ third_order_comm_dict = dict() + for lbl in set(third_order_comm_dict_1) | set(third_order_comm_dict_2): + third_order_rate = third_order_comm_dict_1.get(lbl, 0) + third_order_comm_dict_2.get(lbl, 0) + if abs(third_order_rate) > truncation_threshold: + third_order_comm_dict[lbl] = third_order_rate new_errorgen_layer.append(third_order_comm_dict) #fourth order BCH terms @@ -186,9 +212,15 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): commuted_errgen_list = [] for error1 in errgen_layer_2.keys(): for error2 in third_order_comm_dict_1.keys(): - #only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator + #I *think* you can pick up at most around a factor of 8 from the commutator + #itself. Someone should validate that. Set this conservatively, but also + #avoid computing commutators which will be effectively zero. + #only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator + weight = -.5*errgen_layer_2[error1]*third_order_comm_dict_1[error2] + if abs(weight) < 10*truncation_threshold: + continue commuted_errgen_sublist = error_generator_commutator(error1, error2, - weight= -.5*errgen_layer_2[error1]*third_order_comm_dict_1[error2]) + weight=weight) commuted_errgen_list.extend(commuted_errgen_sublist) #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. @@ -198,6 +230,9 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): #There may be duplicates, which should be summed together. 
for error_tuple in commuted_errgen_list: fourth_order_comm_dict[error_tuple[0]] += error_tuple[1] + + #drop any terms below the truncation threshold after aggregation + fourth_order_comm_dict = {key: val for key, val in fourth_order_comm_dict.items() if abs(val)>truncation_threshold} new_errorgen_layer.append(fourth_order_comm_dict) else: raise NotImplementedError("Higher orders beyond fourth order are not implemented yet.") @@ -213,6 +248,8 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1): for lbl, rate in order_dict.items(): new_errorgen_layer_dict[lbl] += rate + #Future: Possibly do one last truncation pass in case any of the different order cancel out when aggregated? + return new_errorgen_layer_dict From 9bda6cd98eb5ab2871551f04f6af2c2df5b16056 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 17 Nov 2024 18:38:30 -0700 Subject: [PATCH 026/102] Remove layerwise BCH implementation Remove layerwise BCH implementation. There was a bug in this implementation that I couldn't track down. But more importantly, I couldn't think of any reason why this would have an advantage over propagating everything to the end before doing the pairwise recombinations (in fact, the way I just stated ought to be more performant in general). 
--- .../errorpropagator_dev.py | 221 ++++++++---------- 1 file changed, 94 insertions(+), 127 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index e62edf620..0f72c2210 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -16,11 +16,9 @@ class ErrorGeneratorPropagator: - def __init__(self, model, multi_gate_dict=None, bch_order=1, - bch_layerwise=False, nonmarkovian=False, multi_gate=False): + def __init__(self, model, multi_gate_dict=None, bch_order=1,nonmarkovian=False, multi_gate=False): self.model = model self.bch_order = bch_order - self.bch_layerwise = bch_layerwise def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, use_bch=False, bch_kwargs=None, mx_basis='pp'): @@ -204,12 +202,11 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): return propagated_errorgen_layers - def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, multi_gate_dict=None, + def propagate_errorgens_bch(self, circuit, bch_order=1, multi_gate_dict=None, include_spam=True, truncation_threshold=1e-14): """ Propagate all of the error generators for each circuit to the end, - performing approximation/recombination either along the way (layerwise) - or at the very end using the BCH approximation. + performing approximation/recombination using the BCH approximation. Parameters ---------- @@ -217,16 +214,8 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul Circuit to construct a set of post gate error generators for. bch_order : int, optional (default 1) - Order of the BCH approximation to use. When bch_layerwise is True - this can take the values of either 1 or 2. Otherwise only - a value of 1 is currently implemented. 
- - bch_layerwise : bool, optional (default False) - If True perform the BCH approximation incrementally, performing the - approximate recombination layer-by-layer during the course of error - generator propagation. If False (the default) then the BCH approximation - is only applied at the very end after all of the error generators have - been propagated to the end. + Order of the BCH approximation to use. A maximum value of 4 is + currently supported. multi_gate_dict : dict, optional (default None) An optional dictionary mapping between gate name aliases and their @@ -241,47 +230,22 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, bch_layerwise=False, mul are truncated during the BCH approximation. """ - #msg = 'When bch_layerwise is True this can take the values of either 1 or 2.'\ - # +' Otherwise only a value of 1 is currently implemented.' - #if not bch_layerwise: - # assert bch_order==1, msg - #else: - # msg1 = 'When bch_layerwise is False only bch_order values of 1 and 2 are currently'\ - # + ' supported.' - # assert bch_order==1 or bch_order==2, msg1 - - #if not doing layerwise BCH then we can re-use `propagate_errorgens` fully. - if not bch_layerwise: - raise NotImplementedError('Still under development.') - propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict, - include_spam=include_spam) - #otherwise we need to do the error generator layer propagation slightly - #differently. - else: - #start by converting the input circuit into a list of stim Tableaus with the - #first element dropped. - stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer= not include_spam) - - #We next want to construct a new set of Tableaus corresponding to the cumulative products - #of each of the circuit layers with those that follow. These Tableaus correspond to the - #clifford operations each error generator will be propagated through in order to reach the - #end of the circuit. 
- propagation_layers = self.construct_propagation_layers(stim_layers) - - #Next we take the input circuit and construct a list of dictionaries, each corresponding - #to the error generators for a particular gate layer. - #TODO: Add proper inferencing for number of qubits: - assert circuit.line_labels is not None and circuit.line_labels != ('*',) - errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) - - #propagate the errorgen_layers through the propagation_layers to get the - #end of circuit error generator dictionary. - propagated_errorgen_layers = self._propagate_errorgen_layers_bch(errorgen_layers, propagation_layers, - bch_order=bch_order, - include_spam = include_spam, - truncation_threshold=truncation_threshold) - return propagated_errorgen_layers + propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict, + include_spam=include_spam) + #if length one no need to do anything. + if len(propagated_errorgen_layers)==1: + return propagated_errorgen_layers[0] + + #otherwise iterate through in reverse order (the propagated layers are + #in circuit ordering and not matrix multiplication ordering at the moment) + #and combine the terms pairwise + combined_err_layer = propagated_errorgen_layers[-1] + for i in range(len(propagated_errorgen_layers)-2, -1, -1): + combined_err_layer = _eprop.bch_approximation(combined_err_layer, propagated_errorgen_layers[i], + bch_order=bch_order, truncation_threshold=truncation_threshold) + return combined_err_layer + def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, include_spam=True): """ @@ -544,77 +508,80 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ return fully_propagated_layers #TODO: Add an option to return the results with the different BCH order combined. 
- def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True, truncation_threshold=1e-14): - """ - Propagates the error generator layers through each of the corresponding propagation layers - (i.e. the clifford operations for the remainder of the circuit). In this version we - perform a layerwise application of the BCH approximation following each propagation to - recombine the propaged error generator layer with the layer proceeding it before each - successive propagation step. - - Parameters - ---------- - errorgen_layers : list of lists of dicts - Each sublist corresponds to a circuit layer, with these sublists containing dictionaries - of the error generator coefficients and rates for a circuit layer. Each dictionary corresponds - to a different order of the BCH approximation (when not using the BCH approximation this list will - be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. - - propagation_layers : list of `stim.Tableau` - A list of `stim.Tableau` objects, each corresponding to a cumulative product of - ideal Clifford operations for a set of circuit layers, each corresponding to a layer - of operations which we will be propagating error generators through. - - bch_order : int, optional (default 1) - Order of the BCH approximation to use. - - include_spam : bool, optional (default True) - If True then include the error generators for state preparation and measurement - are included in errogen_layers, and the state preparation error generator should - be propagated through (the measurement one is simply appended at the end). - - truncation_threshold : float, optional (default 1e-14) - Threshold below which any error generators with magnitudes below this value - are truncated during the BCH approximation. 
- - Returns - ------- - fully_propagated_layer : dict - Dictionart corresponding to the results of having propagated each of the error generator - layers through the circuit to the end while combining the layers in a layerwise fashion - using the BCH approximation. - """ - #TODO: Refactor this and _propagate_errorgen_layers to reduce code repetition as their current - #implementations are very close to each other. - #initialize a variable as temporary storage of the result - #of performing BCH on pairwise between a propagated errorgen - #layer and an unpropagated layer for layerwise BCH. - if len(errorgen_layers)>0: - combined_err_layer = errorgen_layers[0] - - #the stopping index in errorgen_layers will depend on whether the measurement error - #generator is included or not. - if include_spam: - stopping_idx = len(errorgen_layers)-2 - else: - stopping_idx = len(errorgen_layers)-1 - - for i in range(stopping_idx): - #err_layer = errorgen_layers[i] - prop_layer = propagation_layers[i] - new_error_dict = dict() - #iterate through dictionary of error generator coefficients and propagate each one. - for errgen_coeff_lbl in combined_err_layer: - propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, combined_err_layer[errgen_coeff_lbl]) - new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] - #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] - combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order, truncation_threshold=truncation_threshold) - #If we are including spam then there will be one last error generator which we doesn't have an associated propagation - #which needs to be combined using BCH. 
- if include_spam: - combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order, truncation_threshold=truncation_threshold) - - return combined_err_layer + #def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True, truncation_threshold=1e-14): + # """ + # Propagates the error generator layers through each of the corresponding propagation layers + # (i.e. the clifford operations for the remainder of the circuit). In this version we + # perform a layerwise application of the BCH approximation following each propagation to + # recombine the propaged error generator layer with the layer proceeding it before each + # successive propagation step. +# + # Parameters + # ---------- + # errorgen_layers : list of lists of dicts + # Each sublist corresponds to a circuit layer, with these sublists containing dictionaries + # of the error generator coefficients and rates for a circuit layer. Each dictionary corresponds + # to a different order of the BCH approximation (when not using the BCH approximation this list will + # be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. +# + # propagation_layers : list of `stim.Tableau` + # A list of `stim.Tableau` objects, each corresponding to a cumulative product of + # ideal Clifford operations for a set of circuit layers, each corresponding to a layer + # of operations which we will be propagating error generators through. +# + # bch_order : int, optional (default 1) + # Order of the BCH approximation to use. + # + # include_spam : bool, optional (default True) + # If True then include the error generators for state preparation and measurement + # are included in errogen_layers, and the state preparation error generator should + # be propagated through (the measurement one is simply appended at the end). 
+# + # truncation_threshold : float, optional (default 1e-14) + # Threshold below which any error generators with magnitudes below this value + # are truncated during the BCH approximation. + # + # Returns + # ------- + # fully_propagated_layer : dict + # Dictionary corresponding to the results of having propagated each of the error generator + # layers through the circuit to the end while combining the layers in a layerwise fashion + # using the BCH approximation. + # """ + # #TODO: Refactor this and _propagate_errorgen_layers to reduce code repetition as their current + # #implementations are very close to each other. + # #initialize a variable as temporary storage of the result + # #of performing BCH pairwise between a propagated errorgen + # #layer and an unpropagated layer for layerwise BCH. + # if len(errorgen_layers)>0: + # combined_err_layer = errorgen_layers[0] +# + # #the stopping index in errorgen_layers will depend on whether the measurement error + # #generator is included or not. + # if include_spam: + # stopping_idx = len(errorgen_layers)-2 + # else: + # stopping_idx = len(errorgen_layers)-1 +# + # for i in range(stopping_idx): + # #err_layer = errorgen_layers[i] + # prop_layer = propagation_layers[i] + # new_error_dict = dict() + # #iterate through dictionary of error generator coefficients and propagate each one. 
+ # for errgen_coeff_lbl in combined_err_layer: + # propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, combined_err_layer[errgen_coeff_lbl]) + # new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] + # #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] + # #combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order, truncation_threshold=truncation_threshold) + # combined_err_layer = _eprop.bch_approximation(errorgen_layers[i+1], new_error_dict, bch_order=bch_order, truncation_threshold=truncation_threshold) + # + # #If we are including spam then there will be one last error generator which doesn't have an associated propagation + # #which needs to be combined using BCH. + # if include_spam: + # #combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order, truncation_threshold=truncation_threshold) + # combined_err_layer = _eprop.bch_approximation(errorgen_layers[-1], combined_err_layer, bch_order=bch_order, truncation_threshold=truncation_threshold) +# + # return combined_err_layer def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_dense=False): """ From d475c923e0ce68b894d478f86efa3c77d6515e40 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 17 Nov 2024 20:55:12 -0700 Subject: [PATCH 027/102] Unit tests Add some initial unit tests for the error generator propagation codebase. Will need to eventually add in additional tests for more complete coverage. 
--- pygsti/tools/errgenproptools.py | 7 +- test/unit/objects/test_errorgenpropagation.py | 222 ++++++++++++++++++ 2 files changed, 226 insertions(+), 3 deletions(-) create mode 100644 test/unit/objects/test_errorgenpropagation.py diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 26b6a7239..f536e7924 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -172,14 +172,14 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator weighta = (1/6)*errgen_layer_1[error1a]*second_order_comm_rate - if not abs(weighta) < 10*truncation_threshold: + if not abs(weighta) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1a, error2, weight=weighta) commuted_errgen_list_1.extend(commuted_errgen_sublist) #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator weightb = -(1/6)*errgen_layer_2[error1b]*second_order_comm_rate - if not abs(weightb) < 10*truncation_threshold: + if not abs(weightb) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1b, error2, weight=weightb) commuted_errgen_list_2.extend(commuted_errgen_sublist) @@ -217,7 +217,8 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #avoid computing commutators which will be effectively zero. 
#only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator weight = -.5*errgen_layer_2[error1]*third_order_comm_dict_1[error2] - if abs(weight) < 10*truncation_threshold: + if abs(weight) < truncation_threshold: + #print('continuing') continue commuted_errgen_sublist = error_generator_commutator(error1, error2, weight=weight) diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py new file mode 100644 index 000000000..03c3e7731 --- /dev/null +++ b/test/unit/objects/test_errorgenpropagation.py @@ -0,0 +1,222 @@ +from ..util import BaseCase +from pygsti.algorithms.randomcircuit import create_random_circuit +from pygsti.errorgenpropagation.errorpropagator_dev import ErrorGeneratorPropagator +from pygsti.processors import QubitProcessorSpec +from pygsti.models.modelconstruction import create_crosstalk_free_model +from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel +from pygsti.tools import errgenproptools as _eprop +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel +from pygsti.tools.matrixtools import print_mx +from itertools import product + + +import numpy as np + + +class ErrorgenPropTester(BaseCase): + + def setUp(self): + num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + + typ = 'H' + max_stochastic = {'S': .0005, 'H': 0, 'H+S': .0001} + max_hamiltonian = {'S': 0, 'H': .00005, 'H+S': .0001} + max_strengths = {1: {'S': max_stochastic[typ], 'H': max_hamiltonian[typ]}, + 2: {'S': 
3*max_stochastic[typ], 'H': 3*max_hamiltonian[typ]} + } + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + + def test_exact_propagation_probabilities(self): + #This should simultaneously confirm that the propagation code runs + #and also that it is giving the correct values by directly comparing + #to the probabilities from direct forward simulation. + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + probabilities_exact_propagation = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit) + probabilities_forward_simulation = probabilities_fwdsim(self.error_model, self.circuit) + + self.assertTrue(np.linalg.norm(probabilities_exact_propagation - probabilities_forward_simulation, ord=1) < 1e-10) + + def test_approx_propagation_probabilities_BCH(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + probabilities_BCH_order_1 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=1) + probabilities_BCH_order_2 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=2) + probabilities_BCH_order_3 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=3) + probabilities_BCH_order_4 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=4) + + probabilities_forward_simulation = probabilities_fwdsim(self.error_model, self.circuit) + + #use a much looser constraint on the agreement between the BCH results and forward simulation. Mostly testing to catch things exploding. 
+ self.assertTrue(np.linalg.norm(probabilities_BCH_order_1 - probabilities_forward_simulation, ord=1) < 1e-2) + self.assertTrue(np.linalg.norm(probabilities_BCH_order_2 - probabilities_forward_simulation, ord=1) < 1e-2) + self.assertTrue(np.linalg.norm(probabilities_BCH_order_3 - probabilities_forward_simulation, ord=1) < 1e-2) + self.assertTrue(np.linalg.norm(probabilities_BCH_order_4 - probabilities_forward_simulation, ord=1) < 1e-2) + + def test_errorgen_commutators(self): + #confirm we get the correct analytic commutators by comparing to numerics. + + #create an error generator basis. + errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + #loop through all of the pairs of indices. + errorgen_label_pairs = list(product(errorgen_lbls, repeat=2)) + + #also get a version of this list where the labels are local stim ones + local_stim_errorgen_lbls = [LocalStimErrorgenLabel.cast(lbl) for lbl in errorgen_lbls] + stim_errorgen_label_pairs = list(product(local_stim_errorgen_lbls, repeat=2)) + + #for each pair compute the commutator directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. 
+ for pair1, pair2 in zip(errorgen_label_pairs, stim_errorgen_label_pairs): + numeric_commutator = error_generator_commutator_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict) + analytic_commutator = _eprop.error_generator_commutator(pair2[0], pair2[1]) + analytic_commutator_mat = comm_list_to_matrix(analytic_commutator, errorgen_lbl_matrix_dict, 2) + + norm_diff = np.linalg.norm(numeric_commutator-analytic_commutator_mat) + if norm_diff > 1e-10: + print(f'Difference in commutators for pair {pair1} is greater than 1e-10.') + print(f'{np.linalg.norm(numeric_commutator-analytic_commutator_mat)=}') + print('numeric_commutator=') + print_mx(numeric_commutator) + + #Decompose the numerical commutator into rates. + for lbl, dual in zip(errorgen_lbls, errorgen_basis.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_commutator) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_commutator=}') + print('analytic_commutator_mat=') + print_mx(analytic_commutator_mat) + raise ValueError() + +#Helper Functions: +def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + ideal_meas = target_model.circuit_layer_operator(Label('Mdefault'), typ='povm').copy() + #calculate the probabilities. 
+ prob_vec = np.zeros(len(ideal_meas)) + for i, effect in enumerate(ideal_meas.values()): + dense_effect = effect.to_dense().copy() + dense_prep = ideal_prep.to_dense().copy() + prob_vec[i] = np.linalg.multi_dot([dense_effect.reshape((1,len(dense_effect))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]) + return prob_vec + +def probabilities_fwdsim(noise_model, circuit): + prob_dict = noise_model.sim.probs(circuit) + prob_vec = np.fromiter(prob_dict.values(), dtype=np.double) + return prob_vec + +def sample_error_rates_dict(pspec, strengths, seed=None): + """ + For example: + strengths = {1: {'S':0.001, 'H':0.01}, + 2: {'S':0.01,'H':0.1}} + + The 'S' and 'H' entries in the strengths dictionary give + the maximum possible contribution to the infidelity from a given gate. + """ + qubits = pspec.qubit_labels + errors_rates_dict = {} + for gate, availability in pspec.availability.items(): + n = pspec.gate_num_qubits(gate) + if availability == 'all-edges': + assert(n == 1), "Currently require all 2-qubit gates have a specified availability!" + qubits_for_gate = qubits + else: + qubits_for_gate = availability + for qs in qubits_for_gate: + label = Label(gate, qs) + # First, check if there's a strength specified for this specific gate. + max_stength = strengths.get(label, None) # to get highly biased errors can set generic error rates to be low, then set it to be high for one or two particular gates. + # Next, check if there's a strength specified for all gates with this name + if max_stength is None: + max_stength = strengths.get(gate, None) + # Finally, get error rate for all gates on this number of qubits. + if max_stength is None: + max_stength = strengths[n] + # Sample error rates. + errors_rates_dict[label] = sample_error_rates(max_stength, n, seed) + return errors_rates_dict + +def sample_error_rates(strengths, n, seed = None): + ''' + Samples an error rates dictionary for dependent gates. 
+ ''' + error_rates_dict = {} + + #create a basis to get the basis element labels. + basis = BuiltinBasis('pp', 4**n) + + #set the rng + rng = np.random.default_rng(seed) + + # Sample stochastic error rates. First we sample the overall stochastic error rate. + # Then we sample (and normalize) the individual stochastic error rates + stochastic_strength = strengths['S'] * rng.random() + s_error_rates = rng.random(4 ** n - 1) + s_error_rates = s_error_rates / np.sum(s_error_rates) * stochastic_strength + + hamiltonian_strength = strengths['H'] * rng.random() + h_error_rates = rng.random(4 ** n - 1) + h_error_rates = h_error_rates * np.sqrt(hamiltonian_strength) / np.sqrt(np.sum(h_error_rates**2)) + + error_rates_dict.update({('S', basis.labels[i + 1]): s_error_rates[i] for i in range(4 ** n - 1)}) + error_rates_dict.update({('H', basis.labels[i + 1]): h_error_rates[i] for i in range(4 ** n - 1)}) + + return error_rates_dict + +def comm_list_to_matrix(comm_list, errorgen_matrix_dict, num_qubits): + #if the list is empty return all zeros + #initialize empty array for accumulation. + mat = np.zeros((4**num_qubits, 4**num_qubits), dtype=np.complex128) + if not comm_list: + return mat + + #infer the correct label type. + if errorgen_matrix_dict: + first_label = next(iter(errorgen_matrix_dict)) + if isinstance(first_label, LocalElementaryErrorgenLabel): + label_type = 'local' + elif isinstance(first_label, GlobalElementaryErrorgenLabel): + label_type = 'global' + else: + msg = f'Label type {type(first_label)} is not supported as a key for errorgen_matrix_dict.'\ + + 'Please use either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel.' + raise ValueError() + else: + raise ValueError('Non-empty commutatory result list, but the dictionary is empty. Cannot convert.') + + #loop through comm_list and accumulate the weighted error generators prescribed. 
+ if label_type == 'local': + for comm_tup in comm_list: + mat += comm_tup[1]*errorgen_matrix_dict[comm_tup[0].to_local_eel()] + else: + for comm_tup in comm_list: + mat += comm_tup[1]*errorgen_matrix_dict[comm_tup[0].to_global_eel()] + + return mat + +def error_generator_commutator_numerical(errorgen_1, errorgen_2, errorgen_matrix_dict): + return errorgen_matrix_dict[errorgen_1]@errorgen_matrix_dict[errorgen_2] - errorgen_matrix_dict[errorgen_2]@errorgen_matrix_dict[errorgen_1] + From 7efa90df9e5d4f83ca27551aed5b6a9014eaf2e2 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 18 Nov 2024 23:42:03 -0700 Subject: [PATCH 028/102] Caching and flexible errogen label types This commit represents an attempt at improving the performance of the construction and use of error generator coefficient dictionaries from the objects that support their creation. This is achieved two ways. First there are internal changes to the LindbladCoefficientBlock code which caches the error generator dictionary for a block upon creation and only rebuilds this from scratch when the block data has changed. The second change is to allow greater flexibility in the type of error generator label returned when generating the term dictionary. These can now be returned either as local or global elementary error generators, and similarly the setter methods can accept either one. This flexibility allows for performance benefits by reducing the number of casts required between error generator label types. 
--- pygsti/modelmembers/errorgencontainer.py | 88 ++++++++++++--- .../operations/composederrorgen.py | 48 ++++++-- pygsti/modelmembers/operations/composedop.py | 45 ++++++-- .../operations/embeddederrorgen.py | 34 +++++- pygsti/modelmembers/operations/embeddedop.py | 34 +++++- .../operations/lindbladcoefficients.py | 41 ++++++- .../operations/lindbladerrorgen.py | 103 +++++++++++------- pygsti/modelmembers/povms/composedpovm.py | 22 +++- pygsti/modelmembers/states/composedstate.py | 20 +++- 9 files changed, 338 insertions(+), 97 deletions(-) diff --git a/pygsti/modelmembers/errorgencontainer.py b/pygsti/modelmembers/errorgencontainer.py index 331708fd3..fa2ea6634 100644 --- a/pygsti/modelmembers/errorgencontainer.py +++ b/pygsti/modelmembers/errorgencontainer.py @@ -23,7 +23,7 @@ class ErrorGeneratorContainer(object): def __init__(self, errorgen): self.errorgen = errorgen - def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): + def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -46,6 +46,12 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- lindblad_term_dict : dict @@ -61,19 +67,27 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): A Basis mapping the basis labels used in the keys of `lindblad_term_dict` to basis matrices. 
""" - return self.errorgen.coefficients(return_basis, logscale_nonham) + return self.errorgen.coefficients(return_basis, logscale_nonham, label_type) - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [,, [, 0: raise ValueError("Cannot set any error generator coefficients on an op with no error generator!") - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. @@ -479,7 +529,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ return _np.empty((0, self.num_params), 'd') - def error_rates(self): + def error_rates(self, label_type): """ Constructs a dictionary of the error rates associated with this operation. @@ -502,6 +552,14 @@ def error_rates(self): rates is not necessarily the error rate of the overall channel. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. 
+ Returns ------- lindblad_term_dict : dict diff --git a/pygsti/modelmembers/operations/composederrorgen.py b/pygsti/modelmembers/operations/composederrorgen.py index 2a29d82c8..81173d352 100644 --- a/pygsti/modelmembers/operations/composederrorgen.py +++ b/pygsti/modelmembers/operations/composederrorgen.py @@ -21,6 +21,7 @@ from pygsti.evotypes import Evotype as _Evotype from pygsti.baseobjs import statespace as _statespace from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.tools import matrixtools as _mt @@ -95,7 +96,7 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): errgens_to_compose = [serial_memo[i] for i in mm_dict['submembers']] return cls(errgens_to_compose, mm_dict['evotype'], state_space) - def coefficients(self, return_basis=False, logscale_nonham=False): + def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this error generator. @@ -118,6 +119,12 @@ def coefficients(self, return_basis=False, logscale_nonham=False): channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. 
+ Returns ------- Ltermdict : dict @@ -139,7 +146,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): constant_basis = None # the single same Basis used for every factor with a nonempty basis for eg in self.factors: - factor_coeffs = eg.coefficients(return_basis, logscale_nonham) + factor_coeffs = eg.coefficients(return_basis, logscale_nonham, label_type) if return_basis: ltdict, factor_basis = factor_coeffs @@ -184,17 +191,25 @@ def coefficients(self, return_basis=False, logscale_nonham=False): else: return Ltermdict - def coefficient_labels(self): + def coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [,, [,, [,, [, scalar #set_basis_el(lbl, basis[lbl]) # REMOVE + #cache the error generator dictionary for future use + self._cached_elementary_errorgens = elementary_errorgens + self._coefficients_need_update = False return elementary_errorgens @@ -511,6 +530,10 @@ def set_elementary_errorgens(self, elementary_errorgens, on_missing='ignore', tr self.block_data[(slice(None, None),) * self.block_data.ndim] = flat_data.reshape(self.block_data.shape) self._truncate_block_data(truncate) + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. 
+ self._coefficients_need_update = True + return unused_elementary_errorgens def set_from_errorgen_projections(self, errorgen, errorgen_basis='pp', return_projected_errorgen=False, @@ -522,6 +545,11 @@ def set_from_errorgen_projections(self, errorgen, errorgen_basis='pp', return_pr elementary_errorgens = out[0] if return_projected_errorgen else out unused = self.set_elementary_errorgens(elementary_errorgens, on_missing='raise', truncate=truncate) assert(len(unused) == 0) + + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. + self._coefficients_need_update = True + return out[1] if return_projected_errorgen else None @property @@ -779,6 +807,7 @@ def from_vector(self, v): v : numpy.ndarray A 1D array of real parameter values. """ + if self._param_mode == 'static': assert(len(v) == 0), "'static' paramterized blocks should have zero parameters!" return # self.block_data remains the same - no update @@ -849,6 +878,10 @@ def from_vector(self, v): % (self._param_mode, self._block_type)) else: raise ValueError("Internal error: invalid block type!") + + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. 
+ self._coefficients_need_update = True #def paramvals_to_coefficients_deriv(self, parameter_values, cache_mx=None): def deriv_wrt_params(self, v=None): diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 68097dd82..ad50592b1 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -356,13 +356,17 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' dim = state_space.dim # Store superop dimension basis = _Basis.cast(elementary_errorgen_basis, dim) - #convert elementary errorgen labels to *local* labels (ok to specify w/global labels) - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = state_space.sole_tensor_product_block_labels # first TPB labels == all labels - elementary_errorgens = _collections.OrderedDict( - [(_LocalElementaryErrorgenLabel.cast(lbl, sslbls, identity_label_1Q), val) - for lbl, val in elementary_errorgens.items()]) - + #check the first key, if local then no need to convert, otherwise convert from global. + first_key = next(iter(elementary_errorgens)) + if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): + #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elementary_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v + for k, v in elementary_errorgens.items()} + else: + assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' 
+ parameterization = LindbladParameterization.minimal_from_elementary_errorgens(elementary_errorgens) \ if parameterization == "auto" else LindbladParameterization.cast(parameterization) @@ -993,7 +997,7 @@ def from_vector(self, v, close=False, dirty_value=True): self._update_rep() self.dirty = dirty_value - def coefficients(self, return_basis=False, logscale_nonham=False): + def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ TODO: docstring Constructs a dictionary of the Lindblad-error-generator coefficients of this error generator. @@ -1016,6 +1020,12 @@ def coefficients(self, return_basis=False, logscale_nonham=False): the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. Returns ------- @@ -1032,6 +1042,8 @@ def coefficients(self, return_basis=False, logscale_nonham=False): A Basis mapping the basis labels used in the keys of `Ltermdict` to basis matrices. """ + assert label_type=='global' or label_type=='local', "Allowed values of label_type are 'global' and 'local'." + elem_errorgens = {} bases = set() for blk in self.coefficient_blocks: @@ -1039,12 +1051,12 @@ def coefficients(self, return_basis=False, logscale_nonham=False): if blk._basis not in bases: bases.add(blk._basis) - #convert to *global* elementary errorgen labels - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
- sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - elem_errorgens = _collections.OrderedDict( - [(_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q), value) - for local_eeg_lbl, value in elem_errorgens.items()]) + if label_type=='global': + #convert to *global* elementary errorgen labels + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q): value + for local_eeg_lbl, value in elem_errorgens.items()} if logscale_nonham: dim = self.dim @@ -1060,10 +1072,18 @@ def coefficients(self, return_basis=False, logscale_nonham=False): else: return elem_errorgens - def coefficient_labels(self): + def coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple @@ -1075,11 +1095,14 @@ def coefficient_labels(self): #labels.extend(blk.coefficent_labels) labels.extend(blk.elementary_errorgens.keys()) + if label_type == 'global': #convert to *global* elementary errorgen labels - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - return tuple([_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) - for local_eeg_lbl in labels]) + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
+ sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + labels = [_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) + for local_eeg_lbl in labels] + return tuple(labels) + def coefficients_array(self): """ @@ -1128,7 +1151,7 @@ def coefficients_array_deriv_wrt_params(self): ret *= self._coefficient_weights[:, None] return ret - def error_rates(self): + def error_rates(self, label_type='global'): """ Constructs a dictionary of the error rates associated with this error generator. @@ -1153,6 +1176,14 @@ def error_rates(self): rates is not necessarily the error rate of the overall channel. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- lindblad_term_dict : dict @@ -1165,7 +1196,7 @@ def error_rates(self): terms. Values are real error rates except for the 2-basis-label case. """ - return self.coefficients(return_basis=False, logscale_nonham=True) + return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type) def set_coefficients(self, elementary_errorgens, action="update", logscale_nonham=False, truncate=True): """ @@ -1210,12 +1241,16 @@ def set_coefficients(self, elementary_errorgens, action="update", logscale_nonha ------- None """ - #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
- sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - elem_errorgens = _collections.OrderedDict( - [(_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q), v) - for k, v in elementary_errorgens.items()]) + #check the first key, if local then no need to convert, otherwise convert from global. + first_key = next(iter(elementary_errorgens)) + if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): + #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v + for k, v in elementary_errorgens.items()} + else: + assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' processed = set() # keep track of which entries in elem_errorgens have been processed by a block for blk in self.coefficient_blocks: @@ -1541,28 +1576,18 @@ def to_memoized_dict(self, mmg_memo): mm_dict = super().to_memoized_dict(mmg_memo) mm_dict['rep_type'] = self._rep_type - #OLD: mm_dict['parameterization'] = self.parameterization.to_nice_serialization() - #OLD: mm_dict['lindblad_basis'] = self.lindblad_basis.to_nice_serialization() - #OLD: mm_dict['coefficients'] = [(str(k), self._encodevalue(v)) for k, v in self.coefficients().items()] mm_dict['matrix_basis'] = self.matrix_basis.to_nice_serialization() mm_dict['coefficient_blocks'] = [blk.to_nice_serialization() for blk in self.coefficient_blocks] return mm_dict @classmethod def _from_memoized_dict(cls, mm_dict, serial_memo): - #lindblad_term_dict = {_GlobalElementaryErrorgenLabel.cast(k): cls._decodevalue(v) - # for k, v in mm_dict['coefficients']} # convert keys from str->objects - #parameterization = 
LindbladParameterization.from_nice_serialization(mm_dict['parameterization']) - #lindblad_basis = _Basis.from_nice_serialization(mm_dict['lindblad_basis']) - #truncate = False # shouldn't need to truncate since we're reloading a valid set of coefficients mx_basis = _Basis.from_nice_serialization(mm_dict['matrix_basis']) state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space']) coeff_blocks = [_LindbladCoefficientBlock.from_nice_serialization(blk) for blk in mm_dict['coefficient_blocks']] return cls(coeff_blocks, 'auto', mx_basis, mm_dict['evotype'], state_space) - #return cls(lindblad_term_dict, parameterization, lindblad_basis, - # mx_basis, truncate, mm_dict['evotype'], state_space) def _is_similar(self, other, rtol, atol): """ Returns True if `other` model member (which it guaranteed to be the same type as self) has @@ -1575,10 +1600,10 @@ def __str__(self): (self.dim, self.num_params) return s - def _oneline_contents(self): + def _oneline_contents(self, label_type='global'): """ Summarizes the contents of this object in a single line. Does not summarize submembers. """ MAXLEN = 60 - coeff_dict = self.coefficients(); s = "" + coeff_dict = self.coefficients(label_type=label_type); s = "" for lbl, val in coeff_dict.items(): if len(s) > MAXLEN: s += "..."; break diff --git a/pygsti/modelmembers/povms/composedpovm.py b/pygsti/modelmembers/povms/composedpovm.py index b23722007..003e761b5 100644 --- a/pygsti/modelmembers/povms/composedpovm.py +++ b/pygsti/modelmembers/povms/composedpovm.py @@ -370,17 +370,25 @@ def __str__(self): % (len(self)) return s - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. 
+ Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [, Date: Tue, 19 Nov 2024 23:16:31 -0700 Subject: [PATCH 029/102] Add caching for error generator coefficient dicts Add caching to the error generator propagation code to avoid needing to request previously seen error generator coefficient dictionaries for previously seen circuit layer operators. Also make it easier to reuse previously generated basis objects from the parent model to avoid redoing lazy basis element generation unnescessarily. --- .../errorpropagator_dev.py | 32 ++++++++++++++----- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 0f72c2210..3c09cd943 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -435,20 +435,31 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl #TODO: Generalize circuit time to not be in one-to-one correspondence with the layer index. error_gen_dicts_by_layer = [] + + #cache the error generator coefficients for a circuit layer to accelerate cases where we've already seen that layer. + circuit_layer_errorgen_cache = dict() + for j in range(len(circuit)): circuit_layer = circuit[j] # get the layer #can probably relax this if we detect that the model is a crosstalk free model. #assert isinstance(circuit_layer, Label), 'Correct support for parallel gates is still under development.' 
errorgen_layer = dict() - layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients() #get the errors for the gate + + layer_errorgen_coeff_dict = circuit_layer_errorgen_cache.get(circuit_layer, None) + if layer_errorgen_coeff_dict is None: + layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients(label_type='local') #get the errors for the gate + circuit_layer_errorgen_cache[circuit_layer] = layer_errorgen_coeff_dict + for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary - #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` - paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) - if include_circuit_time: - #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j)] = rate if fixed_rate is None else fixed_rate - else: - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis)] = rate if fixed_rate is None else fixed_rate + #only track this error generator if its rate is not exactly zero. #TODO: Add more flexible initial truncation logic. + if rate !=0 or fixed_rate is not None: + #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` + paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) + if include_circuit_time: + #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. 
+ errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j)] = rate if fixed_rate is None else fixed_rate + else: + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis)] = rate if fixed_rate is None else fixed_rate error_gen_dicts_by_layer.append(errorgen_layer) return error_gen_dicts_by_layer @@ -638,6 +649,11 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_ # print(f'{errorgen_layer=}') # _mt.print_mx(errorgen) # raise err + + #if the model's basis is already the same as mx_basis then reuse the one from the model + if isinstance(mx_basis, str): + if set(self.model.basis.name.split('*')) == set([mx_basis]) or self.model.basis.name==mx_basis: + mx_basis = self.model.basis global_errorgen_coeffs = [coeff_lbl.to_global_eel() for coeff_lbl in errorgen_layer.keys()] coeff_dict = {lbl:_np.real_if_close(val) for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} From 9dcd6ae240c1a0e7e1e7f08bb5b1af3b887a004a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 19 Nov 2024 23:22:56 -0700 Subject: [PATCH 030/102] Fix bugs introduced by new local label handling Fix a number of bugs and incompatibilities that were discovered related to the changes made to allow for more flexible use of either global or local elementary error generator labels. The main problem area was in the embedded op code which needed logic for dealing with embedding and unembedding error generators into larger hilbert schmidt spaces when working with local label types where basis element labels needed to be expanded or contracted. Also fixes an issue with ComposedOp error generators related to NoErrorgenInterface inheriting objects (not sure why global error generator labels didn't previously bump into this). Finally, add a second code path to the code for converting to stim pauli strings in the tools module to take advantage of efficiencies when using local label types. 
--- pygsti/modelmembers/operations/composedop.py | 6 + pygsti/modelmembers/operations/embeddedop.py | 127 +++++++++++++++--- .../operations/lindbladerrorgen.py | 43 +++--- pygsti/modelmembers/states/composedstate.py | 2 +- pygsti/tools/errgenproptools.py | 69 +++++----- 5 files changed, 169 insertions(+), 78 deletions(-) diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 6bde32b07..4b3b138d6 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -720,8 +720,14 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label for op in self.factorops: try: factor_coeffs = op.errorgen_coefficients(return_basis, logscale_nonham, label_type) + except AttributeError: continue # just skip members that don't implemnt errorgen_coefficients (?) + + #If the op has a NoErrorgenInterface as a parent class then factor_coeffs could be empty + #which should be skipped. + if (return_basis and not factor_coeffs[0]) or not factor_coeffs: + continue if return_basis: ltdict, factor_basis = factor_coeffs diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 579a819ac..6e140f94c 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -20,6 +20,7 @@ from pygsti.modelmembers import modelmember as _modelmember from pygsti.baseobjs.basis import EmbeddedBasis as _EmbeddedBasis from pygsti.baseobjs.statespace import StateSpace as _StateSpace +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel class EmbeddedOp(_LinearOperator): @@ -552,7 +553,7 @@ def transform_inplace(self, s): # s and Sinv matrices... but haven't needed it yet. 
raise NotImplementedError("Cannot transform an EmbeddedOp yet...") - def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): + def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global', identity_label='I'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -580,6 +581,13 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label as the keys for the returned dictionary. Allowed options are 'global' for `GlobalElementaryErrorgenLabel` and 'local' for `LocalElementaryErrorgenLabel`. + + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. Returns ------- @@ -598,13 +606,34 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label """ #*** Note: this function is nearly identical to EmbeddedErrorgen.coefficients() *** embedded_coeffs = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - embedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in embedded_coeffs.items()} + #print(f'{embedded_coeffs=}') + + if embedded_coeffs: + first_coeff_lbl = next(iter(embedded_coeffs)) + if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): +# if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: + mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + embedded_coeffs = 
{k.map_state_space_labels(mapdict): v for k, v in embedded_coeffs.items()} + elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): + embedded_labels = list(embedded_coeffs.keys()) + #use different embedding scheme for local labels + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for lbl in embedded_labels: + new_bels = [] + for bel in lbl.basis_element_labels: + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for target, pauli in zip(self.target_labels, bel): + base_label[target] = pauli + new_bels.append(''.join(base_label)) + lbl.basis_element_labels = tuple(new_bels) + embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, embedded_coeffs.values())} + else: + raise ValueError(f'Invalid error generator label type {first_coeff_lbl}') + return embedded_coeffs - def errorgen_coefficient_labels(self, label_type='global'): + def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. @@ -616,6 +645,13 @@ def errorgen_coefficient_labels(self, label_type='global'): 'global' for `GlobalElementaryErrorgenLabel` and 'local' for `LocalElementaryErrorgenLabel`. + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- tuple @@ -623,10 +659,30 @@ def errorgen_coefficient_labels(self, label_type='global'): generators of this gate. 
""" embedded_labels = self.embedded_op.errorgen_coefficient_labels(label_type) - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - embedded_labels = [k.map_state_space_labels(mapdict) for k in embedded_labels] + #print(f'{embedded_labels=}') + + #if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: + #print(f'{self.target_labels=}') + #print(f'{self.embedded_op.state_space.sole_tensor_product_block_labels=}') + if len(embedded_labels)>0: + if isinstance(embedded_labels[0], _GlobalElementaryErrorgenLabel): + mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + embedded_labels = [k.map_state_space_labels(mapdict) for k in embedded_labels] + elif isinstance(embedded_labels[0], _LocalElementaryErrorgenLabel): + #use different embedding scheme for local labels + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for lbl in embedded_labels: + new_bels = [] + for bel in lbl.basis_element_labels: + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for target, pauli in zip(self.target_labels, bel): + base_label[target] = pauli + new_bels.append(''.join(base_label)) + lbl.basis_element_labels = tuple(new_bels) + else: + raise ValueError(f'Invalid error generator label type {embedded_labels[0]}') + #print(f'{embedded_labels=}') return embedded_labels def errorgen_coefficients_array(self): @@ -658,7 +714,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ return self.embedded_op.errorgen_coefficients_array_deriv_wrt_params() - def error_rates(self, label_type='global'): + def error_rates(self, label_type='global', identity_label='I'): """ Constructs a dictionary of the error rates associated with this operation. 
@@ -689,6 +745,13 @@ def error_rates(self, label_type='global'): 'global' for `GlobalElementaryErrorgenLabel` and 'local' for `LocalElementaryErrorgenLabel`. + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- lindblad_term_dict : dict @@ -701,7 +764,7 @@ def error_rates(self, label_type='global'): terms. Values are real error rates except for the 2-basis-label case. """ - return self.errorgen_coefficients(return_basis=False, logscale_nonham=True, label_type=label_type) + return self.errorgen_coefficients(return_basis=False, logscale_nonham=True, label_type=label_type, identity_label=identity_label) def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscale_nonham=False, truncate=True): """ @@ -745,16 +808,38 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal ------- None """ - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} - else: - unembedded_coeffs = lindblad_term_dict + #determine is we need to unembed the error generator labels in lindblad_term_dict. 
+ if lindblad_term_dict: + first_coeff_lbl = next(iter(lindblad_term_dict)) + if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): + if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: + mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} + else: + unembedded_coeffs = lindblad_term_dict + elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): + #if the length of the basis element labels are the same as the length of this + #embedded op's target labels then assume those are associated. + if len(first_coeff_lbl.basis_element_labels[0]) == len(self.target_labels): + unembedded_coeffs = lindblad_term_dict + #if the length is equal to the number of qudits then we need to unembed. + elif len(first_coeff_lbl.basis_element_labels[0]) == self.state_space.num_qudits: + unembedded_labels = list(lindblad_term_dict.keys()) + for lbl in unembedded_labels: + new_bels = [] + for bel in lbl.basis_element_labels: + new_bels.append("".join(bel[target] for target in self.target_labels)) + lbl.basis_element_labels = tuple(new_bels) + unembedded_coeffs = {lbl:val for lbl, val in zip(unembedded_labels, lindblad_term_dict.values())} + else: + msg = "Could not parse error generator labels. Expected either length equal to this embedded op's"\ + +" target_labels or equal to the number of qudits." 
+ raise ValueError(msg) - self.embedded_op.set_errorgen_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) - if self._rep_type == 'dense': self._update_denserep() - self.dirty = True + self.embedded_op.set_errorgen_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) + if self._rep_type == 'dense': self._update_denserep() + self.dirty = True def set_error_rates(self, lindblad_term_dict, action="update"): """ diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index ad50592b1..07cad9d7a 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -412,8 +412,6 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= state_space = _statespace.StateSpace.cast(state_space) dim = state_space.dim # Store superop dimension - #UPDATE: no more self.lindblad_basis - #self.lindblad_basis = _Basis.cast(lindblad_basis, dim, sparse=sparse_bases) if lindblad_basis == "auto": assert(all([(blk._basis is not None) for blk in lindblad_coefficient_blocks])), \ "When `lindblad_basis == 'auto'`, the supplied coefficient blocks must have valid bases!" @@ -426,26 +424,6 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= elif blk._basis.sparse != sparse_bases: # update block bases to desired sparsity if needed blk._basis = blk._basis.with_sparsity(sparse_bases) - #UPDATE - this essentially constructs the coefficient blocks from a single dict, which are now given as input - ## lindblad_term_dict, basis => bases + parameter values - ## but maybe we want lindblad_term_dict, basisdict => basis + projections/coeffs, - ## then projections/coeffs => paramvals? 
since the latter is what set_errgen needs - #hamC, otherC, self.ham_basis, self.other_basis = \ - # _ot.lindblad_terms_to_projections(lindblad_term_dict, self.lindblad_basis, - # self.parameterization.nonham_mode) - - #UPDATE - self.ham_basis_size and self.other_basis_size have been removed! - #self.ham_basis_size = len(self.ham_basis) - #self.other_basis_size = len(self.other_basis) - #assert(self.parameterization.ham_params_allowed or self.ham_basis_size == 0), \ - # "Hamiltonian lindblad terms are not allowed!" - #assert(self.parameterization.nonham_params_allowed or self.other_basis_size == 0), \ - # "Non-Hamiltonian lindblad terms are not allowed!" - # - ## Check that bases have the desired sparseness (should be same as lindblad_basis) - #assert (self.ham_basis_size == 0 or self.ham_basis.sparse == sparse_bases) - #assert (self.other_basis_size == 0 or self.other_basis.sparse == sparse_bases) - self.coefficient_blocks = lindblad_coefficient_blocks self.matrix_basis = _Basis.cast(mx_basis, dim, sparse=sparse_bases) @@ -1051,13 +1029,19 @@ def coefficients(self, return_basis=False, logscale_nonham=False, label_type='gl if blk._basis not in bases: bases.add(blk._basis) - if label_type=='global': + first_key = next(iter(elem_errorgens)) + if label_type=='global' and isinstance(first_key, _LocalElementaryErrorgenLabel): #convert to *global* elementary errorgen labels identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels elem_errorgens = {_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q): value for local_eeg_lbl, value in elem_errorgens.items()} - + elif label_type=='local' and isinstance(first_key, _GlobalElementaryErrorgenLabel): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
+ sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_LocalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q): value + for local_eeg_lbl, value in elem_errorgens.items()} + if logscale_nonham: dim = self.dim for k in elem_errorgens.keys(): @@ -1095,12 +1079,19 @@ def coefficient_labels(self, label_type='global'): #labels.extend(blk.coefficent_labels) labels.extend(blk.elementary_errorgens.keys()) - if label_type == 'global': - #convert to *global* elementary errorgen labels + first_label = labels[0] if len(labels)>0 else None + + if label_type == 'global' and isinstance(first_label, _LocalElementaryErrorgenLabel): + #convert to *global* elementary errorgen labels identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels labels = [_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) for local_eeg_lbl in labels] + elif label_type=='local' and isinstance(first_label, _GlobalElementaryErrorgenLabel): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + labels = [_LocalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) + for local_eeg_lbl in labels] return tuple(labels) diff --git a/pygsti/modelmembers/states/composedstate.py b/pygsti/modelmembers/states/composedstate.py index 6189c13f9..66e754085 100644 --- a/pygsti/modelmembers/states/composedstate.py +++ b/pygsti/modelmembers/states/composedstate.py @@ -781,7 +781,7 @@ def errorgen_coefficient_labels(self, label_type='global'): A tuple of (, [,= num_qubits, 'Specified `num_qubits` is less than the length of the basis element labels.' 
- - if errorgen_typ == 'H' or errorgen_typ == 'S': - pauli_string = num_qubits*['I'] - pauli_lbl = pauli_lbls[0] - for i, sslbl in enumerate(sslbls): - pauli_string[sslbl] = pauli_lbl[i] - pauli_string = stim.PauliString(''.join(pauli_string)) - return (pauli_string,) - elif errorgen_typ == 'C' or errorgen_typ == 'A': - pauli_strings = [] - for pauli_lbl in pauli_lbls: #iterate through both pauli labels + + if isinstance(err_gen_coeff_label, _GEEL): + #the coefficient label is a tuple with 3 elements. + #The first element is the error generator type. + #the second element is a tuple of paulis either of length 1 or 2 depending on the error gen type. + #the third element is a tuple of subsystem labels. + errorgen_typ = err_gen_coeff_label.errorgen_type + pauli_lbls = err_gen_coeff_label.basis_element_labels + sslbls = err_gen_coeff_label.support + + #double check that the number of qubits specified is greater than or equal to the length of the + #basis element labels. + #assert len(pauli_lbls) >= num_qubits, 'Specified `num_qubits` is less than the length of the basis element labels.' 
+ + if errorgen_typ == 'H' or errorgen_typ == 'S': pauli_string = num_qubits*['I'] + pauli_lbl = pauli_lbls[0] for i, sslbl in enumerate(sslbls): pauli_string[sslbl] = pauli_lbl[i] - pauli_strings.append(stim.PauliString(''.join(pauli_string))) - return tuple(pauli_strings) + pauli_string = stim.PauliString(''.join(pauli_string)) + return (pauli_string,) + elif errorgen_typ == 'C' or errorgen_typ == 'A': + pauli_strings = [] + for pauli_lbl in pauli_lbls: #iterate through both pauli labels + pauli_string = num_qubits*['I'] + for i, sslbl in enumerate(sslbls): + pauli_string[sslbl] = pauli_lbl[i] + pauli_strings.append(stim.PauliString(''.join(pauli_string))) + return tuple(pauli_strings) + else: + raise ValueError(f'Unsupported error generator type {errorgen_typ}') + elif isinstance(err_gen_coeff_label, _LEEL): + return tuple([stim.PauliString(bel) for bel in err_gen_coeff_label.basis_element_labels]) + else: - raise ValueError(f'Unsupported error generator type {errorgen_typ}') - + raise ValueError('Only `GlobalElementaryErrorgenLabel and LocalElementaryErrorgenLabel is currently supported.') def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_threshold=1e-14): """ @@ -157,6 +161,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #third order BCH terms # (1/12)*([X,[X,Y]] - [Y,[X,Y]]) + #TODO: Can make this more efficient by using linearity of commutators elif curr_order == 2: #we've already calculated (1/2)*[X,Y] in the previous order, so reuse this result. 
#two different lists for the two different commutators so that we can more easily reuse @@ -202,6 +207,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th third_order_rate = third_order_comm_dict_1.get(lbl, 0) + third_order_comm_dict_2.get(lbl, 0) if abs(third_order_rate) > truncation_threshold: third_order_comm_dict[lbl] = third_order_rate + #print(f'{third_order_comm_dict=}') new_errorgen_layer.append(third_order_comm_dict) #fourth order BCH terms @@ -233,8 +239,11 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th fourth_order_comm_dict[error_tuple[0]] += error_tuple[1] #drop any terms below the truncation threshold after aggregation + #print(f'{fourth_order_comm_dict=}') fourth_order_comm_dict = {key: val for key, val in fourth_order_comm_dict.items() if abs(val)>truncation_threshold} new_errorgen_layer.append(fourth_order_comm_dict) + #print(f'{fourth_order_comm_dict=}') + else: raise NotImplementedError("Higher orders beyond fourth order are not implemented yet.") From e8ea6b8e67ad0c9a1e6d73cb82d26f8e9dfe8a08 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 20 Nov 2024 21:14:43 -0700 Subject: [PATCH 031/102] Fix bugs introduced into EmbeddedOp Fix some bugs I introduced in the updated version of the error generator dictionary code for EmbeddedOp. Now avoids editing labels in place which was breaking things. Also refactors the code to reduce duplication. Add an option for caching the embedded error generator labels to avoid needing to regenerate these unnecessarily. 
--- .../errorpropagator_dev.py | 2 - pygsti/modelmembers/operations/embeddedop.py | 79 +++++++++++-------- 2 files changed, 47 insertions(+), 34 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 3c09cd943..6d813138c 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -194,7 +194,6 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): #TODO: Add proper inferencing for number of qubits: assert circuit.line_labels is not None and circuit.line_labels != ('*',) errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) - #propagate the errorgen_layers through the propagation_layers to get a list #of end of circuit error generator dictionaries. propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) @@ -654,7 +653,6 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_ if isinstance(mx_basis, str): if set(self.model.basis.name.split('*')) == set([mx_basis]) or self.model.basis.name==mx_basis: mx_basis = self.model.basis - global_errorgen_coeffs = [coeff_lbl.to_global_eel() for coeff_lbl in errorgen_layer.keys()] coeff_dict = {lbl:_np.real_if_close(val) for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 6e140f94c..4f3e2e95b 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -60,6 +60,10 @@ def __init__(self, state_space, target_labels, operation_to_embed, allocated_to_ evotype = operation_to_embed._evotype rep = self._create_rep_object(evotype, state_space) + self._cached_embedded_errorgen_labels_global = None + self._cached_embedded_errorgen_labels_local = None + 
self._cached_embedded_label_identity_label = None + _LinearOperator.__init__(self, rep, evotype) self.init_gpindices(allocated_to_parent) # initialize our gpindices based on sub-members if self._rep_type == 'dense': self._update_denserep() @@ -605,31 +609,34 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label keys of `lindblad_term_dict` to basis matrices. """ #*** Note: this function is nearly identical to EmbeddedErrorgen.coefficients() *** - embedded_coeffs = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) + coeffs_to_embed = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) #print(f'{embedded_coeffs=}') - - if embedded_coeffs: - first_coeff_lbl = next(iter(embedded_coeffs)) - if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): + + if coeffs_to_embed: + embedded_labels = self.errorgen_coefficient_labels(label_type=label_type, identity_label=identity_label) + embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} + #first_coeff_lbl = next(iter(coeffs_to_embed)) + #if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): # if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - embedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in embedded_coeffs.items()} - elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): - embedded_labels = list(embedded_coeffs.keys()) - #use different embedding scheme for local labels - base_label = [identity_label for _ in range(self.state_space.num_qudits)] - for lbl in embedded_labels: - new_bels = [] - for bel in lbl.basis_element_labels: - base_label = [identity_label for _ in range(self.state_space.num_qudits)] - for target, pauli in zip(self.target_labels, bel): - base_label[target] = pauli - 
new_bels.append(''.join(base_label)) - lbl.basis_element_labels = tuple(new_bels) - embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, embedded_coeffs.values())} - else: - raise ValueError(f'Invalid error generator label type {first_coeff_lbl}') + #mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + # self.target_labels)} + #embedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in coeffs_to_embed.items()} + #elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): + # embedded_labels = self.errorgen_coefficient_labels() + # #use different embedding scheme for local labels + # base_label = [identity_label for _ in range(self.state_space.num_qudits)] + # embedded_labels = [] + # for lbl in coeff_lbls_to_embed: + # new_bels = [] + # for bel in lbl.basis_element_labels: + # base_label = [identity_label for _ in range(self.state_space.num_qudits)] + # for target, pauli in zip(self.target_labels, bel): + # base_label[target] = pauli + # new_bels.append(''.join(base_label)) + # embedded_labels.append(_LocalElementaryErrorgenLabel(lbl.errorgen_type, tuple(new_bels))) + # embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} + #else: + # raise ValueError(f'Invalid error generator label type {first_coeff_lbl}') return embedded_coeffs @@ -658,31 +665,39 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): A tuple of (, [,0: - if isinstance(embedded_labels[0], _GlobalElementaryErrorgenLabel): + if len(labels_to_embed)>0: + if isinstance(labels_to_embed[0], _GlobalElementaryErrorgenLabel): mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, self.target_labels)} - embedded_labels = [k.map_state_space_labels(mapdict) for k in embedded_labels] - elif isinstance(embedded_labels[0], _LocalElementaryErrorgenLabel): + embedded_labels = [k.map_state_space_labels(mapdict) for k in 
labels_to_embed] + self._cached_embedded_errorgen_labels_global = embedded_labels + elif isinstance(labels_to_embed[0], _LocalElementaryErrorgenLabel): #use different embedding scheme for local labels + embedded_labels = [] base_label = [identity_label for _ in range(self.state_space.num_qudits)] - for lbl in embedded_labels: + for lbl in labels_to_embed: new_bels = [] for bel in lbl.basis_element_labels: base_label = [identity_label for _ in range(self.state_space.num_qudits)] for target, pauli in zip(self.target_labels, bel): base_label[target] = pauli new_bels.append(''.join(base_label)) - lbl.basis_element_labels = tuple(new_bels) + embedded_labels.append(_LocalElementaryErrorgenLabel(lbl.errorgen_type, tuple(new_bels))) + self._cached_embedded_errorgen_labels_local = embedded_labels + self._cached_embedded_label_identity_label = identity_label else: - raise ValueError(f'Invalid error generator label type {embedded_labels[0]}') - #print(f'{embedded_labels=}') + raise ValueError(f'Invalid error generator label type {labels_to_embed[0]}') return embedded_labels def errorgen_coefficients_array(self): From 5b341940429209d7e36006f9e3e24a4464b4cee1 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 20 Nov 2024 21:24:50 -0700 Subject: [PATCH 032/102] Another EmbeddedOp bugfix Add handling for an edge case with the error generator dictionary code. 
--- pygsti/modelmembers/operations/embeddedop.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 4f3e2e95b..543791dab 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -637,6 +637,8 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label # embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} #else: # raise ValueError(f'Invalid error generator label type {first_coeff_lbl}') + else: + embedded_coeffs = dict() return embedded_coeffs @@ -680,7 +682,7 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): if isinstance(labels_to_embed[0], _GlobalElementaryErrorgenLabel): mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, self.target_labels)} - embedded_labels = [k.map_state_space_labels(mapdict) for k in labels_to_embed] + embedded_labels = tuple([k.map_state_space_labels(mapdict) for k in labels_to_embed]) self._cached_embedded_errorgen_labels_global = embedded_labels elif isinstance(labels_to_embed[0], _LocalElementaryErrorgenLabel): #use different embedding scheme for local labels @@ -694,10 +696,14 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): base_label[target] = pauli new_bels.append(''.join(base_label)) embedded_labels.append(_LocalElementaryErrorgenLabel(lbl.errorgen_type, tuple(new_bels))) + embedded_labels = tuple(embedded_labels) self._cached_embedded_errorgen_labels_local = embedded_labels self._cached_embedded_label_identity_label = identity_label else: raise ValueError(f'Invalid error generator label type {labels_to_embed[0]}') + else: + embedded_labels = tuple() + return embedded_labels def errorgen_coefficients_array(self): From 8406a42118d0316e9a520ac283916312b3354568 Mon Sep 17 00:00:00 2001 
From: Corey Ostrove Date: Wed, 20 Nov 2024 22:45:41 -0700 Subject: [PATCH 033/102] Minor performance updates Reduce the need to convert to local labels when we're casting from a local label. Also update code to pass in a value for the initial_label kwarg in cases where we already have a local label handy. Improve hashing performance for LocalStimErrorgenLabel by storing a hashable representation of the pauli strings, rather than needing to recompute this every time. Micro optimizations for the stim tableau propagation method. --- pygsti/circuits/circuit.py | 2 +- .../errorpropagator_dev.py | 9 +++- .../errorgenpropagation/localstimerrorgen.py | 50 ++++++++++++------- pygsti/modelmembers/operations/composedop.py | 2 +- 4 files changed, 42 insertions(+), 21 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index d1677c542..f284ee747 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3447,7 +3447,7 @@ def num_gates(self): """ if self._static: def cnt(lbl): # obj a Label, perhaps compound - if lbl.is_simple(): # a simple label + if lbl.IS_SIMPLE: # a simple label return 1 if (lbl.sslbls is not None) else 0 else: return sum([cnt(sublbl) for sublbl in lbl.components]) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 6d813138c..32873ff5e 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -9,6 +9,7 @@ from .utilserrorgenpropagation import * import copy as _copy from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel import pygsti.tools.errgenproptools as _eprop import pygsti.tools.basistools as _bt import pygsti.tools.matrixtools as _mt @@ -452,13 +453,17 @@ def construct_errorgen_layers(self, circuit, num_qubits, 
include_spam=True, incl for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary #only track this error generator if its rate is not exactly zero. #TODO: Add more flexible initial truncation logic. if rate !=0 or fixed_rate is not None: + if isinstance(errgen_coeff_lbl, _LocalElementaryErrogenLabel): + initial_label = errgen_coeff_lbl + else: + initial_label = None #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) if include_circuit_time: #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j)] = rate if fixed_rate is None else fixed_rate + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j, initial_label=initial_label)] = rate if fixed_rate is None else fixed_rate else: - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis)] = rate if fixed_rate is None else fixed_rate + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, initial_label=initial_label)] = rate if fixed_rate is None else fixed_rate error_gen_dicts_by_layer.append(errorgen_layer) return error_gen_dicts_by_layer diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index bd191a004..f834c0201 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -52,16 +52,19 @@ def cast(cls, obj, sslbls=None): #convert to a tuple representation assert sslbls is not None, 'Must specify sslbls when casting from `GlobalElementaryErrorgenLabel`.' 
obj = (obj.errorgen_type, obj.basis_element_labels, obj.sslbls) + initial_label=None if isinstance(obj, _LEEL): #convert to a tuple representation + initial_label = obj obj = (obj.errorgen_type, obj.basis_element_labels) if isinstance(obj, (tuple, list)): #In this case assert that the first element of the tuple is a string corresponding to the #error generator type. errorgen_type = obj[0] - + initial_label = None + #two elements for a local label and three for a global one #second element should have the basis element labels assert len(obj)==2 or len(obj)==3 and isinstance(obj[1], (tuple, list)) @@ -93,7 +96,7 @@ def cast(cls, obj, sslbls=None): else: raise ValueError('Only str and `stim.PauliString` basis element labels are supported presently.') - return cls(errorgen_type, stim_bels) + return cls(errorgen_type, stim_bels, initial_label=initial_label) def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initial_label=None, @@ -108,7 +111,7 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi an element of. Allowed values are 'H', 'S', 'C' and 'A'. basis_element_labels : tuple or list - A list or tuple of strings labeling basis elements used to label this error generator. + A list or tuple of stim.PauliString labeling basis elements used to label this error generator. This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' type. @@ -140,11 +143,12 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi else: self.initial_label = self.to_local_eel() + self._hashable_basis_element_labels = tuple([str(pauli) for pauli in self.basis_element_labels]) + #TODO: Update various methods to account for additional metadata that has been added. 
 def __hash__(self): - pauli_hashable = [str(pauli) for pauli in self.basis_element_labels] - return hash((self.errorgen_type, tuple(pauli_hashable))) + return hash((self.errorgen_type, self._hashable_basis_element_labels)) def bel_to_strings(self): """ @@ -226,22 +230,34 @@ def propagate_error_gen_tableau(self, slayer, weight): """ Parameters ---------- - slayer : + slayer : `stim.Tableau` + `stim.Tableau` object corresponding to an ideal Clifford operation for + a circuit layer which we will be propagating this error generator through. weight : float - + Current weight of this error generator. + + Returns + ------- + tuple consisting of a `LocalStimErrorgenLabel` and an updated error generator + weight, which may have changed by a sign. """ - if self.errorgen_type =='Z' or self.errorgen_type=='I': - return (self, weight) + #if self.errorgen_type =='Z' or self.errorgen_type=='I': + # return (self, weight) new_basis_labels = [] - weightmod = 1 - for pauli in self.basis_element_labels: - temp = slayer(pauli) - weightmod=_np.real(temp.sign) * weightmod - temp=temp*temp.sign - new_basis_labels.append(temp) - if self.errorgen_type =='S': - weightmod=1.0 + weightmod = 1.0 + if self.errorgen_type == 'S': + for pauli in self.basis_element_labels: + temp = slayer(pauli) + temp = temp*temp.sign + new_basis_labels.append(temp) + else: + for pauli in self.basis_element_labels: + temp = slayer(pauli) + temp_sign = temp.sign + weightmod = temp_sign.real*weightmod + temp = temp*temp_sign + new_basis_labels.append(temp) return (LocalStimErrorgenLabel(self.errorgen_type, new_basis_labels, initial_label=self.initial_label, circuit_time=self.circuit_time), weightmod*weight) diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 4b3b138d6..5a24ee1c4 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -711,7 +711,7 @@ def errorgen_coefficients(self, 
return_basis=False, logscale_nonham=False, label A Basis mapping the basis labels used in the keys of `lindblad_term_dict` to basis matrices. """ - #*** Note: this function is nearly identitcal to ComposedErrorgen.coefficients() *** + #*** Note: this function is nearly identical to ComposedErrorgen.coefficients() *** Ltermdict = dict() basisdict = dict() first_nonempty_basis = None From 4e64427cd675a74be5624a64c2476faf10ed9bd0 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 21:46:12 -0700 Subject: [PATCH 034/102] Error gen label hash cache Add caching for the hashes of error generator labels to boost performance of error generator coefficient dictionary manipulation. This is done a lot in the error generator propagation code at present. --- pygsti/baseobjs/errorgenlabel.py | 28 ++++++++++++++++++++++++++-- pygsti/tools/internalgates.py | 4 ++-- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/pygsti/baseobjs/errorgenlabel.py b/pygsti/baseobjs/errorgenlabel.py index a900be7da..3e0324aba 100644 --- a/pygsti/baseobjs/errorgenlabel.py +++ b/pygsti/baseobjs/errorgenlabel.py @@ -100,9 +100,21 @@ def __init__(self, errorgen_type, basis_element_labels): self.errorgen_type = str(errorgen_type) self.basis_element_labels = tuple(basis_element_labels) + self._hash = hash((self.errorgen_type, self.basis_element_labels)) def __hash__(self): - return hash((self.errorgen_type, self.basis_element_labels)) + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.errorgen_type, self.basis_element_labels)) def __eq__(self, other): return (self.errorgen_type == other.errorgen_type @@ -205,9 +217,21 @@ def __init__(self, errorgen_type, basis_element_labels, sslbls, sort=True): self.sslbls = tuple(sslbls) # Note: each element of 
basis_element_labels must be an iterable over # 1-qubit basis labels of length len(self.sslbls) (?) + self._hash = hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) def __hash__(self): - return hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) def __eq__(self, other): return (self.errorgen_type == other.errorgen_type diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index 9f2a4a9d4..51141f67f 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -385,7 +385,7 @@ def standard_gatenames_stim_conversions(): import stim except ImportError: raise ImportError("Stim is required for this operation, and it does not appear to be installed.") - pyGSTi_to_stim_GateDict={ + gate_dict = { 'Gi' : stim.Tableau.from_named_gate('I'), 'Gxpi' : stim.Tableau.from_named_gate('X'), 'Gypi' : stim.Tableau.from_named_gate('Y'), @@ -405,7 +405,7 @@ def standard_gatenames_stim_conversions(): 'Gswap' : stim.Tableau.from_named_gate('SWAP'), 'Gcphase' : stim.Tableau.from_named_gate('CZ') } - return pyGSTi_to_stim_GateDict + return gate_dict def standard_gatenames_cirq_conversions(): """ From 8767ea928e8f6cfcc1003d66609fd456ce1406b2 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 21:47:53 -0700 Subject: [PATCH 035/102] Make StateSpace more staticish Protect the rest of the attribute for the various StateSpace subclasses we use. This is in service of a change for improved performance which caches the hash for state spaces to accelerate look ups involving them. 
--- pygsti/baseobjs/statespace.py | 108 ++++++++++++++++++++++++++-------- 1 file changed, 85 insertions(+), 23 deletions(-) diff --git a/pygsti/baseobjs/statespace.py b/pygsti/baseobjs/statespace.py index a4cbe676e..163a57a96 100644 --- a/pygsti/baseobjs/statespace.py +++ b/pygsti/baseobjs/statespace.py @@ -656,33 +656,63 @@ class QuditSpace(StateSpace): def __init__(self, nqudits_or_labels, udim_or_udims): super().__init__() if isinstance(nqudits_or_labels, int): - self.qudit_labels = tuple(range(nqudits_or_labels)) + self._qudit_labels = tuple(range(nqudits_or_labels)) else: - self.qudit_labels = tuple(nqudits_or_labels) + self._qudit_labels = tuple(nqudits_or_labels) if isinstance(udim_or_udims, int): - self.qudit_udims = tuple([udim_or_udims] * len(self.qudit_labels)) + self._qudit_udims = tuple([udim_or_udims] * len(self._qudit_labels)) else: - self.qudit_udims = tuple(udim_or_udims) - assert(len(self.qudit_udims) == len(self.qudit_labels)), \ + self._qudit_udims = tuple(udim_or_udims) + assert(len(self._qudit_udims) == len(self._qudit_labels)), \ "`udim_or_udims` must either be an interger or have length equal to the number of qudits!" 
+ + #This state space is effectively static, so we can precompute the hash for it for performance + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) + + def __hash__(self): + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) def _to_nice_serialization(self): state = super()._to_nice_serialization() - state.update({'qudit_labels': self.qudit_labels, - 'qudit_udims': self.qudit_udims}) + state.update({'qudit_labels': self._qudit_labels, + 'qudit_udims': self._qudit_udims}) return state @classmethod def _from_nice_serialization(cls, state): return cls(state['qudit_labels'], state['qudit_udims']) + @property + def qudit_labels(self): + """The labels of the qudits in this state space.""" + return self._qudit_labels + + @property + def qudit_udims(self): + """Integer Hilbert (unitary operator) space dimensions of the qudits in this quantum state space.""" + @property def udim(self): """ Integer Hilbert (unitary operator) space dimension of this quantum state space. """ - return _np.prod(self.qudit_udims) + return _np.prod(self._qudit_udims) @property def dim(self): @@ -694,7 +724,7 @@ def num_qudits(self): # may raise ValueError if the state space doesn't consist """ The number of qubits in this quantum state space. 
""" - return len(self.qudit_labels) + return len(self._qudit_labels) @property def num_tensor_product_blocks(self): @@ -716,7 +746,7 @@ def tensor_product_blocks_labels(self): ------- tuple of tuples """ - return (self.qudit_labels,) + return (self._qudit_labels,) @property def tensor_product_blocks_dimensions(self): @@ -727,7 +757,7 @@ def tensor_product_blocks_dimensions(self): ------- tuple of tuples """ - return (tuple([udim**2 for udim in self.qudit_udims]),) + return (tuple([udim**2 for udim in self._qudit_udims]),) @property def tensor_product_blocks_udimensions(self): @@ -738,7 +768,7 @@ def tensor_product_blocks_udimensions(self): ------- tuple of tuples """ - return (self.qudit_udims,) + return (self._qudit_udims,) @property def tensor_product_blocks_types(self): @@ -749,7 +779,7 @@ def tensor_product_blocks_types(self): ------- tuple of tuples """ - return (('Q',) * len(self.qudit_labels)) + return (('Q',) * len(self._qudit_labels)) def label_dimension(self, label): """ @@ -764,9 +794,9 @@ def label_dimension(self, label): ------- int """ - if label in self.qudit_labels: - i = self.qudit_labels.index(label) - return self.qudit_udims[i]**2 + if label in self._qudit_labels: + i = self._qudit_labels.index(label) + return self._qudit_udims[i]**2 else: raise KeyError("Invalid qudit label: %s" % label) @@ -783,9 +813,9 @@ def label_udimension(self, label): ------- int """ - if label in self.qudit_labels: - i = self.qudit_labels.index(label) - return self.qudit_udims[i] + if label in self._qudit_labels: + i = self._qudit_labels.index(label) + return self._qudit_udims[i] else: raise KeyError("Invalid qudit label: %s" % label) @@ -802,7 +832,7 @@ def label_tensor_product_block_index(self, label): ------- int """ - if label in self.qudit_labels: + if label in self._qudit_labels: return 0 else: raise KeyError("Invalid qudit label: %s" % label) @@ -820,13 +850,13 @@ def label_type(self, label): ------- str """ - if label in self.qudit_labels: + if label in 
self._qudit_labels: return 'Q' else: raise KeyError("Invalid qudit label: %s" % label) def __str__(self): - return 'QuditSpace(' + str(self.qudit_labels) + ")" + return 'QuditSpace(' + str(self._qudit_labels) + ")" class QubitSpace(QuditSpace): @@ -861,7 +891,7 @@ def dim(self): @property def qubit_labels(self): """The labels of the qubits""" - return self.qudit_labels + return self._qudit_labels @property def num_qubits(self): # may raise ValueError if the state space doesn't consist entirely of qubits @@ -1157,6 +1187,27 @@ def is_label(x): elif all([typ == 'Q' for typ in self.label_types.values()]): self._nqudits = len(self.labels[0]) + #This state space is effectively static, so we can precompute the hash for it for performance + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) + + def __hash__(self): + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) + def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'labels': self.labels, @@ -1169,6 +1220,17 @@ def _to_nice_serialization(self): def _from_nice_serialization(cls, state): return cls(state['labels'], state['unitary_space_dimensions'], state['types']) + @property + def labels(self): + """ + The labels for all the tensor-product blocks. 
+ + Returns + ------- + tuple of tuples + """ + return self.labels + @property def udim(self): """ From c8f0443f89d7c3d130eb219cc79655a7cc051e78 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 21:48:58 -0700 Subject: [PATCH 036/102] Rework circuit to stim tableau conversion Rework the implementation of the conversion to stim Tableaus. About twice as fast as the previous implementation by reducing the number of expensive method calls. --- pygsti/circuits/circuit.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index f284ee747..ef1f4938e 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3729,18 +3729,20 @@ def _write_q_circuit_tex(self, filename): # TODO f.close() - def convert_to_stim_tableau_layers(self,gate_name_conversions=None): + def convert_to_stim_tableau_layers(self, gate_name_conversions=None, num_qubits=None): """ Converts this circuit to a list of stim tableau layers Parameters ---------- - gate_name_conversions : Dict - A map from pygsti gatenames to standard stim tableaus. If set to None a standard set of gate names is used + gate_name_conversions : dict, optional (default None) + A map from pygsti gatenames to standard stim tableaus. + If None a standard set of gate names is used from + `pygsti.tools.internalgates` Returns ------- - A layer by layer list of stim tabluaes + A layer by layer list of stim tableaus """ try: import stim @@ -3749,14 +3751,23 @@ def convert_to_stim_tableau_layers(self,gate_name_conversions=None): if gate_name_conversions is None: gate_name_conversions = _itgs.standard_gatenames_stim_conversions() - qubits=len(self.line_labels) + if num_qubits is None: + line_labels = self._line_labels + assert line_labels != ('*',), "Cannot convert circuits with placeholder line label to stim Tableau unless number of qubits is specified." 
+ num_qubits=len(line_labels) + stim_layers=[] - for j in range(self.depth): - layer = self.layer(j) - stim_layer=stim.Tableau(qubits) + + if self._static: + circuit_layers = [layer.components for layer in self._labels] + else: + circuit_layers = self._labels + empty_tableau = stim.Tableau(num_qubits) + for layer in circuit_layers: + stim_layer = empty_tableau.copy() for sub_lbl in layer: temp = gate_name_conversions[sub_lbl.name] - stim_layer.append(temp,sub_lbl.qubits) + stim_layer.append(temp, sub_lbl.qubits) stim_layers.append(stim_layer) return stim_layers From d2509a114ae3c0fdc5c6847538b243ba4f9f54d2 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 21:51:21 -0700 Subject: [PATCH 037/102] Coefficient dict performance tweaks Makes two tweaks to the implementation of the coefficient dictionary retrieval. The first moves some expensive basis handling code that is only needed when we're returning the basis for each block into a conditional block. The second reworks a loop in ComposedOp to reduce the number of dictionary look ups (and hashes). 
--- pygsti/modelmembers/operations/composedop.py | 7 +++---- .../modelmembers/operations/lindbladerrorgen.py | 15 ++++++++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 5a24ee1c4..ad2a7599a 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -752,10 +752,9 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label ltdict = factor_coeffs for key, coeff in ltdict.items(): - if key in Ltermdict: - Ltermdict[key] += coeff - else: - Ltermdict[key] = coeff + Ltermdict[key] = coeff + Ltermdict.get(key, 0) + + Ltermdict = dict(Ltermdict) if return_basis: #Use constant_basis or turn basisdict into a Basis to return diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 07cad9d7a..bb98a47ed 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -1023,11 +1023,16 @@ def coefficients(self, return_basis=False, logscale_nonham=False, label_type='gl assert label_type=='global' or label_type=='local', "Allowed values of label_type are 'global' and 'local'." elem_errorgens = {} - bases = set() - for blk in self.coefficient_blocks: - elem_errorgens.update(blk.elementary_errorgens) - if blk._basis not in bases: - bases.add(blk._basis) + + if return_basis: + bases = set() + for blk in self.coefficient_blocks: + elem_errorgens.update(blk.elementary_errorgens) + if blk._basis not in bases: + bases.add(blk._basis) + else: #split this off to avoid expensive basis hashing and equivalence checking if not needed. 
+ for blk in self.coefficient_blocks: + elem_errorgens.update(blk.elementary_errorgens) first_key = next(iter(elem_errorgens)) if label_type=='global' and isinstance(first_key, _LocalElementaryErrorgenLabel): From 9da5f8370910609784e2bbd7976ccc79146966ce Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 21:53:03 -0700 Subject: [PATCH 038/102] More Minor Tweaks Refactor the order of certain conditionals, and make a handful of other small changes to the implementation. --- .../errorpropagator_dev.py | 15 ++++++++------ .../errorgenpropagation/localstimerrorgen.py | 20 ++++++++++++------- pygsti/tools/errgenproptools.py | 10 +++++----- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 32873ff5e..847e6bf87 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -453,17 +453,20 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary #only track this error generator if its rate is not exactly zero. #TODO: Add more flexible initial truncation logic. 
if rate !=0 or fixed_rate is not None: - if isinstance(errgen_coeff_lbl, _LocalElementaryErrogenLabel): - initial_label = errgen_coeff_lbl - else: - initial_label = None + #if isinstance(errgen_coeff_lbl, _LocalElementaryErrogenLabel): + initial_label = errgen_coeff_lbl + #else: + # initial_label = None #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) + pauli_strs = errgen_coeff_lbl.basis_element_labels #get the original python string reps from local labels if include_circuit_time: #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j, initial_label=initial_label)] = rate if fixed_rate is None else fixed_rate + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j, + initial_label=initial_label, pauli_str_reps=pauli_strs)] = rate if fixed_rate is None else fixed_rate else: - errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, initial_label=initial_label)] = rate if fixed_rate is None else fixed_rate + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, initial_label=initial_label, + pauli_str_reps=pauli_strs)] = rate if fixed_rate is None else fixed_rate error_gen_dicts_by_layer.append(errorgen_layer) return error_gen_dicts_by_layer diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index f834c0201..c6a4c6bc5 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -100,7 +100,7 @@ def cast(cls, obj, sslbls=None): def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initial_label=None, - label=None): + label=None, pauli_str_reps=None): """ Create a new instance of `LocalStimErrorgenLabel` @@ -130,10 +130,13 @@ def 
__init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi An optional label string which is included when printing the string representation of this label. + pauli_str_reps : tuple of str, optional (default None) + Optional tuple of python strings corresponding to the stim.PauliStrings in basis_element_labels. + When specified can speed up construction of hashable label representations. """ - self.errorgen_type=str(errorgen_type) - self.basis_element_labels=tuple(basis_element_labels) - self.label=label + self.errorgen_type = errorgen_type + self.basis_element_labels = tuple(basis_element_labels) + self.label = label self.circuit_time = circuit_time #additionally store a copy of the value of the original error generator label which will remain unchanged @@ -143,7 +146,10 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi else: self.initial_label = self.to_local_eel() - self._hashable_basis_element_labels = tuple([str(pauli) for pauli in self.basis_element_labels]) + if pauli_str_reps is not None: + self._hashable_basis_element_labels = pauli_str_reps + else: + self._hashable_basis_element_labels = tuple([str(pauli) for pauli in self.basis_element_labels]) #TODO: Update various methods to account for additional metadata that has been added. @@ -163,8 +169,8 @@ def __eq__(self, other): Performs equality check by seeing if the two error gen labels have the same `errorgen_type` and `basis_element_labels`. 
""" - return isinstance(other, LocalStimErrorgenLabel) and self.errorgen_type == other.errorgen_type \ - and self.basis_element_labels == other.basis_element_labels + return self.errorgen_type == other.errorgen_type and self.basis_element_labels == other.basis_element_labels \ + and isinstance(other, LocalStimErrorgenLabel) def __str__(self): diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 99de145b5..81c26f4ec 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -42,7 +42,10 @@ def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): """ - if isinstance(err_gen_coeff_label, _GEEL): + if isinstance(err_gen_coeff_label, _LEEL): + return tuple([stim.PauliString(bel) for bel in err_gen_coeff_label.basis_element_labels]) + + elif isinstance(err_gen_coeff_label, _GEEL): #the coefficient label is a tuple with 3 elements. #The first element is the error generator type. #the second element is a tuple of paulis either of length 1 or 2 depending on the error gen type. @@ -72,9 +75,6 @@ def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): return tuple(pauli_strings) else: raise ValueError(f'Unsupported error generator type {errorgen_typ}') - elif isinstance(err_gen_coeff_label, _LEEL): - return tuple([stim.PauliString(bel) for bel in err_gen_coeff_label.basis_element_labels]) - else: raise ValueError('Only `GlobalElementaryErrorgenLabel and LocalElementaryErrorgenLabel is currently supported.') @@ -139,7 +139,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #I *think* you can pick up at most around a factor of 8 from the commutator #itself. Someone should validate that. Set this conservatively, but also #avoid computing commutators which will be effectively zero. 
- if abs(weight) < 10*truncation_threshold: + if abs(weight) < truncation_threshold: continue commuted_errgen_sublist = error_generator_commutator(error1, error2, weight= weight) From 6eacb6051761c94d10614cb056d640baffd5257f Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 22:28:15 -0700 Subject: [PATCH 039/102] Minor bug fix Fix a bug introduced that resulted in inconsistent strings being used for the hash string. --- pygsti/errorgenpropagation/localstimerrorgen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index c6a4c6bc5..c81d3c080 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -149,7 +149,7 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi if pauli_str_reps is not None: self._hashable_basis_element_labels = pauli_str_reps else: - self._hashable_basis_element_labels = tuple([str(pauli) for pauli in self.basis_element_labels]) + self._hashable_basis_element_labels = self.bel_to_strings() #TODO: Update various methods to account for additional metadata that has been added. From eaf54d72875263108b076a3fe38416534908ec44 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 23 Nov 2024 23:38:04 -0700 Subject: [PATCH 040/102] Minor Refactors Refactor the hash and initialization for the LocalStimErrorgenLabel class. Also tweak the commutator calculation to reduce the amount of hashing performed. 
--- .../errorgenpropagation/localstimerrorgen.py | 20 ++++---- pygsti/tools/errgenproptools.py | 46 +++++++++++-------- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index c81d3c080..eb8ca1b1e 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -139,23 +139,25 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi self.label = label self.circuit_time = circuit_time + if pauli_str_reps is not None: + self._hashable_basis_element_labels = pauli_str_reps + self._hashable_string_rep = self.errorgen_type.join(pauli_str_reps) + else: + self._hashable_basis_element_labels = self.bel_to_strings() + self._hashable_string_rep = self.errorgen_type.join(self.bel_to_strings()) + #additionally store a copy of the value of the original error generator label which will remain unchanged #during the course of propagation for later bookkeeping purposes. if initial_label is not None: self.initial_label = initial_label else: self.initial_label = self.to_local_eel() - - if pauli_str_reps is not None: - self._hashable_basis_element_labels = pauli_str_reps - else: - self._hashable_basis_element_labels = self.bel_to_strings() - #TODO: Update various methods to account for additional metadata that has been added. 
def __hash__(self): - return hash((self.errorgen_type, self._hashable_basis_element_labels)) - + #return hash((self.errorgen_type, self._hashable_basis_element_labels)) + return hash(self._hashable_string_rep) + def bel_to_strings(self): """ Convert the elements of `basis_element_labels` to python strings @@ -296,6 +298,6 @@ def to_local_eel(self): ------- `LocalElementaryErrorgenLabel` """ - return _LEEL(self.errorgen_type, self.bel_to_strings()) + return _LEEL(self.errorgen_type, self._hashable_basis_element_labels) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 81c26f4ec..585bca3ea 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -131,18 +131,22 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th elif curr_order == 1: #calculate the pairwise commutators between each of the error generators in current_errgen_dict_1 and #current_errgen_dict_2. + #precompute an identity string for comparisons in commutator calculations. + if errgen_layer_1: + identity = stim.PauliString('I'*len(next(iter(errgen_layer_1)).basis_element_labels[0])) commuted_errgen_list = [] - for error1 in errgen_layer_1.keys(): - for error2 in errgen_layer_2.keys(): + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in errgen_layer_2.items(): #get the list of error generator labels - weight = .5*errgen_layer_1[error1]*errgen_layer_2[error2] + #weight = .5*errgen_layer_1[error1]*errgen_layer_2[error2] + weight = .5*error1_val*error2_val #I *think* you can pick up at most around a factor of 8 from the commutator #itself. Someone should validate that. Set this conservatively, but also #avoid computing commutators which will be effectively zero. 
if abs(weight) < truncation_threshold: continue commuted_errgen_sublist = error_generator_commutator(error1, error2, - weight= weight) + weight= weight, identity=identity) commuted_errgen_list.extend(commuted_errgen_sublist) #print(f'{commuted_errgen_list=}') #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. @@ -168,25 +172,24 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #this at higher order if needed. commuted_errgen_list_1 = [] commuted_errgen_list_2 = [] - for error1a, error1b in zip(errgen_layer_1.keys(), errgen_layer_2.keys()): - for error2 in second_order_comm_dict: - second_order_comm_rate = second_order_comm_dict[error2] + for (error1a, error1a_val), (error1b, error1b_val) in zip(errgen_layer_1.items(), errgen_layer_2.items()): + for error2, error2_val in second_order_comm_dict.items(): #I *think* you can pick up at most around a factor of 8 from the commutator #itself. Someone should validate that. Set this conservatively, but also #avoid computing commutators which will be effectively zero. 
#only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator - weighta = (1/6)*errgen_layer_1[error1a]*second_order_comm_rate + weighta = (1/6)*error1a_val*error2_val if not abs(weighta) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1a, error2, - weight=weighta) + weight=weighta, identity=identity) commuted_errgen_list_1.extend(commuted_errgen_sublist) #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator - weightb = -(1/6)*errgen_layer_2[error1b]*second_order_comm_rate + weightb = -(1/6)*error1b_val*error2_val if not abs(weightb) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1b, error2, - weight=weightb) + weight=weightb, identity=identity) commuted_errgen_list_2.extend(commuted_errgen_sublist) #turn the two new commuted error generator lists into dictionaries. @@ -216,18 +219,17 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #we've already calculated (1/12)*[X,[X,Y]] so reuse this result. #this is stored in third_order_comm_dict_1 commuted_errgen_list = [] - for error1 in errgen_layer_2.keys(): - for error2 in third_order_comm_dict_1.keys(): + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in third_order_comm_dict_1.items(): #I *think* you can pick up at most around a factor of 8 from the commutator #itself. Someone should validate that. Set this conservatively, but also #avoid computing commutators which will be effectively zero. 
#only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator - weight = -.5*errgen_layer_2[error1]*third_order_comm_dict_1[error2] + weight = -.5*error1_val*error2_val if abs(weight) < truncation_threshold: - #print('continuing') continue commuted_errgen_sublist = error_generator_commutator(error1, error2, - weight=weight) + weight=weight, identity=identity) commuted_errgen_list.extend(commuted_errgen_sublist) #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. @@ -263,7 +265,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th return new_errorgen_layer_dict -def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0): +def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0, identity=None): """ Returns the commutator of two error generators. I.e. [errorgen_1, errorgen_2]. @@ -280,6 +282,12 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight weight : float, optional (default 1.0) An optional weighting value to apply to the value of the commutator. + + identity : stim.PauliString, optional (default None) + An optional stim.PauliString to use for comparisons to the identity. + Passing in this kwarg isn't necessary, but can allow for reduced + stim.PauliString creation when calling this function many times for + improved efficiency. Returns ------- @@ -308,7 +316,8 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight errorgen_2_bel_1 = errorgen_2.basis_element_labels[1] #create the identity stim.PauliString for later comparisons. 
- identity = stim.PauliString('I'*len(errorgen_1_bel_0)) + if identity is None: + identity = stim.PauliString('I'*len(errorgen_1_bel_0)) if errorgen_1_type=='H' and errorgen_2_type=='H': ptup = com(errorgen_1_bel_0 , errorgen_2_bel_0) @@ -797,7 +806,6 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight def com(P1, P2): #P1 and P2 either commute or anticommute. if P1.commutes(P2): - P3 = 0 return None else: P3 = P1*P2 From b585a3e926e970fffd4be26a88a0fecf66591aa4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 24 Nov 2024 14:00:08 -0700 Subject: [PATCH 041/102] Get rid of annoying non-determinism Get rid of usage of set in the BCH code which was creating annoying non-deterministic behavior for some of the higher-order BCH code which was making it annoying to test/validate correctness on. --- .../errorgenpropagation/localstimerrorgen.py | 4 ++++ pygsti/tools/errgenproptools.py | 23 +++++++------------ 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index eb8ca1b1e..dda77e34c 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -6,6 +6,10 @@ from pygsti.tools import change_basis from pygsti.tools.lindbladtools import create_elementary_errorgen +#TODO: Split this into a parent class and subclass for markovian and non-markovian +#propagation. There is some overhead in instantiating the NM version of these labels +#which we can avoid and make markovian applications much more efficient (label instantiation +#is like a third of runtime when using higher-order BCH, e.g.) 
class LocalStimErrorgenLabel(_ElementaryErrorgenLabel): """ diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 585bca3ea..c8ae6cb58 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -14,8 +14,8 @@ from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen -from numpy import conjugate from functools import reduce +from itertools import chain def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): """ @@ -109,8 +109,8 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th if curr_order == 0: #Get a combined set of error generator coefficient labels for these two #dictionaries. - current_combined_coeff_lbls = set(errgen_layer_1.keys()) | set(errgen_layer_2.keys()) - + current_combined_coeff_lbls = {key: None for key in chain(errgen_layer_1, errgen_layer_2)} + first_order_dict = dict() #loop through the combined set of coefficient labels and add them to the new dictionary for the current BCH #approximation order. If present in both we sum the rates. @@ -138,17 +138,13 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th for error1, error1_val in errgen_layer_1.items(): for error2, error2_val in errgen_layer_2.items(): #get the list of error generator labels - #weight = .5*errgen_layer_1[error1]*errgen_layer_2[error2] weight = .5*error1_val*error2_val - #I *think* you can pick up at most around a factor of 8 from the commutator - #itself. Someone should validate that. Set this conservatively, but also #avoid computing commutators which will be effectively zero. 
if abs(weight) < truncation_threshold: continue commuted_errgen_sublist = error_generator_commutator(error1, error2, weight= weight, identity=identity) commuted_errgen_list.extend(commuted_errgen_sublist) - #print(f'{commuted_errgen_list=}') #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. second_order_comm_dict = {error_tuple[0]: 0 for error_tuple in commuted_errgen_list} @@ -174,12 +170,11 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th commuted_errgen_list_2 = [] for (error1a, error1a_val), (error1b, error1b_val) in zip(errgen_layer_1.items(), errgen_layer_2.items()): for error2, error2_val in second_order_comm_dict.items(): - #I *think* you can pick up at most around a factor of 8 from the commutator - #itself. Someone should validate that. Set this conservatively, but also - #avoid computing commutators which will be effectively zero. + #only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator weighta = (1/6)*error1a_val*error2_val + #avoid computing commutators which will be effectively zero. if not abs(weighta) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1a, error2, weight=weighta, identity=identity) @@ -206,11 +201,11 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #finally sum these two dictionaries, keeping only terms which are greater than the threshold. 
third_order_comm_dict = dict() - for lbl in set(third_order_comm_dict_1) | set(third_order_comm_dict_2): + current_combined_coeff_lbls = {key: None for key in chain(third_order_comm_dict_1, third_order_comm_dict_2)} + for lbl in current_combined_coeff_lbls: third_order_rate = third_order_comm_dict_1.get(lbl, 0) + third_order_comm_dict_2.get(lbl, 0) if abs(third_order_rate) > truncation_threshold: third_order_comm_dict[lbl] = third_order_rate - #print(f'{third_order_comm_dict=}') new_errorgen_layer.append(third_order_comm_dict) #fourth order BCH terms @@ -241,16 +236,14 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th fourth_order_comm_dict[error_tuple[0]] += error_tuple[1] #drop any terms below the truncation threshold after aggregation - #print(f'{fourth_order_comm_dict=}') fourth_order_comm_dict = {key: val for key, val in fourth_order_comm_dict.items() if abs(val)>truncation_threshold} new_errorgen_layer.append(fourth_order_comm_dict) - #print(f'{fourth_order_comm_dict=}') else: raise NotImplementedError("Higher orders beyond fourth order are not implemented yet.") #Finally accumulate all of the dictionaries in new_errorgen_layer into a single one, summing overlapping terms. - errorgen_labels_by_order = [set(order_dict) for order_dict in new_errorgen_layer] + errorgen_labels_by_order = [{key: None for key in order_dict} for order_dict in new_errorgen_layer] complete_errorgen_labels = reduce(lambda a, b: a|b, errorgen_labels_by_order) #initialize a dictionary with requisite keys From 693057c4c295f922eef9362eae20b6addb1118fe Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 24 Nov 2024 18:11:41 -0700 Subject: [PATCH 042/102] Fifth order BCH Implementation for fifth order BCH. The main motivation here isn't really that we're likely to want to go this high, but that I needed this implemented to test out some properties of the convergence. 
--- pygsti/tools/errgenproptools.py | 185 +++++++++++++++++++++++++++++++- 1 file changed, 183 insertions(+), 2 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c8ae6cb58..fbad0971d 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -91,6 +91,9 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th errgen_layer_2 : list of dicts See errgen_layer_1. + + bch_order : int, optional (default 1) + Order of the BCH approximation to use. Currently support for up to fifth order. truncation_threshold : float, optional (default 1e-14) Threshold for which any error generators with magnitudes below this value @@ -238,9 +241,187 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #drop any terms below the truncation threshold after aggregation fourth_order_comm_dict = {key: val for key, val in fourth_order_comm_dict.items() if abs(val)>truncation_threshold} new_errorgen_layer.append(fourth_order_comm_dict) - + + #Note for fifth order and beyond we can save a bunch of commutators + #by using the results of https://doi.org/10.1016/j.laa.2003.09.010 + #Revisit this if going up to high-order ever becomes a regular computation. + #fifth-order BCH terms: + #-(1/720)*([X,F] - [Y, E]) + (1/360)*([Y,F] - [X,E]) + (1/120)*([Y,G] - [X,D]) + # Where: E = [Y,C]; F = [X,B]; G=[X,C] + # B = [X,[X,Y]]; C = [Y,[X,Y]]; D = [Y,[X,[X,Y]]] + # B, C and D have all been previously calculated (up to the leading constant). + # B is proportional to third_order_comm_dict_1, C is proportional to third_order_comm_dict_2 + # D is proportional to fourth_order_comm_dict + # This gives 9 new commutators to calculate (7 if you used linearity, and even fewer would be needed + # using the result from the paper above, but we won't here atm). 
+ elif curr_order == 4: + B = third_order_comm_dict_1 + C = third_order_comm_dict_2 + D = fourth_order_comm_dict + #Compute the new commutators E, F and G as defined above. + #Start with E: + commuted_errgen_list_E = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in C.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_E.extend(commuted_errgen_sublist) + #Next F: + commuted_errgen_list_F = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in B.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_F.extend(commuted_errgen_sublist) + #Then G: + commuted_errgen_list_G = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in C.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_G.extend(commuted_errgen_sublist) + + #Turn the commutator lists into dictionaries: + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. 
+ E_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_E} + F_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_F} + G_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_G} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list_E: + E_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_F: + F_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_G: + G_comm_dict[error_tuple[0]] += error_tuple[1] + + #drop any terms below the truncation threshold after aggregation + E_comm_dict = {key: val for key, val in E_comm_dict.items() if abs(val)>truncation_threshold} + F_comm_dict = {key: val for key, val in F_comm_dict.items() if abs(val)>truncation_threshold} + G_comm_dict = {key: val for key, val in G_comm_dict.items() if abs(val)>truncation_threshold} + #-(1/720)*([X,F] - [Y, E]) + (1/360)*([Y,F] - [X,E]) + (1/120)*([Y,G] - [X,D]) + #Now do the next round of 6 commutators: [X,F], [Y,E], [Y,F], [X,E], [Y,G] and [X,D] + #We also need the following weight factors. F has a leading factor of (1/12) + #E and G have a leading factor of (-1/12). D has a leading factor of (-1/24) + #This gives the following additional weight multipliers: + #[X,F] = (-1/60); [Y,E] = (1/60); [Y,F]= (1/30); [X,E]= (1/30); [Y,G] = (-1/10); [X,D] = (1/5) + + #[X,F]: + commuted_errgen_list_XF = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in F_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. 
+ weight = -(1/60)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XF.extend(commuted_errgen_sublist) + #[Y,E]: + commuted_errgen_list_YE = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in E_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = (1/60)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YE.extend(commuted_errgen_sublist) + #[Y,F]: + commuted_errgen_list_YF = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in F_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = (1/30)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YF.extend(commuted_errgen_sublist) + #[X,E]: + commuted_errgen_list_XE = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in E_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = (1/30)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XE.extend(commuted_errgen_sublist) + #[Y,G]: + commuted_errgen_list_YG = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in G_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. 
+ weight = -.1*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YG.extend(commuted_errgen_sublist) + #[X,D]: + commuted_errgen_list_XD = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in D.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = .2*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XD.extend(commuted_errgen_sublist) + + #Turn the commutator lists into dictionaries: + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + XF_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XF} + YE_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YE} + YF_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YF} + XE_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XE} + YG_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YG} + XD_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XD} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. 
+ for error_tuple in commuted_errgen_list_XF: + XF_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YE: + YE_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YF: + YF_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_XE: + XE_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YG: + YG_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_XD: + XD_comm_dict[error_tuple[0]] += error_tuple[1] + + #finally sum these six dictionaries, keeping only terms which are greater than the threshold. + fifth_order_comm_dict = dict() + fifth_order_dicts = [XF_comm_dict, YE_comm_dict, YF_comm_dict, XE_comm_dict, YG_comm_dict, XD_comm_dict] + current_combined_coeff_lbls = {key: None for key in chain(*fifth_order_dicts)} + for lbl in current_combined_coeff_lbls: + fifth_order_rate = sum([comm_dict.get(lbl, 0) for comm_dict in fifth_order_dicts]) + if abs(fifth_order_rate) > truncation_threshold: + fifth_order_comm_dict[lbl] = fifth_order_rate + new_errorgen_layer.append(fifth_order_comm_dict) + else: - raise NotImplementedError("Higher orders beyond fourth order are not implemented yet.") + raise NotImplementedError("Higher orders beyond fifth order are not implemented yet.") #Finally accumulate all of the dictionaries in new_errorgen_layer into a single one, summing overlapping terms. errorgen_labels_by_order = [{key: None for key in order_dict} for order_dict in new_errorgen_layer] From 7d61350cf93bfffc2f083cbb7e2b8087c9881feb Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 26 Nov 2024 13:26:00 -0700 Subject: [PATCH 043/102] Fix bug in circuit to stim conversion Fix a bug due to mismatch between circuit and matrix multiplication ordering. 
--- pygsti/circuits/circuit.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index ef1f4938e..4b67b6f60 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3773,24 +3773,26 @@ def convert_to_stim_tableau_layers(self, gate_name_conversions=None, num_qubits= def convert_to_stim_tableau(self,gate_name_conversions=None): """ - Converts this circuit to a stim tableu + Converts this circuit to a stim tableau Parameters ---------- - gate_name_conversions : Dict - A map from pygsti gatenames to standard stim tableaus. If set to None a standard set of gate names is used + gate_name_conversions : dict, optional (default None) + A map from pygsti gatenames to standard stim tableaus. + If None a standard set of gate names is used from + `pygsti.tools.internalgates` Returns ------- - A single stim tableu representing the entire circuit + A single stim.Tableau representing the entire circuit. """ layers=self.convert_to_stim_tableau_layers(gate_name_conversions) - tableu=layers.pop(0) - for layer in layers: - tableu=tableu*layer - return tableu + if layers: + tableau=layers[0] + for layer in layers[1:]: + tableau= layer*tableau + return tableau - def convert_to_cirq(self, qubit_conversion, From bae42fda9687b6a8117174b16aef5bdcfa20dbd5 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 3 Dec 2024 22:21:05 -0700 Subject: [PATCH 044/102] Approximate Error Generator Probabilities Provides initial support for the calculation of first-order approximations to the output probabilities of circuits with end-of-circuit error generators. 
--- pygsti/circuits/circuit.py | 2 +- pygsti/tools/errgenproptools.py | 477 +++++++++++++++++++++++++++++++- 2 files changed, 477 insertions(+), 2 deletions(-) diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 4b67b6f60..c54117d4a 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3771,7 +3771,7 @@ def convert_to_stim_tableau_layers(self, gate_name_conversions=None, num_qubits= stim_layers.append(stim_layer) return stim_layers - def convert_to_stim_tableau(self,gate_name_conversions=None): + def convert_to_stim_tableau(self, gate_name_conversions=None): """ Converts this circuit to a stim tableau diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index fbad0971d..f7da1b679 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -11,11 +11,13 @@ #*************************************************************************************************** import stim +import numpy as _np from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen +from pygsti.circuits import Circuit as _Circuit from functools import reduce -from itertools import chain +from itertools import chain, product def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): """ @@ -1023,3 +1025,476 @@ def stim_pauli_string_less_than(pauli1, pauli2): return unsigned_pauli1_str < unsigned_pauli2_str +#-----------First-Order Approximate Error Generator Probabilities---------------# + +def random_support(tableau, return_support=False): + """ + Compute the number of bits over which the stabilizer state corresponding to this stim tableau + would have measurement outcomes which are random. 
+ + Parameters + ---------- + tableau : stim.Tableau + stim.Tableau corresponding to the stabilizer state we want the random support + for. + + return_support : bool, optional (default False) + If True also returns a list of qubit indices over which the distribution of outcome + bit strings is random. + """ + #TODO Test for correctness on support + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + num_random = 0 + support = [] + for i in range(len(tableau)): + z = sim.peek_z(i) + if z == 0: + num_random+=1 + support.append(i) + # For a phase reference, use the smallest state with non-zero amplitude. + forced_bit = z == -1 + sim.postselect_z(i, desired_value=forced_bit) + return (num_random, support) if return_support else num_random + +#Courtesy of Gidney +#https://quantumcomputing.stackexchange.com/questions/38826/how-do-i-efficiently-compute-the-fidelity-between-two-stabilizer-tableau-states +def tableau_fidelity(tableau1, tableau2): + """ + Calculate the fidelity between the stabilizer states corresponding to the given stim + tableaus. This returns a result in units of probability (so this may be squared + fidelity depending on your convention). + + Parameters + ---------- + tableau1 : stim.Tableau + Stim tableau for first stabilizer state. + tableau2 : stim.Tableau + Stim tableau for second stabilizer state. + """ + t3 = tableau2**-1 * tableau1 + sim = stim.TableauSimulator() + sim.set_inverse_tableau(t3) + p = 1 + #note to future selves: stim uses little endian convention by default, and we typically use + #big endian. That doesn't make a difference in this case, but does elsewhere to be mindful to + #save on grief. + for q in range(len(t3)): + e = sim.peek_z(q) + if e == -1: + return 0 + if e == 0: + p *= 0.5 + sim.postselect_z(q, desired_value=False) + return p + +def bitstring_to_tableau(bitstring): + """ + Map a computational basis bit string into a corresponding Tableau which maps the all zero + state into that state. 
+ + Parameters + ---------- + bitstring : str + String of 0's and 1's corresponding to the computational basis state to prepare the Tableau for. + + Returns + ------- + stim.Tableau + Tableau which maps the all zero string to this computational basis state + """ + pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in bitstring])) + #convert this to a stim.Tableau + pauli_tableau = pauli_string.to_tableau() + return pauli_tableau + + +#Modified from Gidney +#https://quantumcomputing.stackexchange.com/questions/34610/get-the-amplitude-of-a-computational-basis-in-stim +def amplitude_of_state(tableau, desired_state): + """ + Get the amplitude of a particular computational basis state for given + stabilizer state. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau corresponding to the stabilizer state we wish to extract + the amplitude from. + + desired_state : str + String of 0's and 1's corresponding to the computational basis state to extract the amplitude for. + """ + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + n = sim.num_qubits + + #convert desired state into a list of bools + desired_state = [desired_state[i] == '1' for i in range(n)] + + # Determine the magnitude of the target state. + copy = sim.copy() + num_random = 0 + for q in range(n): + desired_bit = desired_state[q] + z = copy.peek_z(q) + forced_bit = z == -1 + if z == 0: + num_random += 1 + elif desired_bit != forced_bit: #forced bit is true if the state is |1>, so this is checking whether the bits match. + return 0 + copy.postselect_z(q, desired_value=desired_bit) + magnitude = 2**-(num_random / 2) + # For a phase reference, use the smallest state with non-zero amplitude. 
+ copy = sim.copy() + ref_state = [False]*n + for q in range(n): + z = copy.peek_z(q) + forced_bit = z == -1 + ref_state[q] = forced_bit + copy.postselect_z(q, desired_value=forced_bit) + if ref_state == desired_state: + return magnitude + + # Postselect away states that aren't the desired or reference states. + # Also move the ref state to |00..00> and the desired state to |00..01>. + copy = sim.copy() + found_difference = False + for q in range(n): + desired_bit = desired_state[q] + ref_bit = ref_state[q] + if desired_bit == ref_bit: + copy.postselect_z(q, desired_value=ref_bit) + if desired_bit: + copy.x(q) + elif not found_difference: + found_difference = True + if q: + copy.swap(0, q) + if ref_bit: + copy.x(0) + else: + # Remove difference between target state and ref state at this bit. + copy.cnot(0, q) + copy.postselect_z(q, desired_value=ref_bit) + + # The phase difference between |00..00> and |00..01> is what we want. + # Since other states are gone, this is the bloch vector phase of qubit 0. + assert found_difference + s = str(copy.peek_bloch(0)) + + if s == "+X": + phase_factor = 1 + if s == "-X": + phase_factor = -1 + if s == "+Y": + phase_factor = 1j + if s == "-Y": + phase_factor = -1j + + return phase_factor*magnitude + +def pauli_phase_update(pauli, bitstring): + """ + Takes as input a pauli and a bit string and computes the output bitstring + and the overall phase that bit string accumulates. + + Parameters + ---------- + pauli : str or stim.PauliString + Pauli to apply + + bitstring : str + String of 0's and 1's representing the bit string to apply the pauli to. + + Returns + ------- + Tuple whose first element is the phase accumulated, and whose second element + is a string corresponding to the updated bit string. 
+ """ + + if isinstance(pauli, str): + pauli = stim.PauliString(pauli) + + bitstring = [False if bit=='0' else True for bit in bitstring] + #list of phase correction for each pauli (conditional on 0) + #Read [I, X, Y, Z] + pauli_phases_0 = [1, 1, -1j, 1] + + #list of the phase correction for each pauli (conditional on 1) + #Read [I, X, Y, Z] + pauli_phases_1 = [1, 1, 1j, -1] + + #list of bools corresponding to whether each pauli flips the target bit + pauli_flips = [False, True, True, False] + + overall_phase = 1 + indices_to_flip = [] + for i, (elem, bit) in enumerate(zip(pauli, bitstring)): + if bit: + overall_phase*=pauli_phases_1[elem] + else: + overall_phase*=pauli_phases_0[elem] + if pauli_flips[elem]: + indices_to_flip.append(i) + #if the input pauli had any overall phase associated with it add that back + #in too. + overall_phase*=pauli.sign + #apply the flips to get the output bit string. + for idx in indices_to_flip: + bitstring[idx] = not bitstring[idx] + #turn this back into a string + output_bitstring = ''.join(['1' if bit else '0' for bit in bitstring]) + + return overall_phase, output_bitstring + +#TODO: This function needs a more evocative name +def phi(tableau, desired_bitstring, P, Q): + """ + This function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. + + Parameters + ---------- + tableau : stim.Tableau + A stim Tableau corresponding to the input stabilizer state. + + desired_bitstring : str + A string of zeros and ones corresponding to the bit string being measured. + + P : str or stim.PauliString + The first pauli string index. + Q : str or stim.PauliString + The second pauli string index. + + Returns + ------- + A complex number corresponding to the value of the phi function. + """ + + #start by getting the pauli string which maps the all-zeros string to the target bitstring. 
+ initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])) + + #map P and Q to stim.PauliString if needed. + if isinstance(P, str): + P = stim.PauliString(P) + if isinstance(Q, str): + Q = stim.PauliString(Q) + + #combine this initial pauli string with the two input paulis + eff_P = initial_pauli_string*P + eff_Q = Q*initial_pauli_string + + #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get + #the corresponding phase corrections. + all_zeros = '0'*len(eff_P) + phase1, bitstring1 = pauli_phase_update(eff_P, all_zeros) + phase2, bitstring2 = pauli_phase_update(eff_Q, all_zeros) + + #get the amplitude of these two bitstrings in the stabilizer state. + amp1 = amplitude_of_state(tableau, bitstring1) + amp2 = amplitude_of_state(tableau, bitstring2) + + #now apply the phase corrections. + amp1*=phase1 + amp2*=phase2 + + #calculate phi. + #The second amplitude also needs a complex conjugate applied + phi = amp1*amp2.conjugate() + + #phi should ultimately be either 0, +/-1 or +/-i, scaling might overflow + #so avoid scaling and just identify which of these it should be. For really + #tiny phi this may still have an issue... + if abs(phi)>1e-14: + if abs(phi.real) > 1e-14: + if phi.real > 0: + return complex(1) + else: + return complex(-1) + else: + if phi.imag > 0: + return 1j + else: + return -1j + else: + return complex(0) + +def alpha(errorgen, tableau, desired_bitstring): + """ + First-order error generator sensitivity function for probability. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + desired_bitstring : str + Bit string to calculate the sensitivity for. 
+ """ + + errgen_type = errorgen.errorgen_type + basis_element_labels = errorgen.basis_element_labels + + if not isinstance(basis_element_labels[0], stim.PauliString): + basis_element_labels = tuple([stim.PauliString(lbl) for lbl in basis_element_labels]) + + identity_pauli = stim.PauliString('I'*len(basis_element_labels[0])) + + if errgen_type == 'H': + sensitivity = 2*phi(tableau, desired_bitstring, basis_element_labels[0], identity_pauli).imag + + elif errgen_type == 'S': + sensitivity = phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \ + - phi(tableau, desired_bitstring, identity_pauli, identity_pauli) + elif errgen_type == 'C': #TODO simplify this logic + first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) + second_term = phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) \ + + phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) + sensitivity = first_term.real - second_term.real + else: #A + first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[1], basis_element_labels[0]) + second_term = phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) \ + - phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) + sensitivity = first_term.imag + second_term.imag + return sensitivity + +def first_order_probability_correction(errorgen_dict, tableau, desired_bitstring): + """ + Compute the first-order correction to the probability of the specified bit string. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + tableau : stim.Tableau + Stim tableau corresponding to a particular stabilizer state being measured. 
+ + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + Returns + ------- + correction : float + float corresponding to the correction to the output probability for the + desired bitstring induced by the error generator (to first order). + """ + + num_random = random_support(tableau) + scale = 1/2**(num_random) #TODO: This might overflow + + #now get the sum over the alphas and the error generator rate products needed. + alpha_errgen_prods = [0]*len(errorgen_dict) + + for i, (lbl, rate) in enumerate(errorgen_dict.items()): + alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate + + correction = scale*sum(alpha_errgen_prods) + return correction + +def stabilizer_probability(tableau, desired_bitstring): + """ + Calculate the output probability for the specifed output bitstring. + + TODO: Should be able to do this more efficiently for many bitstrings + by looking at the structure of the random support. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau for the stabilizer state being measured. + + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + Returns + ------- + p : float + probability of desired bitstring. + """ + #compute what Gidney calls the tableau fidelity (which in this case gives the probability). + return tableau_fidelity(tableau, bitstring_to_tableau(desired_bitstring)) + +def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring): + """ + Calculate the approximate probability of a desired bit string using a first-order approximation. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either + case this should be a Clifford circuit and convertable to a stim.Tableau. 
+ + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + Returns + ------- + p : float + Approximate output probability for desired bitstring. + """ + + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + #recast keys to local stim ones if needed. + first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_prob = stabilizer_probability(tableau, desired_bitstring) + first_order_correction = first_order_probability_correction(errorgen_dict, tableau, desired_bitstring) + return ideal_prob + first_order_correction + +def approximate_stabilizer_probabilities(errorgen_dict, circuit): + """ + Calculate the approximate probability distribution over all bitstrings using a first-order approximation. + Note the size of this distribtion scales exponentially in the qubit count, so this is very inefficient for + any more than a few qubits. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either + case this should be a Clifford circuit and convertable to a stim.Tableau. + + Returns + ------- + p : float + Approximate output probability for desired bitstring. 
+ """ + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + #get set of all bit strings + num_qubits = len(tableau) + bitstrings = ["".join(bitstring) for bitstring in product(['0','1'], repeat=num_qubits)] + + #initialize an array for the probabilities + probs = _np.zeros(2**num_qubits) + + for i, bitstring in enumerate(bitstrings): + probs[i] = approximate_stabilizer_probability(errorgen_dict, tableau, bitstring) + + return probs \ No newline at end of file From 165329065cd87599aa794c1fd5647b802608faba Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 18 Dec 2024 22:42:22 -0700 Subject: [PATCH 045/102] Checkpoint error generator composition implementation Checkpoint the implementation of the pygsti code for the analytic computation of the composition of elementary error generators. This push includes H-H, H-S and H-C so far. Additionally adds new helper/utility functions for doing numeric versions of the composition and commutator calculations (not scalable but will be useful for testing). Aside from this a few minor refactors/variable renaming. 
--- pygsti/tools/errgenproptools.py | 753 ++++++++++++++++++++++++++------ 1 file changed, 611 insertions(+), 142 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index f7da1b679..c36a4590c 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -13,6 +13,8 @@ import stim import numpy as _np from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL +from pygsti.baseobjs import QubitSpace as _QubitSpace +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen from pygsti.circuits import Circuit as _Circuit @@ -80,6 +82,8 @@ def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): else: raise ValueError('Only `GlobalElementaryErrorgenLabel and LocalElementaryErrorgenLabel is currently supported.') +#------- Error Generator Math -------------# + def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_threshold=1e-14): """ Apply the BCH approximation at the given order to combine the input dictionaries @@ -440,7 +444,6 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th return new_errorgen_layer_dict - def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0, identity=None): """ Returns the commutator of two error generators. I.e. [errorgen_1, errorgen_2]. @@ -471,7 +474,7 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight weighted by the specified value of `weight`. 
""" - errorGens=[] + errorgens=[] if flip_weight: w= -weight @@ -498,41 +501,41 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if errorgen_1_type=='H' and errorgen_2_type=='H': ptup = com(errorgen_1_bel_0 , errorgen_2_bel_0) if ptup is not None: - errorGens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0])) + errorgens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0])) elif errorgen_1_type=='H' and errorgen_2_type=='S': ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) if ptup is not None: if errorgen_2_bel_0 == ptup[1]: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup[0])) + errorgens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup[0])) else: new_bels = [errorgen_2_bel_0, ptup[1]] if stim_pauli_string_less_than(errorgen_2_bel_0, ptup[1])\ else [ptup[1], errorgen_2_bel_0] - errorGens.append(( _LSE('C', new_bels), 1j*w*ptup[0])) + errorgens.append(( _LSE('C', new_bels), 1j*w*ptup[0])) elif errorgen_1_type=='S' and errorgen_2_type=='H': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='H' and errorgen_2_type=='C': ptup1 = com(errorgen_2_bel_0 , errorgen_1_bel_0) ptup2 = com(errorgen_2_bel_1 , errorgen_1_bel_0) if ptup1 is not None: if ptup1[1] == errorgen_2_bel_1: - errorGens.append((_LSE('S', [errorgen_2_bel_1]), 2*1j*w*ptup1[0])) + errorgens.append((_LSE('S', [errorgen_2_bel_1]), 2*1j*w*ptup1[0])) else: new_bels = [ptup1[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1)\ else [errorgen_2_bel_1, ptup1[1]] - errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0])) + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] == errorgen_2_bel_0: - errorGens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup2[0])) + errorgens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup2[0])) else: new_bels = 
[ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0)\ else [errorgen_2_bel_0, ptup2[1]] - errorGens.append((_LSE('C', new_bels), 1j*w*ptup2[0])) + errorgens.append((_LSE('C', new_bels), 1j*w*ptup2[0])) elif errorgen_1_type=='C' and errorgen_2_type=='H': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='H' and errorgen_2_type=='A': ptup1 = com(errorgen_1_bel_0 , errorgen_2_bel_0) @@ -540,97 +543,97 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup1 is not None: if ptup1[1] != errorgen_2_bel_1: if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1): - errorGens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) + errorgens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) else: - errorGens.append((_LSE('A', [errorgen_2_bel_1, ptup1[1]]), 1j*w*ptup1[0])) + errorgens.append((_LSE('A', [errorgen_2_bel_1, ptup1[1]]), 1j*w*ptup1[0])) if ptup2 is not None: if ptup2[1] != errorgen_2_bel_0: if stim_pauli_string_less_than(errorgen_2_bel_0, ptup2[1]): - errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) elif errorgen_1_type=='A' and errorgen_2_type=='H': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type=='S' and errorgen_2_type=='S': #Commutator of S with S is zero. 
pass elif errorgen_1_type=='S' and errorgen_2_type=='C': - ptup1 = product(errorgen_1_bel_0 , errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1 , errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_0 , errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1 , errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 
1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: - ptup2 = product(ptup1[1], errorgen_1_bel_0) + ptup2 = pauli_product(ptup1[1], errorgen_1_bel_0) #it shouldn't be possible for ptup2[1] to equal errorgen_1_bel_0, #as that would imply that errorgen_1_bel_0 was the identity. if ptup2[1] == identity: - errorGens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0])) else: if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]) , 1j*.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]) , 1j*.5*w*ptup1[0]*ptup2[0])) #ptup3 is just the product from ptup2 in reverse, so this can be done #more efficiently, but I'm not going to do that at present... 
- ptup3 = product(errorgen_1_bel_0, ptup1[1]) + ptup3 = pauli_product(errorgen_1_bel_0, ptup1[1]) if ptup3[1] == identity: - errorGens.append((_LSE('H', [errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup3[0]) ) + errorgens.append((_LSE('H', [errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup3[0]) ) else: if stim_pauli_string_less_than(errorgen_1_bel_0, ptup3[1]): - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) else: - errorGens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , 1j*.5*w*ptup1[0]*ptup3[0])) + errorgens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , 1j*.5*w*ptup1[0]*ptup3[0])) elif errorgen_1_type == 'C' and errorgen_2_type == 'S': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'S' and errorgen_2_type == 'A': - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if 
(ptup1[1] != identity) and (ptup2[1] != identity): new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: if ptup[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -644,65 +647,65 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #And com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be by the same #argument that it can't be errorgen_1_bel_0 if stim_pauli_string_less_than(errorgen_1_bel_0, ptup2[1]): - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) elif errorgen_1_type == 'A' and errorgen_2_type == 'S': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'C' and errorgen_2_type == 'C': - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), 
-1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_1,errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1,errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_1,errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1,errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], 
ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -712,9 +715,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_1 can't be the identity, #And com(errorgen_2_bel_0, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. 
if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1): - errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_2_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_2_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -724,9 +727,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_0 can't be the identity. #And com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0): - errorGens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -736,9 +739,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_1 can't be the identity. 
#And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_0) can't be either if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -748,9 +751,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_0 can't be the identity. #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_1) can't be either if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -759,48 +762,48 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight ptup3 = com(ptup1[1], ptup2[1]) if ptup3 is not None: #It shouldn't be possible for ptup3 to be the identity given valid error generator indices. 
- errorGens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + errorgens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) elif errorgen_1_type == 'C' and errorgen_2_type == 'A': - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_1) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: 
new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) - ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) + ptup1 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) if ptup1[1] != ptup2[1]: if ptup1[1] != identity and ptup2[1] != identity: new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] - errorGens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) else: #ptup[1] == ptup[2] if ptup1[1] != identity: - errorGens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -810,9 +813,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_1 can't be the identity. #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -822,9 +825,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_0 can't be the identity. #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): - errorGens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -834,9 +837,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_1 can't be the identity. #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] - errorGens.append((_LSE('C', new_bels), .5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), .5*1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_2_bel_1, don't need to check that errorgen_2_bel_1 isn't identity. 
- errorGens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) @@ -847,9 +850,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_0 can't be the identity. #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] - errorGens.append((_LSE('C', new_bels), -.5*1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -.5*1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_2_bel_0, don't need to check that errorgen_2_bel_0 isn't identity. - errorGens.append((_LSE('S', [errorgen_2_bel_0]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_2_bel_0]), -1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -859,64 +862,64 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup3 is not None: #it shouldn't be possible for ptup3 to be identity given valid error generator #indices. 
- errorGens.append((_LSE('H', [ptup3[1]]), -.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + errorgens.append((_LSE('H', [ptup3[1]]), -.25*w*ptup1[0]*ptup2[0]*ptup3[0])) elif errorgen_1_type == 'A' and errorgen_2_type == 'C': - errorGens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) elif errorgen_1_type == 'A' and errorgen_2_type == 'A': - ptup1 = product(errorgen_2_bel_1, errorgen_1_bel_1) - ptup2 = product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup1 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) + ptup2 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_2_bel_0, errorgen_1_bel_0) - ptup2 = product(errorgen_1_bel_1, errorgen_2_bel_1) + ptup1 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - 
errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_1, errorgen_2_bel_0) - ptup2 = product(errorgen_2_bel_1, errorgen_1_bel_0) + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) - ptup1 = product(errorgen_1_bel_0, errorgen_2_bel_1) - ptup2 = product(errorgen_2_bel_0, errorgen_1_bel_1) + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) if ptup1[1] != ptup2[1]: if (ptup1[1] != identity) and (ptup2[1] != identity): if stim_pauli_string_less_than(ptup1[1], ptup2[1]): - errorGens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), 
-1j*w*ptup1[0]*ptup2[0])) else: - errorGens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) elif ptup1[1] == identity: - errorGens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == identity - errorGens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -926,9 +929,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_0 can't be the identity. #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. new_bels = [ptup2[1], errorgen_1_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0) else [errorgen_1_bel_0, ptup2[1]] - errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_1_bel_0 - errorGens.append((_LSE('S', [errorgen_1_bel_0]), w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_1_bel_0]), w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -938,9 +941,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_1_bel_1 can't be the identity. #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
new_bels = [ptup2[1], errorgen_1_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1) else [errorgen_1_bel_1, ptup2[1]] - errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_1_bel_1 - errorGens.append((_LSE('S', [errorgen_1_bel_1]), -1*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_1_bel_1]), -1*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) if ptup1 is not None: @@ -950,9 +953,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_1 can't be the identity. #com(errorgen_2_bel_0, com(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] - errorGens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_2_bel_1 - errorGens.append((_LSE('S', [errorgen_2_bel_1]), w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_2_bel_1]), w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) @@ -963,9 +966,9 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight #errorgen_2_bel_0 can't be the identity. #com(errorgen_2_bel_1, com(errorgen_1_bel_0,errorgen_1_bel_1)) can't be either. 
new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] - errorGens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) else: #ptup2[1] == errorgen_2_bel_0 - errorGens.append((_LSE('S', [errorgen_2_bel_0]), -1*w*ptup1[0]*ptup2[0])) + errorgens.append((_LSE('S', [errorgen_2_bel_0]), -1*w*ptup1[0]*ptup2[0])) ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) if ptup1 is not None: @@ -975,9 +978,260 @@ def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight if ptup3 is not None: #it shouldn't be possible for ptup3 to be identity given valid error generator #indices. - errorGens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + errorgens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) - return errorGens + return errorgens + +def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=None): + """ + Returns the composition of two error generators. I.e. errorgen_1[errorgen_2[\cdot]]. + + Parameters + ---------- + errorgen1 : `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `LocalStimErrorgenLabel` + Second error generator + + weight : float, optional (default 1.0) + An optional weighting value to apply to the value of the composition. + + identity : stim.PauliString, optional (default None) + An optional stim.PauliString to use for comparisons to the identity. + Passing in this kwarg isn't necessary, but can allow for reduced + stim.PauliString creation when calling this function many times for + improved efficiency. + + Returns + ------- + list of `LocalStimErrorgenLabel`s corresponding to the composition of the two input error generators, + weighted by the specified value of `weight`. 
+ """ + + composed_errorgens = [] + + w = weight + + errorgen_1_type = errorgen_1.errorgen_type + errorgen_2_type = errorgen_2.errorgen_type + + #The first basis element label is always well defined, + #the second we'll define only of the error generator is C or A type. + errorgen_1_bel_0 = errorgen_1.basis_element_labels[0] + errorgen_2_bel_0 = errorgen_2.basis_element_labels[0] + + if errorgen_1_type == 'C' or errorgen_1_type == 'A': + errorgen_1_bel_1 = errorgen_1.basis_element_labels[1] + if errorgen_2_type == 'C' or errorgen_2_type == 'A': + errorgen_2_bel_1 = errorgen_2.basis_element_labels[1] + + #create the identity stim.PauliString for later comparisons. + if identity is None: + identity = stim.PauliString('I'*len(errorgen_1_bel_0)) + + if errorgen_1_type == 'H' and errorgen_2_type == 'H': + if errorgen_1_bel_0.commutes(errorgen_2_bel_0): + if errorgen_1_bel_0==errorgen_2_bel_0: + composed_errorgens.append((_LSE('S', [errorgen_1_bel_0]), 2*w)) + else: + new_bels = [errorgen_1_bel_0, errorgen_2_bel_0] if stim_pauli_string_less_than(errorgen_1_bel_0, errorgen_2_bel_0) else [errorgen_2_bel_0, errorgen_1_bel_0] + composed_errorgens.append((_LSE('C', new_bels), w)) + else: + ptup = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + composed_errorgens.append((_LSE('H', [ptup[1]]), -1j*w*ptup[0])) + new_bels = [errorgen_1_bel_0, errorgen_2_bel_0] if stim_pauli_string_less_than(errorgen_1_bel_0, errorgen_2_bel_0) else [errorgen_2_bel_0, errorgen_1_bel_0] + composed_errorgens.append((_LSE('C', new_bels), w)) + + elif errorgen_1_type == 'H' and errorgen_2_type == 'S': + ptup = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + if errorgen_1_bel_0.commutes(errorgen_2_bel_0): + if ptup[1] == identity: + composed_errorgens.append((_LSE('H', [errorgen_2_bel_0]), -w*ptup[0])) + composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) + else: + if stim_pauli_string_less_than(ptup[1], errorgen_2_bel_0): + composed_errorgens.append((_LSE('A', [ptup[1], 
errorgen_2_bel_0]), -w*ptup[0])) + else: + composed_errorgens.append((_LSE('A', [ptup[1], errorgen_2_bel_0]), w*ptup[0])) + composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) + else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). + new_bels = [ptup[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup[1]] + composed_errorgens.append((_LSE('C', new_bels), -1j*w*ptup[0])) + composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) + #Apologies to poor soul reading this code later, switching back and forth between notation in my notes and in previous code starts to + #get too hard at this point and forward, so switching some notation. -CIO + elif errorgen_1_type == 'H' and errorgen_2_type == 'C': + #H_A[C_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + A = errorgen_1_bel_0 + #Case 1: [P,Q]=0 + if P.commutes(Q): + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[1]) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, 
PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1*QA[0]*addl_sign_1*w)) + + + + + + + + + return composed_errorgens + +#helper function for getting the new (properly ordered) basis element labels, error generator type (A can turn into H with certain index combinations), and additional signs. +#reduces code repetition in composition code. +def _ordered_new_bels_A(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq): + """ + Helper function for managing new basis element labels, error generator types and proper basis element label ordering. Returns None + if both pauli identity flags are True, which signals that the error generator is zero (i.e. should be skipped). Same for is pauli_eq is True. + """ + if pauli_eq: + return (None,None,None) + if first_pauli_ident: + if second_pauli_ident: + return (None,None,None) + else: + new_eg_type = 'H' + new_bels = [pauli2] + addl_sign = 1 + else: + if second_pauli_ident: + new_eg_type = 'H' + new_bels = [pauli1] + addl_sign = -1 + else: + new_eg_type = 'A' + new_bels, addl_sign = ([pauli1, pauli2], 1) if stim_pauli_string_less_than(pauli2, pauli2) else ([pauli2, pauli1], -1) + return new_eg_type, new_bels, addl_sign + +def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq): + """ + Helper function for managing new basis element labels, error generator types and proper basis element label ordering. Returns None + if both pauli identity flags are True, which signals that the error generator is zero (i.e. should be skipped). Same for is pauli_eq is True. + """ + if first_pauli_ident or second_pauli_ident: + return (None,None,None) + + if pauli_eq: + new_eg_type = 'S' + new_bels = [pauli1] + addl_scale_fac = 2 + else: + new_eg_type = 'C' + addl_scale_fac = 1 + new_bels = [pauli1, pauli2] if stim_pauli_string_less_than(pauli2, pauli2) else [pauli2, pauli1] + return new_eg_type, new_bels, addl_scale_fac def com(P1, P2): #P1 and P2 either commute or anticommute. 
@@ -1000,7 +1254,7 @@ def acom(P1, P2): #return (sign(P3) * 2 if P1 and P2 commute, 0 o.w., # unsigned P3) -def product(P1, P2): +def pauli_product(P1, P2): P3 = P1*P2 return (P3.sign, P3 / P3.sign) #return (sign(P3), @@ -1025,6 +1279,221 @@ def stim_pauli_string_less_than(pauli1, pauli2): return unsigned_pauli1_str < unsigned_pauli2_str +def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=None, sslbls=None): + """ + Converts an iterable over error generator coefficients and rates into the corresponding + dense numpy array representation. + + Parameters + ---------- + errorgen_layer : list, tuple or dict + An iterable over error generator coefficient and rates. If a list or a tuple the + elements should correspond to two-element tuples, the first value being an `ElementaryErrorgenLabel` + and the second value the rate. If a dictionary the keys should be `ElementaryErrorgenLabel` and the + values the rates. + + num_qubits : int + Number of qubits for the error generator matrix being constructed. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + sslbls : list or tuple, optional (default None) + A tuple or list of state space labels corresponding to the qubits upon which the error generators + can supported. Only required when passing in a value of `errorgen_matrix_dict` with + `GlobalElementaryErrogenLabel` keys in conjunction with an `errorgen_layer` with labels + which are `LocalElementaryErrorgenLabel` (or vice-versa). + + Returns + ------- + errorgen_mat : ndarray + ndarray for the dense representation of the specified error generator in the standard basis. + """ + + #if the list is empty return all zeros + #initialize empty array for accumulation. 
+ mat = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128) + if not errorgen_layer: + return mat + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + #infer the correct label type. + if errorgen_matrix_dict: + first_label = next(iter(errorgen_matrix_dict)) + if isinstance(first_label, _LEEL): + label_type = 'local' + elif isinstance(first_label, _GEEL): + label_type = 'global' + else: + msg = f'Label type {type(first_label)} is not supported as a key for errorgen_matrix_dict.'\ + + 'Please use either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel.' + raise ValueError() + else: + raise ValueError('Non-empty errorgen_layer, but errorgen_matrix_dict is empty. Cannot convert.') + + #loop through errorgen_layer and accumulate the weighted error generators prescribed. + if isinstance(errorgen_layer, (list, tuple)): + first_coefficient_lbl = errorgen_layer[0][0] + errorgen_layer_iter = errorgen_layer + elif isinstance(errorgen_layer, dict): + first_coefficient_lbl = next(iter(errorgen_layer)) + errorgen_layer_iter = errorgen_layer.items() + else: + raise ValueError(f'errorgen_layer should be either a list, tuple or dict. {type(errorgen_layer)=}') + + if ((isinstance(first_coefficient_lbl, _LEEL) and label_type == 'global') \ + or (isinstance(first_coefficient_lbl, _GEEL) and label_type == 'local')) and sslbls is None: + msg = "You have passed in an `errogen_layer` with `LocalElementaryErrorgenLabel` coefficients, and " \ + +"an `errorgen_matrix_dict` with keys which are `GlobalElementaryErrorgenLabel` (or vice-versa). 
When using this "\ + +"combination you must also specify the state space labels with `sslbls`." + raise ValueError(msg) + + if isinstance(first_coefficient_lbl, _LSE): + if label_type == 'local': + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[lbl.to_local_eel()] + else: + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[lbl.to_global_eel()] + elif isinstance(first_coefficient_lbl, _LEEL): + if label_type == 'local': + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[lbl] + else: + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[_GEEL.cast(lbl, sslbls=sslbls)] + elif isinstance(first_coefficient_lbl, _GEEL): + if label_type == 'local': + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[_LEEL.cast(lbl, sslbls=sslbls)] + else: + for lbl, rate in errorgen_layer_iter: + mat += rate*errorgen_matrix_dict[lbl] + else: + raise ValueError('The coefficient labels in `errorgen_layer` should be either `LocalStimErrorgenLabel`, `LocalElementaryErrorgenLabel` or `GlobalElementaryErrorgenLabel`.') + + return mat + + +#Helper function for doing numeric commutators and compositions. + +def error_generator_commutator_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None): + """ + Numerically compute the commutator of the two specified elementary error generators. + + Parameters + ---------- + errorgen1 : `LocalElementaryErrorgenLabel` or `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `ElementaryErrorgenLabel` or `LocalStimErrorgenLabel` + Second error generator. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. 
+ + num_qubits : int, optional (default None) + Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + + Returns + ------- + ndarray + Numpy array corresponding to the dense representation of the commutator of the input error generators in the standard basis. + """ + + assert isinstance(errorgen1, (_LEEL, _LSE)) and isinstance(errorgen2, (_LEEL, _LSE)) + assert type(errorgen1) == type(errorgen2), "The elementary error generator labels have mismatched types." + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + first_label = next(iter(errorgen_matrix_dict)) + + if isinstance(first_label, _LEEL): + if isinstance(errorgen1, _LEEL): + comm = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] - errorgen_matrix_dict[errorgen2]@errorgen_matrix_dict[errorgen1] + else: + comm = errorgen_matrix_dict[errorgen1.to_local_eel()]@errorgen_matrix_dict[errorgen2.to_local_eel()]\ + - errorgen_matrix_dict[errorgen2.to_local_eel()]@errorgen_matrix_dict[errorgen1.to_local_eel()] + else: + if isinstance(errorgen1, _LSE): + comm = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] - errorgen_matrix_dict[errorgen2]@errorgen_matrix_dict[errorgen1] + else: + comm = errorgen_matrix_dict[_LSE.cast(errorgen1)]@errorgen_matrix_dict[_LSE.cast(errorgen2)]\ + - errorgen_matrix_dict[_LSE.cast(errorgen2)]@errorgen_matrix_dict[_LSE.cast(errorgen1)] + return comm + +def error_generator_composition_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None): + """ + Numerically compute the composition of the two specified 
elementary error generators. + + Parameters + ---------- + errorgen1 : `LocalElementaryErrorgenLabel` or `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `ElementaryErrorgenLabel` or `LocalStimErrorgenLabel` + Second error generator. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + num_qubits : int, optional (default None) + Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + + Returns + ------- + ndarray + Numpy array corresponding to the dense representation of the composition of the input error generators in the standard basis. + + """ + assert isinstance(errorgen1, (_LEEL, _LSE)) and isinstance(errorgen2, (_LEEL, _LSE)) + assert type(errorgen1) == type(errorgen2), "The elementary error generator labels have mismatched types." + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. 
+ errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + first_label = next(iter(errorgen_matrix_dict)) + + if isinstance(first_label, _LEEL): + if isinstance(errorgen1, _LEEL): + comp = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] + else: + comp = errorgen_matrix_dict[errorgen1.to_local_eel()]@errorgen_matrix_dict[errorgen2.to_local_eel()] + else: + if isinstance(errorgen1, _LSE): + comp = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] + else: + comp = errorgen_matrix_dict[_LSE.cast(errorgen1)]@errorgen_matrix_dict[_LSE.cast(errorgen2)] + return comp + + #-----------First-Order Approximate Error Generator Probabilities---------------# def random_support(tableau, return_support=False): From b795bde665a12954155442c0e6c8682ee165bef4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 19 Dec 2024 22:08:58 -0700 Subject: [PATCH 046/102] Another errorgen composition checkpoint Another round of checkpointing for the error generator composition implementation. This included the newly implemented H-A compositions, a bunch of bugs caught and corrected for the H-C compositions, and some refactors of the H-H and H-S to use the stuff introduced for H-C to reduce code repetition. 
--- pygsti/tools/errgenproptools.py | 181 ++++++++++++++++++++++++++------ 1 file changed, 148 insertions(+), 33 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c36a4590c..b1e44cc7c 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1031,36 +1031,35 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non identity = stim.PauliString('I'*len(errorgen_1_bel_0)) if errorgen_1_type == 'H' and errorgen_2_type == 'H': - if errorgen_1_bel_0.commutes(errorgen_2_bel_0): - if errorgen_1_bel_0==errorgen_2_bel_0: - composed_errorgens.append((_LSE('S', [errorgen_1_bel_0]), 2*w)) - else: - new_bels = [errorgen_1_bel_0, errorgen_2_bel_0] if stim_pauli_string_less_than(errorgen_1_bel_0, errorgen_2_bel_0) else [errorgen_2_bel_0, errorgen_1_bel_0] - composed_errorgens.append((_LSE('C', new_bels), w)) + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + P_eq_Q = (P==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_scale*w)) else: - ptup = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) - composed_errorgens.append((_LSE('H', [ptup[1]]), -1j*w*ptup[0])) - new_bels = [errorgen_1_bel_0, errorgen_2_bel_0] if stim_pauli_string_less_than(errorgen_1_bel_0, errorgen_2_bel_0) else [errorgen_2_bel_0, errorgen_1_bel_0] - composed_errorgens.append((_LSE('C', new_bels), w)) + PQ = pauli_product(P, Q) + composed_errorgens.append((_LSE('H', [PQ[1]]), -1j*w*PQ[0])) + new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_scale*w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'S': - ptup = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) - if errorgen_1_bel_0.commutes(errorgen_2_bel_0): - if ptup[1] == identity: - composed_errorgens.append((_LSE('H', [errorgen_2_bel_0]), 
-w*ptup[0])) - composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) - else: - if stim_pauli_string_less_than(ptup[1], errorgen_2_bel_0): - composed_errorgens.append((_LSE('A', [ptup[1], errorgen_2_bel_0]), -w*ptup[0])) - else: - composed_errorgens.append((_LSE('A', [ptup[1], errorgen_2_bel_0]), w*ptup[0])) - composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + PQ_eq_Q = (PQ[1]==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_sign = _ordered_new_bels_A(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_sign*w)) + composed_errorgens.append((_LSE('H', [P]), -w)) else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). - new_bels = [ptup[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup[1]] - composed_errorgens.append((_LSE('C', new_bels), -1j*w*ptup[0])) + new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_scale*w)) composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) - #Apologies to poor soul reading this code later, switching back and forth between notation in my notes and in previous code starts to - #get too hard at this point and forward, so switching some notation. 
-CIO + elif errorgen_1_type == 'H' and errorgen_2_type == 'C': #H_A[C_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 P = errorgen_2_bel_0 @@ -1072,7 +1071,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non PA = pauli_product(P, A) QA = pauli_product(Q, A) PQ = pauli_product(P, Q) - APQ = pauli_product(A, PQ[1]) + APQ = pauli_product(A, PQ[0]*PQ[1]) #also precompute whether pairs commute or anticommute com_AP = A.commutes(P) com_AQ = A.commutes(Q) @@ -1155,7 +1154,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) - #Case 1b: {A,P}=0, {A,Q}=0 + #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) @@ -1163,7 +1162,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) - #Case 1c: [A,P]=0, {A,Q}=0 + #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) @@ -1171,7 +1170,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) - #Case 1d: 
{A,P}=0, [A,Q]=0 + #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) @@ -1180,8 +1179,124 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) - - + elif errorgen_1_type == 'H' and errorgen_2_type == 'A': + #H_A[A_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + A = errorgen_1_bel_0 + #Case 1: P and Q commute. + if P.commutes(Q): + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1*QA[0]*addl_scale_1*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) @@ -1212,7 +1327,7 @@ def _ordered_new_bels_A(pauli1, pauli2, first_pauli_ident, second_pauli_ident, p addl_sign = -1 else: new_eg_type = 'A' - new_bels, addl_sign = ([pauli1, pauli2], 1) if stim_pauli_string_less_than(pauli2, pauli2) else ([pauli2, pauli1], -1) + new_bels, 
addl_sign = ([pauli1, pauli2], 1) if stim_pauli_string_less_than(pauli1, pauli2) else ([pauli2, pauli1], -1) return new_eg_type, new_bels, addl_sign def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq): @@ -1230,7 +1345,7 @@ def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, p else: new_eg_type = 'C' addl_scale_fac = 1 - new_bels = [pauli1, pauli2] if stim_pauli_string_less_than(pauli2, pauli2) else [pauli2, pauli1] + new_bels = [pauli1, pauli2] if stim_pauli_string_less_than(pauli1, pauli2) else [pauli2, pauli1] return new_eg_type, new_bels, addl_scale_fac def com(P1, P2): From b948a18586c64ee3b221676480053b6d2a33507a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 19 Dec 2024 22:50:00 -0700 Subject: [PATCH 047/102] S-H and S-S Compositions Adds in the S-H and S-S error generator compositions. --- pygsti/tools/errgenproptools.py | 36 +++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index b1e44cc7c..d9f5f145a 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1031,6 +1031,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non identity = stim.PauliString('I'*len(errorgen_1_bel_0)) if errorgen_1_type == 'H' and errorgen_2_type == 'H': + #H_P[H_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 P = errorgen_1_bel_0 Q = errorgen_2_bel_0 P_eq_Q = (P==Q) @@ -1044,6 +1045,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_scale*w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'S': + #H_P[S_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 P = errorgen_1_bel_0 Q = errorgen_2_bel_0 PQ = pauli_product(P, Q) @@ -1058,7 +1060,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non new_eg_type, new_bels, 
addl_scale = _ordered_new_bels_C(PQ[1], Q, PQ_ident, False, PQ_eq_Q) if new_eg_type is not None: composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_scale*w)) - composed_errorgens.append((_LSE('H', [errorgen_1_bel_0]), -w)) + composed_errorgens.append((_LSE('H', [P]), -w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'C': #H_A[C_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 @@ -1298,9 +1300,39 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_2 is not None: composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + #Note: This could be done by leveraging the commutator code, but that adds + #additional overhead which I am opting to avoid. + elif errorgen_1_type == 'S' and errorgen_2_type == 'H': + #S_P[H_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + PQ_eq_Q = (PQ[1]==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_sign = _ordered_new_bels_A(PQ[1], P, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_sign*w)) + composed_errorgens.append((_LSE('H', [Q]), -w)) + else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). 
+ new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(PQ[1], P, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_scale*w)) + composed_errorgens.append((_LSE('H', [Q]), -w)) + elif errorgen_1_type == 'S' and errorgen_2_type == 'S': + #S_P[S_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + if not PQ_ident: + composed_errorgens.append((_LSE('S', [PQ[1]]), w)) + composed_errorgens.append((_LSE('S', [P]), -w)) + composed_errorgens.append((_LSE('S', [Q]),- w)) - + elif errorgen_1_type == 'S' and errorgen_2_type == 'C': + pass return composed_errorgens From 0db5baf1d156deb9992882800ffbac168a4ce25d Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 3 Jan 2025 21:05:00 -0700 Subject: [PATCH 048/102] Fix StateSpace bugs Fix some bugs I introduced when making some of the attributes of StateSpace and its child classes properties. 
--- pygsti/baseobjs/statespace.py | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/pygsti/baseobjs/statespace.py b/pygsti/baseobjs/statespace.py index 163a57a96..e06079340 100644 --- a/pygsti/baseobjs/statespace.py +++ b/pygsti/baseobjs/statespace.py @@ -1117,10 +1117,10 @@ def is_label(x): if udims is not None: udims = [udims] if types is not None: types = [types] - self.labels = tuple([tuple(tpbLabels) for tpbLabels in label_list]) + self._labels = tuple([tuple(tpbLabels) for tpbLabels in label_list]) #Type check - labels must be strings or ints - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: if not is_label(lbl): raise ValueError("'%s' is an invalid state-space label (must be a string or integer)" % lbl) @@ -1128,11 +1128,11 @@ def is_label(x): # Get the type of each labeled space self.label_types = {} if types is None: # use defaults - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: self.label_types[lbl] = 'C' if (isinstance(lbl, str) and lbl.startswith('C')) else 'Q' # default else: - for tpbLabels, tpbTypes in zip(self.labels, types): + for tpbLabels, tpbTypes in zip(self._labels, types): for lbl, typ in zip(tpbLabels, tpbTypes): self.label_types[lbl] = typ @@ -1140,7 +1140,7 @@ def is_label(x): self.label_udims = {} self.label_dims = {} if udims is None: - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: if isinstance(lbl, _numbers.Integral): d = 2 # ints = qubits elif lbl.startswith('T'): d = 3 # qutrit @@ -1151,7 +1151,7 @@ def is_label(x): self.label_udims[lbl] = d self.label_dims[lbl] = d**2 if (isinstance(lbl, _numbers.Integral) or lbl[0] in ('Q', 'T')) else d else: - for tpbLabels, tpbDims in 
zip(self.labels, udims): + for tpbLabels, tpbDims in zip(self._labels, udims): for lbl, udim in zip(tpbLabels, tpbDims): self.label_udims[lbl] = udim self.label_dims[lbl] = udim**2 @@ -1162,7 +1162,7 @@ def is_label(x): self.tpb_dims = [] self.tpb_udims = [] - for iTPB, tpbLabels in enumerate(self.labels): + for iTPB, tpbLabels in enumerate(self._labels): float_prod = _np.prod(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd')) if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer self.tpb_dims.append(_np.inf) @@ -1181,11 +1181,11 @@ def is_label(x): self._udim = sum(self.tpb_udims) self._nqubits = self._nqudits = None - if len(self.labels) == 1: + if len(self._labels) == 1: if all([v == 2 for v in self.label_udims.values()]): - self._nqudits = self._nqubits = len(self.labels[0]) # there's a well-defined number of qubits + self._nqudits = self._nqubits = len(self._labels[0]) # there's a well-defined number of qubits elif all([typ == 'Q' for typ in self.label_types.values()]): - self._nqudits = len(self.labels[0]) + self._nqudits = len(self._labels[0]) #This state space is effectively static, so we can precompute the hash for it for performance self._hash = hash((self.tensor_product_blocks_labels, @@ -1210,9 +1210,9 @@ def __setstate__(self, state_dict): def _to_nice_serialization(self): state = super()._to_nice_serialization() - state.update({'labels': self.labels, - 'unitary_space_dimensions': [[self.label_udims[l] for l in tpb] for tpb in self.labels], - 'types': [[self.label_types[l] for l in tpb] for tpb in self.labels] + state.update({'labels': self._labels, + 'unitary_space_dimensions': [[self.label_udims[l] for l in tpb] for tpb in self._labels], + 'types': [[self.label_types[l] for l in tpb] for tpb in self._labels] }) return state @@ -1229,7 +1229,7 @@ def labels(self): ------- tuple of tuples """ - return self.labels + return self._labels @property def udim(self): @@ -1276,7 +1276,7 @@ def 
num_tensor_product_blocks(self): ------- int """ - return len(self.labels) + return len(self._labels) @property def tensor_product_blocks_labels(self): @@ -1287,7 +1287,7 @@ def tensor_product_blocks_labels(self): ------- tuple of tuples """ - return self.labels + return self._labels @property def tensor_product_blocks_dimensions(self): @@ -1298,7 +1298,7 @@ def tensor_product_blocks_dimensions(self): ------- tuple of tuples """ - return tuple([tuple([self.label_dims[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_dims[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) @property def tensor_product_blocks_udimensions(self): @@ -1309,7 +1309,7 @@ def tensor_product_blocks_udimensions(self): ------- tuple of tuples """ - return tuple([tuple([self.label_udims[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_udims[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) @property def tensor_product_blocks_types(self): @@ -1320,7 +1320,7 @@ def tensor_product_blocks_types(self): ------- tuple of tuples """ - return tuple([tuple([self.label_types[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_types[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) def label_dimension(self, label): """ @@ -1383,10 +1383,10 @@ def label_type(self, label): return self.label_types[label] def __str__(self): - if len(self.labels) == 0: return "ZeroDimSpace" + if len(self._labels) == 0: return "ZeroDimSpace" return ' + '.join( ['*'.join(["%s(%d%s)" % (lbl, self.label_dims[lbl], 'c' if (self.label_types[lbl] == 'C') else '') - for lbl in tpb]) for tpb in self.labels]) + for lbl in tpb]) for tpb in self._labels]) def default_space_for_dim(dim): """ From 4882e403f69d11a597e742b9bd732b91e3bc2803 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 3 Jan 2025 21:07:23 -0700 Subject: [PATCH 049/102] Error generator basis efficiency 
Significantly improve the performance of error generator basis construction. This is achieved by creating a new bulk error generator matrix construction function for use by the error generator basis class. This allows reusing Basis objects which drastically reduces runtime for higher dimensional systems. --- pygsti/baseobjs/errorgenbasis.py | 40 +++--- pygsti/tools/optools.py | 209 ++++++++++++++++++++++++++----- 2 files changed, 203 insertions(+), 46 deletions(-) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 99ec98a64..c349de51c 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -150,10 +150,12 @@ def elemgen_dual_matrices(self): in this basis, returned in the same order as they appear in `labels`. """ if self._cached_dual_matrices is None: - self._cached_dual_matrices = tuple([_ot.create_elementary_errorgen_nqudit_dual( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True) for elemgen_label in self._labels]) + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self._labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self._labels] + self._cached_dual_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit_dual( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) return self._cached_dual_matrices @property @@ -164,10 +166,12 @@ def elemgen_matrices(self): in this basis, returned in the same order as they appear in `labels`. 
""" if self._cached_matrices is None: - self._cached_matrices = tuple([_ot.create_elementary_errorgen_nqudit( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True) for elemgen_label in self._labels]) + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self._labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self._labels] + self._cached_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) return self._cached_matrices @property @@ -668,10 +672,12 @@ def elemgen_dual_matrices(self): in this basis, returned in the same order as they appear in `labels`. """ if self._cached_dual_matrices is None: - self._cached_dual_matrices = tuple([_ot.create_elementary_errorgen_nqudit_dual( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True) for elemgen_label in self.labels]) + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self.labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self.labels] + self._cached_dual_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit_dual( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) return self._cached_dual_matrices @property @@ -682,10 +688,12 @@ def elemgen_matrices(self): in this basis, returned in the same order as they appear in `labels`. 
""" if self._cached_matrices is None: - self._cached_matrices = tuple([_ot.create_elementary_errorgen_nqudit( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True) for elemgen_label in self.labels]) + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self.labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self.labels] + self._cached_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) return self._cached_matrices @property diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index e194c8f5b..f3f69f0a9 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1835,55 +1835,204 @@ def _assert_shape(ar, shape, sparse=False): def create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False): """ - TODO: docstring - labels can be, e.g. ('H', 'XX') and basis should be a 1-qubit basis w/single-char labels + Construct the elementary error generator matrix, either in a dense or sparse representation, + corresponding to the specified type and basis element subscripts. + + Parameters + ---------- + typ : str + String specifying the type of error generator to be constructed. Can be either 'H', 'S', 'C' or 'A'. + + basis_element_labels : list or tuple of str + A list or tuple of strings corresponding to the basis element labels subscripting the desired elementary + error generators. If `typ` is 'H' or 'S' this should be length-1, and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the elementary error generator. + + normalize : bool, optional (default False) + If True the elementary error generator is normalized to have unit Frobenius norm. 
+ + sparse : bool, optional (default False) + If True the elementary error generator is returned as a sparse array. + + tensorprod_basis : bool, optional (default False) + If True, the returned array is given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + np.ndarray or Scipy CSR matrix """ - return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, + eglist = _create_elementary_errorgen_nqudit([typ], [basis_element_labels], basis_1q, normalize, sparse, tensorprod_basis, create_dual=False) - + return eglist[0] def create_elementary_errorgen_nqudit_dual(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False): """ - TODO: docstring - labels can be, e.g. ('H', 'XX') and basis should be a 1-qubit basis w/single-char labels + Construct the dual elementary error generator matrix, either in a dense or sparse representation, + corresponding to the specified type and basis element subscripts. + + Parameters + ---------- + typ : str + String specifying the type of dual error generator to be constructed. Can be either 'H', 'S', 'C' or 'A'. + + basis_element_labels : list or tuple of str + A list or tuple of strings corresponding to the basis element labels subscripting the desired dual elementary + error generators. If `typ` is 'H' or 'S' this should be length-1, and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the dual elementary error generator. + + normalize : bool, optional (default False) + If True the dual elementary error generator is normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the dual elementary error generator is returned as a sparse array. 
+ + tensorprod_basis : bool, optional (default False) + If True, the returned array is given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + np.ndarray or Scipy CSR matrix """ - return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, + eglist = _create_elementary_errorgen_nqudit([typ], [basis_element_labels], basis_1q, normalize, sparse, tensorprod_basis, create_dual=True) + return eglist[0] + +def bulk_create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, + sparse=False, tensorprod_basis=False): + """ + Construct the elementary error generator matrices, either in a dense or sparse representation, + corresponding to the specified types and list of basis element subscripts. + + Parameters + ---------- + typ : list of str + List of strings specifying the types of error generator to be constructed. Entries can be 'H', 'S', 'C' or 'A'. + + basis_element_labels : list of lists or tuples of str + A list containing sublists or subtuple of strings corresponding to the basis element labels subscripting the desired elementary + error generators. For each sublist, if the corresponding entry of `typ` is 'H' or 'S' this should be length-1, + and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the elementary error generators. + normalize : bool, optional (default False) + If True the elementary error generators are normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the elementary error generators are returned as a sparse array. 
+ + tensorprod_basis : bool, optional (default False) + If True, the returned arrays are given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + list of np.ndarray or Scipy CSR matrix + """ + + return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize, + sparse, tensorprod_basis, create_dual=False) + + +def bulk_create_elementary_errorgen_nqudit_dual(typ, basis_element_labels, basis_1q, normalize=False, + sparse=False, tensorprod_basis=False): + """ + Construct the dual elementary error generator matrices, either in a dense or sparse representation, + corresponding to the specified types and list of basis element subscripts. + + Parameters + ---------- + typ : list of str + List of strings specifying the types of dual error generators to be constructed. Entries can be 'H', 'S', 'C' or 'A'. + + basis_element_labels : list of lists or tuples of str + A list containing sublists or subtuple of strings corresponding to the basis element labels subscripting the desired dual elementary + error generators. For each sublist, if the corresponding entry of `typ` is 'H' or 'S' this should be length-1, + and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the dual elementary error generators. + + normalize : bool, optional (default False) + If True the dual elementary error generators are normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the dual elementary error generators are returned as a sparse array. 
+ + tensorprod_basis : bool, optional (default False) + If True, the returned arrays are given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + list of np.ndarray or Scipy CSR matrix + """ + + return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize, + sparse, tensorprod_basis, create_dual=True) def _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False, create_dual=False): - create_fn = _lt.create_elementary_errorgen_dual if create_dual else _lt.create_elementary_errorgen - if typ in 'HS': - B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[0]]) - ret = create_fn(typ, B, sparse=sparse) # in std basis - elif typ in 'CA': - B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[0]]) - C = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[1]]) - ret = create_fn(typ, B, C, sparse=sparse) # in std basis - else: - raise ValueError("Invalid elementary error generator type: %s" % str(typ)) - - if normalize: - normfn = _spsl.norm if sparse else _np.linalg.norm - norm = normfn(ret) # same as norm(term.flat) - if not _np.isclose(norm, 0): - ret /= norm # normalize projector - assert(_np.isclose(normfn(ret), 1.0)) + #See docstrings for `bulk_create_elementary_errorgen_nqudit` and `bulk_create_elementary_errorgen_nqudit_dual`. + create_fn = _lt.create_elementary_errorgen_dual if create_dual else _lt.create_elementary_errorgen + normfn = _spsl.norm if sparse else _np.linalg.norm + if tensorprod_basis: # convert from "flat" std basis to tensorprod of std bases (same elements but in # a different order). 
Important if want to also construct ops by kroneckering the # returned maps with, e.g., identities - nQubits = int(round(_np.log(ret.shape[0]) / _np.log(4))); assert(ret.shape[0] == 4**nQubits) - current_basis = _Basis.cast('std', ret.shape[0]) - tensorprod_basis = _Basis.cast('std', [(4,) * nQubits]) - ret = _bt.change_basis(ret, current_basis, tensorprod_basis) - - return ret + orig_bases = dict() #keys will be numbers of qubits, values basis objects. + tensorprod_bases = dict() + + eglist = [] + for egtyp, bels in zip(typ, basis_element_labels): + if egtyp in 'HS': + B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[0]]) + ret = create_fn(egtyp, B, sparse=sparse) # in std basis + elif egtyp in 'CA': + B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[0]]) + C = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[1]]) + ret = create_fn(egtyp, B, C, sparse=sparse) # in std basis + else: + raise ValueError("Invalid elementary error generator type: %s" % str(typ)) + + if normalize: + norm = normfn(ret) # same as norm(term.flat) + if not _np.isclose(norm, 0): + ret /= norm # normalize projector + assert(_np.isclose(normfn(ret), 1.0)) + + if tensorprod_basis: + num_qudits = int(round(_np.log(ret.shape[0]) / _np.log(basis_1q.dim))); + assert(ret.shape[0] == basis_1q.dim**num_qudits) + current_basis = orig_bases.get(num_qudits, None) + tensorprod_basis = tensorprod_bases.get(num_qudits, None) + if current_basis is None: + current_basis = _Basis.cast('std', basis_1q.dim**num_qudits) + orig_bases[num_qudits] = current_basis + if tensorprod_basis is None: + tensorprod_basis = _Basis.cast('std', [(basis_1q.dim,)*num_qudits]) + tensorprod_bases[num_qudits] = tensorprod_basis + + ret = _bt.change_basis(ret, current_basis, tensorprod_basis) + eglist.append(ret) + + return eglist -#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with -# calls to this one and unitary_to_std_processmx def rotation_gate_mx(r, mx_basis="gm"): 
""" Construct a rotation operation matrix. From 7733720f330680546084c7fb72b906acc92177a6 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 5 Jan 2025 15:18:02 -0700 Subject: [PATCH 050/102] Error generator matrix construction performance Notably improves performance of error generator matrix construction: - Improves performance of Basis class by precomputing (and safeguarding thereafter) the values of the dimension and other properties previously recomputed on the fly. - Add LRU caching of basis transformation matrices to improve basis change performance. - Reimplement the error generator matrix construction to directly leverage the fact that we always perform the construction in the standard basis. - Add in specialized function for the pauli basis which enable significant simplifications. --- pygsti/baseobjs/basis.py | 164 +++++++++-------- pygsti/baseobjs/basisconstructors.py | 2 +- pygsti/tools/basistools.py | 4 +- pygsti/tools/lindbladtools.py | 258 ++++++++++++++++++++++++--- pygsti/tools/optools.py | 15 +- 5 files changed, 339 insertions(+), 104 deletions(-) diff --git a/pygsti/baseobjs/basis.py b/pygsti/baseobjs/basis.py index 2505cf06e..42c4950b9 100644 --- a/pygsti/baseobjs/basis.py +++ b/pygsti/baseobjs/basis.py @@ -475,7 +475,8 @@ def is_equivalent(self, other, sparseness_must_match=True): return _np.array_equal(self.elements, other.elements) else: return _np.array_equal(self.elements, other) - + + @lru_cache(maxsize=4) def create_transform_matrix(self, to_basis): """ Get the matrix that transforms a vector from this basis to `to_basis`. @@ -505,6 +506,7 @@ def create_transform_matrix(self, to_basis): else: return _np.dot(to_basis.from_std_transform_matrix, self.to_std_transform_matrix) + @lru_cache(maxsize=4) def reverse_transform_matrix(self, from_basis): """ Get the matrix that transforms a vector from `from_basis` to this basis. 
@@ -1097,6 +1099,11 @@ def __init__(self, name, dim_or_statespace, sparse=False): super(BuiltinBasis, self).__init__(name, longname, real, sparse) + #precompute some properties + self._size, self._dim, self._elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) + #Check that sparse is True only when elements are *matrices* + assert(not self.sparse or len(self._elshape) == 2), "`sparse == True` is only allowed for *matrix*-valued bases!" + def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, @@ -1117,16 +1124,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - return dim + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - return size + return self._size @property def elshape(self): @@ -1136,12 +1141,7 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - - #Check that sparse is True only when elements are *matrices* - assert(not self.sparse or len(elshape) == 2), "`sparse == True` is only allowed for *matrix*-valued bases!" 
- - return elshape + return self._elshape @property def first_element_is_identity(self): @@ -1248,26 +1248,29 @@ def __init__(self, component_bases, name=None, longname=None): ''' assert(len(component_bases) > 0), "Must supply at least one component basis" - self.component_bases = [] + self._component_bases = [] self._vector_elements = None # vectorized elements: 1D arrays for compbasis in component_bases: if isinstance(compbasis, Basis): - self.component_bases.append(compbasis) + self._component_bases.append(compbasis) else: #compbasis can be a list/tuple of args to Basis.cast, e.g. ('pp',2) - self.component_bases.append(Basis.cast(*compbasis)) + self._component_bases.append(Basis.cast(*compbasis)) if name is None: - name = "+".join([c.name for c in self.component_bases]) + name = "+".join([c.name for c in self._component_bases]) if longname is None: longname = "Direct-sum basis with components " + ", ".join( - [c.name for c in self.component_bases]) + [c.name for c in self._component_bases]) + + real = all([c.real for c in self._component_bases]) + sparse = all([c.sparse for c in self._component_bases]) + assert(all([c.real == real for c in self._component_bases])), "Inconsistent `real` value among component bases!" + assert(all([c.sparse == sparse for c in self._component_bases])), "Inconsistent sparsity among component bases!" - real = all([c.real for c in self.component_bases]) - sparse = all([c.sparse for c in self.component_bases]) - assert(all([c.real == real for c in self.component_bases])), "Inconsistent `real` value among component bases!" - assert(all([c.sparse == sparse for c in self.component_bases])), "Inconsistent sparsity among component bases!" + #precompute various basis properties. can add more as they are deemed frequently accessed. 
+ self._dim = sum([c.dim for c in self._component_bases]) #Init everything but elements and labels & their number/size super(DirectSumBasis, self).__init__(name, longname, real, sparse) @@ -1276,7 +1279,7 @@ def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, 'longname': self.longname, - 'component_bases': [b.to_nice_serialization() for b in self.component_bases] + 'component_bases': [b.to_nice_serialization() for b in self._component_bases] }) return state @@ -1285,6 +1288,11 @@ def _from_nice_serialization(cls, state): component_bases = [Basis.from_nice_serialization(b) for b in state['component_bases']] return cls(component_bases, state['name'], state['longname']) + @property + def component_bases(self): + """A list of the component bases.""" + return self._component_bases + @property def dim(self): """ @@ -1292,14 +1300,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - return sum([c.dim for c in self.component_bases]) + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - return sum([c.size for c in self.component_bases]) + return sum([c.size for c in self._component_bases]) @property def elshape(self): @@ -1309,13 +1317,13 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - elndim = len(self.component_bases[0].elshape) - assert(all([len(c.elshape) == elndim for c in self.component_bases]) + elndim = len(self._component_bases[0].elshape) + assert(all([len(c.elshape) == elndim for c in self._component_bases]) ), "Inconsistent element ndims among component bases!" 
- return tuple([sum([c.elshape[k] for c in self.component_bases]) for k in range(elndim)]) + return tuple([sum([c.elshape[k] for c in self._component_bases]) for k in range(elndim)]) def __hash__(self): - return hash(tuple((hash(comp) for comp in self.component_bases))) + return hash((self.name,)+tuple((hash(comp) for comp in self._component_bases))) def _lazy_build_vector_elements(self): if self.sparse: @@ -1324,7 +1332,7 @@ def _lazy_build_vector_elements(self): compMxs = _np.zeros((self.size, self.dim), 'complex') i, start = 0, 0 - for compbasis in self.component_bases: + for compbasis in self._component_bases: for lbl, vel in zip(compbasis.labels, compbasis.vector_elements): assert(_sps.issparse(vel) == self.sparse), "Inconsistent sparsity!" if self.sparse: @@ -1346,7 +1354,7 @@ def _lazy_build_elements(self): vstart = 0 if self.sparse: # build block-diagonal sparse mx diagBlks = [] - for compbasis in self.component_bases: + for compbasis in self._component_bases: cs = compbasis.elshape comp_vel = vel[vstart:vstart + compbasis.dim] diagBlks.append(comp_vel.reshape(cs)) @@ -1356,7 +1364,7 @@ def _lazy_build_elements(self): else: start = [0] * self.elndim el = _np.zeros(self.elshape, 'complex') - for compbasis in self.component_bases: + for compbasis in self._component_bases: cs = compbasis.elshape comp_vel = vel[vstart:vstart + compbasis.dim] slc = tuple([slice(start[k], start[k] + cs[k]) for k in range(self.elndim)]) @@ -1370,12 +1378,12 @@ def _lazy_build_elements(self): def _lazy_build_labels(self): self._labels = [] - for i, compbasis in enumerate(self.component_bases): + for i, compbasis in enumerate(self._component_bases): for lbl in compbasis.labels: self._labels.append(lbl + "/%d" % i) def _copy_with_toggled_sparsity(self): - return DirectSumBasis([cb._copy_with_toggled_sparsity() for cb in self.component_bases], + return DirectSumBasis([cb._copy_with_toggled_sparsity() for cb in self._component_bases], self.name, self.longname) def is_equivalent(self, 
other, sparseness_must_match=True): @@ -1397,9 +1405,9 @@ def is_equivalent(self, other, sparseness_must_match=True): """ otherIsBasis = isinstance(other, DirectSumBasis) if not otherIsBasis: return False # can't be equal to a non-DirectSumBasis - if len(self.component_bases) != len(other.component_bases): return False + if len(self._component_bases) != len(other.component_bases): return False return all([c1.is_equivalent(c2, sparseness_must_match) - for (c1, c2) in zip(self.component_bases, other.component_bases)]) + for (c1, c2) in zip(self._component_bases, other.component_bases)]) @property def vector_elements(self): @@ -1490,7 +1498,7 @@ def create_equivalent(self, builtin_basis_name): ------- DirectSumBasis """ - equiv_components = [c.create_equivalent(builtin_basis_name) for c in self.component_bases] + equiv_components = [c.create_equivalent(builtin_basis_name) for c in self._component_bases] return DirectSumBasis(equiv_components) def create_simple_equivalent(self, builtin_basis_name=None): @@ -1518,9 +1526,9 @@ def create_simple_equivalent(self, builtin_basis_name=None): """ if builtin_basis_name is None: builtin_basis_name = self.name # default - if len(self.component_bases) > 0: - first_comp_name = self.component_bases[0].name - if all([c.name == first_comp_name for c in self.component_bases]): + if len(self._component_bases) > 0: + first_comp_name = self._component_bases[0].name + if all([c.name == first_comp_name for c in self._component_bases]): builtin_basis_name = first_comp_name # if all components have the same name return BuiltinBasis(builtin_basis_name, self.elsize, sparse=self.sparse) # Note: changes dimension @@ -1575,24 +1583,36 @@ def __init__(self, component_bases, name=None, longname=None): ''' assert(len(component_bases) > 0), "Must supply at least one component basis" - self.component_bases = [] + self._component_bases = [] for compbasis in component_bases: if isinstance(compbasis, Basis): - self.component_bases.append(compbasis) + 
self._component_bases.append(compbasis) else: #compbasis can be a list/tuple of args to Basis.cast, e.g. ('pp',2) - self.component_bases.append(Basis.cast(*compbasis)) + self._component_bases.append(Basis.cast(*compbasis)) if name is None: - name = "*".join([c.name for c in self.component_bases]) + name = "*".join([c.name for c in self._component_bases]) if longname is None: longname = "Tensor-product basis with components " + ", ".join( - [c.name for c in self.component_bases]) + [c.name for c in self._component_bases]) - real = all([c.real for c in self.component_bases]) - sparse = all([c.sparse for c in self.component_bases]) - #assert(all([c.real == real for c in self.component_bases])), "Inconsistent `real` value among component bases!" - assert(all([c.sparse == sparse for c in self.component_bases])), "Inconsistent sparsity among component bases!" + real = all([c.real for c in self._component_bases]) + sparse = all([c.sparse for c in self._component_bases]) + #assert(all([c.real == real for c in self._component_bases])), "Inconsistent `real` value among component bases!" + assert(all([c.sparse == sparse for c in self._component_bases])), "Inconsistent sparsity among component bases!" + + #precompute certain properties. Can add more as deemed frequently accessed. + self._dim = int(_np.prod([c.dim for c in self._component_bases])) + + #NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a + # basis are in their "natrual" representation where it makes sense to take tensor products. For + # example, a direct-sum basis may hold elements in a compact way that violate this... but I'm not sure if they + # do and this needs to be checked. 
For now, we could just disable this overly-restrictive assert: + assert(all([c.is_simple() for c in self._component_bases])), \ + "Components of a tensor product basis must be *simple* (have vector-dimension == size of elements)" + # because we use the natural representation to take tensor (kronecker) products. + # Note: this assertion also means dim == product(component_elsizes) == elsize, so basis is *simple* super(TensorProdBasis, self).__init__(name, longname, real, sparse) @@ -1600,7 +1620,7 @@ def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, 'longname': self.longname, - 'component_bases': [b.to_nice_serialization() for b in self.component_bases] + 'component_bases': [b.to_nice_serialization() for b in self._component_bases] }) return state @@ -1609,6 +1629,11 @@ def _from_nice_serialization(cls, state): component_bases = [Basis.from_nice_serialization(b) for b in state['component_bases']] return cls(component_bases, state['name'], state['longname']) + @property + def component_bases(self): + """A list of the component bases.""" + return self._component_bases + @property def dim(self): """ @@ -1616,25 +1641,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - dim = int(_np.prod([c.dim for c in self.component_bases])) - - #NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a - # basis are in their "natrual" representation where it makes sense to take tensor products. For - # example, a direct-sum basis may hold elements in a compact way that violate this... but I'm not sure if they - # do and this needs to be checked. 
For now, we could just disable this overly-restrictive assert: - assert(all([c.is_simple() for c in self.component_bases])), \ - "Components of a tensor product basis must be *simple* (have vector-dimension == size of elements)" - # because we use the natural representation to take tensor (kronecker) products. - # Note: this assertion also means dim == product(component_elsizes) == elsize, so basis is *simple* - - return dim + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - return int(_np.prod([c.size for c in self.component_bases])) + return int(_np.prod([c.size for c in self._component_bases])) @property def elshape(self): @@ -1644,16 +1658,16 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - elndim = max([c.elndim for c in self.component_bases]) + elndim = max([c.elndim for c in self._component_bases]) elshape = [1] * elndim - for c in self.component_bases: + for c in self._component_bases: off = elndim - c.elndim for k, d in enumerate(c.elshape): elshape[k + off] *= d return tuple(elshape) def __hash__(self): - return hash(tuple((hash(comp) for comp in self.component_bases))) + return hash((self.name,) + tuple((hash(comp) for comp in self._component_bases))) def _lazy_build_elements(self): #LAZY building of elements (in case we never need them) @@ -1665,7 +1679,7 @@ def _lazy_build_elements(self): #Take kronecker product of *natural* reps of component-basis elements # then reshape to vectors at the end. This requires that the vector- # dimension of the component spaces equals the "natural space" dimension. 
- comp_els = [c.elements for c in self.component_bases] + comp_els = [c.elements for c in self._component_bases] for i, factors in enumerate(_itertools.product(*comp_els)): if self.sparse: M = _sps.identity(1, 'complex', 'csr') @@ -1681,12 +1695,12 @@ def _lazy_build_elements(self): def _lazy_build_labels(self): self._labels = [] - comp_lbls = [c.labels for c in self.component_bases] + comp_lbls = [c.labels for c in self._component_bases] for i, factor_lbls in enumerate(_itertools.product(*comp_lbls)): self._labels.append(''.join(factor_lbls)) def _copy_with_toggled_sparsity(self): - return TensorProdBasis([cb._copy_with_toggled_sparsity() for cb in self.component_bases], + return TensorProdBasis([cb._copy_with_toggled_sparsity() for cb in self._component_bases], self.name, self.longname) def is_equivalent(self, other, sparseness_must_match=True): @@ -1708,10 +1722,10 @@ def is_equivalent(self, other, sparseness_must_match=True): """ otherIsBasis = isinstance(other, TensorProdBasis) if not otherIsBasis: return False # can't be equal to a non-DirectSumBasis - if len(self.component_bases) != len(other.component_bases): return False + if len(self._component_bases) != len(other.component_bases): return False if self.sparse != other.sparse: return False return all([c1.is_equivalent(c2, sparseness_must_match) - for (c1, c2) in zip(self.component_bases, other.component_bases)]) + for (c1, c2) in zip(self._component_bases, other.component_bases)]) def create_equivalent(self, builtin_basis_name): """ @@ -1735,11 +1749,11 @@ def create_equivalent(self, builtin_basis_name): # This is a part of what woudl go into that... but it's not complete. 
# if builtin_basis_name == 'std': # special case when we change classical components to 'cl' # equiv_components = [] - # for c in self.component_bases: + # for c in self._component_bases: # if c.elndim == 1: equiv_components.append(c.create_equivalent('cl')) # else: equiv_components.append(c.create_equivalent('std')) # else: - equiv_components = [c.create_equivalent(builtin_basis_name) for c in self.component_bases] + equiv_components = [c.create_equivalent(builtin_basis_name) for c in self._component_bases] return TensorProdBasis(equiv_components) def create_simple_equivalent(self, builtin_basis_name=None): @@ -1767,7 +1781,7 @@ def create_simple_equivalent(self, builtin_basis_name=None): """ #if builtin_basis_name == 'std': # special case when we change classical components to 'clmx' # equiv_components = [] - # for c in self.component_bases: + # for c in self._component_bases: # if c.elndim == 1: equiv_components.append(BuiltinBasis('clmx', c.dim**2, sparse=self.sparse)) # # c.create_simple_equivalent('clmx')) # else: equiv_components.append(c.create_simple_equivalent('std')) @@ -1776,9 +1790,9 @@ def create_simple_equivalent(self, builtin_basis_name=None): if builtin_basis_name is None: builtin_basis_name = self.name # default - if len(self.component_bases) > 0: - first_comp_name = self.component_bases[0].name - if all([c.name == first_comp_name for c in self.component_bases]): + if len(self._component_bases) > 0: + first_comp_name = self._component_bases[0].name + if all([c.name == first_comp_name for c in self._component_bases]): builtin_basis_name = first_comp_name # if all components have the same name return BuiltinBasis(builtin_basis_name, self.elsize, sparse=self.sparse) diff --git a/pygsti/baseobjs/basisconstructors.py b/pygsti/baseobjs/basisconstructors.py index 5fa8f3a19..456977d24 100644 --- a/pygsti/baseobjs/basisconstructors.py +++ b/pygsti/baseobjs/basisconstructors.py @@ -442,7 +442,7 @@ def sizes(self, dim, sparse): def std_matrices(matrix_dim): 
""" Get the elements of the matrix unit, or "standard", basis of matrix-dimension `matrix_dim`. - The matrices are ordered so that the row index changes the fastest. + The matrices are ordered so that the column index changes the fastest. Constructs the standard basis spanning the density-matrix space given by `matrix_dim` x `matrix_dim` matrices. diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index 835fdd264..f15ab137c 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -10,7 +10,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** -from functools import partial +from functools import partial, lru_cache import numpy as _np @@ -18,7 +18,7 @@ # from ..baseobjs.basis import Basis, BuiltinBasis, DirectSumBasis from pygsti.baseobjs import basis as _basis - +@lru_cache(maxsize=1) def basis_matrices(name_or_basis, dim, sparse=False): """ Get the elements of the specifed basis-type which spans the density-matrix space given by `dim`. diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 30d953800..fb9bc12d1 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -59,6 +59,14 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ sparse : bool, optional Whether to construct a sparse or dense (the default) matrix. + normalization_factor : str or float, optional (default 'auto') + String or float specifying the normalization factor to apply. If + a string the options are 'auto' and 'auto_return', which both use + the corresponding (primal) elementary error generator to calculate + this automatically and only differ in whether they return this + normalization factor. If a float, the reciprocal of the input value + is used directly. 
+ Returns ------- ndarray or Scipy CSR matrix @@ -77,17 +85,21 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ # Loop through the standard basis as all possible input density matrices - for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx - # Only difference between H/S/C/A is how they transform input density matrices - if typ == 'H': - rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * - elif typ == 'S': - rho1 = (p @ rho0 @ pdag) # 1 / d2 * - elif typ == 'C': - rho1 = (p @ rho0 @ qdag + q @ rho0 @ pdag) # 1 / (2 * d2) * - elif typ == 'A': - rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag) # 1j / (2 * d2) - elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + for i in range(d): # rho0 == input density mx + for j in range(d): + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'A': + rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() return_normalization = bool(normalization_factor == 'auto_return') if normalization_factor in ('auto', 'auto_return'): @@ -97,11 +109,105 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ else: normalization_factor = _np.vdot(elem_errgen.flatten(), primal.flatten()) elem_errgen *= _np.real_if_close(1 / normalization_factor).item() # item() -> scalar - if sparse: elem_errgen = elem_errgen.tocsr() return (elem_errgen, normalization_factor) if 
return_normalization else elem_errgen +#TODO: Should be able to leverage the structure of the paulis as generalized permutation +#matrices to avoid explicitly doing outer products +def create_elementary_errorgen_dual_pauli(typ, p, q=None, sparse=False): + """ + Construct a "dual" elementary error generator matrix in the "standard" (matrix-unit) basis. + Specialized to p and q being elements of the pauli basis. + + The elementary error generator that is dual to the one computed by calling + :func:`create_elementary_errorgen` with the same argument. This dual element + can be used to find the coefficient of the original, or "primal" elementary generator. + For example, if `A = sum(c_i * E_i)`, where `E_i` are the elementary error generators given + by :func:`create_elementary_errorgen`), then `c_i = dot(D_i.conj(), A)` where `D_i` + is the dual to `E_i`. + + There are four different types of dual elementary error generators: 'H' (Hamiltonian), + 'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928. + Each type transforms an input density matrix differently. The action of an elementary + error generator `L` on an input density matrix `rho` is given by: + Hamiltonian: `L(rho) = -1j/(2d^2) * [ p, rho ]` + Stochastic: `L(rho) = 1/(d^2) p * rho * p` + Correlation: `L(rho) = 1/(2d^2) ( p * rho * q + q * rho * p)` + Active: `L(rho) = 1j/(2d^2) ( p * rho * q - q * rho * p)` + + where `d` is the dimension of the Hilbert space, e.g. 2 for a single qubit. Square + brackets denotes the commutator and curly brackets the anticommutator. + `L` is returned as a superoperator matrix that acts on vectorized density matrices. + + Parameters + ---------- + typ : {'H','S','C','A'} + The type of dual error generator to construct. + + p : numpy.ndarray + d-dimensional basis matrix. + + q : numpy.ndarray, optional + d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`. 
+ + sparse : bool, optional + Whether to construct a sparse or dense (the default) matrix. + + Returns + ------- + ndarray or Scipy CSR matrix + """ + d = p.shape[0]; d2 = d**2 + + if sparse: + elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype) + else: + elem_errgen = _np.empty((d2, d2), dtype=p.dtype) + + assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'" + assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ + "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ + + if typ == 'H': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'S': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'C': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + + if typ in 'HCA': + normalization_factor = 1/(2*d2) + else: + normalization_factor = 1/d2 + + elem_errgen *= normalization_factor + if sparse: elem_errgen = elem_errgen.tocsr() + return elem_errgen + + +#TODO: The construction 
can be made a bit more efficient if we know we will be constructing multiple +#error generators with overlapping indices by reusing intermediate results. def create_elementary_errorgen(typ, p, q=None, sparse=False): """ Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis. @@ -137,7 +243,8 @@ def create_elementary_errorgen(typ, p, q=None, sparse=False): ------- ndarray or Scipy CSR matrix """ - d = p.shape[0]; d2 = d**2 + d = p.shape[0] + d2 = d**2 if sparse: elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype) else: @@ -155,23 +262,124 @@ def create_elementary_errorgen(typ, p, q=None, sparse=False): pq_minus_qp = pdag @ q - qdag @ p # Loop through the standard basis as all possible input density matrices - for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx - # Only difference between H/S/C/A is how they transform input density matrices - if typ == 'H': - rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi - elif typ == 'S': - pdag_p = (pdag @ p) - rho1 = p @ rho0 @ pdag - 0.5 * (pdag_p @ rho0 + rho0 @ pdag_p) - elif typ == 'C': - rho1 = p @ rho0 @ qdag + q @ rho0 @ pdag - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) - elif typ == 'A': - rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) + for i in range(d): + for j in range(d): + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + pdag_p = (pdag @ p) + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pdag_p[:, i] + rho1[i, :] += -.5*pdag_p[j, :] + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elif typ == 'A': + rho1 
= 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + + if sparse: elem_errgen = elem_errgen.tocsr() + return elem_errgen + +#TODO: Should be able to leverage the structure of the paulis as generalized permutation +#matrices to avoid explicitly doing outer products +def create_elementary_errorgen_pauli(typ, p, q=None, sparse=False): + """ + Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis. + Specialized to the case where p and q are elements of the pauli basis. + + There are four different types of elementary error generators: 'H' (Hamiltonian), + 'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928. + Each type transforms an input density matrix differently. The action of an elementary + error generator `L` on an input density matrix `rho` is given by: + + Hamiltonian: `L(rho) = -1j * [ p, rho ]` + Stochastic: `L(rho) = p * rho * p - rho` + Correlation: `L(rho) = p * rho * q + q * rho * p - 0.5 {{p,q}, rho}` + Active: `L(rho) = 1j( p * rho * q - q * rho * p + 0.5 {[p,q], rho} )` + + Square brackets denotes the commutator and curly brackets the anticommutator. + `L` is returned as a superoperator matrix that acts on vectorized density matrices. + + Parameters + ---------- + typ : {'H','S','C','A'} + The type of error generator to construct. - elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + p : numpy.ndarray + d-dimensional basis matrix. + + q : numpy.ndarray, optional + d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`. + + sparse : bool, optional + Whether to construct a sparse or dense (the default) matrix. 
+ + Returns + ------- + ndarray or Scipy CSR matrix + """ + d = p.shape[0] + d2 = d**2 + if sparse: + elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype) + else: + elem_errgen = _np.empty((d2, d2), dtype=p.dtype) + + assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'" + assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ + "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ + + #should be able to get away with just doing one product here. + if typ in 'CA': + pq = p@q + qp = q@p + pq_plus_qp = pq + qp + pq_minus_qp = pq - qp + + if typ == 'H': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'S': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[i,j] += -1 + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'C': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else 
rho1.flatten() if sparse: elem_errgen = elem_errgen.tocsr() return elem_errgen + def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N803 """ Construct the superoperator for a term in the common Lindbladian expansion of an error generator. diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index f3f69f0a9..c9f5cf5d1 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1987,7 +1987,20 @@ def _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, norm sparse=False, tensorprod_basis=False, create_dual=False): #See docstrings for `bulk_create_elementary_errorgen_nqudit` and `bulk_create_elementary_errorgen_nqudit_dual`. - create_fn = _lt.create_elementary_errorgen_dual if create_dual else _lt.create_elementary_errorgen + #check if we're using the pauli basis + is_pauli = set(basis_1q.name.split('*')) == set(['PP']) or set(basis_1q.name.split('*')) == set(['pp']) + + if create_dual: + if is_pauli: + create_fn = _lt.create_elementary_errorgen_dual_pauli + else: + create_fn = _lt.create_elementary_errorgen_dual + else: + if is_pauli: + create_fn = _lt.create_elementary_errorgen_pauli + else: + create_fn = _lt.create_elementary_errorgen + normfn = _spsl.norm if sparse else _np.linalg.norm if tensorprod_basis: From 4f1a8aae975a0f390ecebf3812e406dfc48a21d3 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 5 Jan 2025 17:15:51 -0700 Subject: [PATCH 051/102] Add fallback for sparse basis matrices Add a fallback to the original error generator implementation for case where one of the input basis element labels is a sparse matrix. 
--- pygsti/tools/lindbladtools.py | 219 +++++++++++++++++++++------------- 1 file changed, 139 insertions(+), 80 deletions(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index fb9bc12d1..64632797b 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -84,22 +84,36 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ - # Loop through the standard basis as all possible input density matrices - for i in range(d): # rho0 == input density mx - for j in range(d): + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx # Only difference between H/S/C/A is how they transform input density matrices if typ == 'H': - rho1 = _np.zeros((d,d), dtype=_np.complex128) - rho1[:, j] = -1j*p[:, i] - rho1[i, :] += 1j*p[j, :] + rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * elif typ == 'S': - rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1 = (p @ rho0 @ pdag) # 1 / d2 * elif typ == 'C': - rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1 = (p @ rho0 @ qdag + q @ rho0 @ pdag) # 1 / (2 * d2) * elif typ == 'A': - rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag) # 1j / (2 * d2) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): # rho0 == input density mx + for j in range(d): + # Only difference between H/S/C/A is how they transform input density 
matrices + if typ == 'H': + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'A': + rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() return_normalization = bool(normalization_factor == 'auto_return') if normalization_factor in ('auto', 'auto_return'): @@ -169,32 +183,46 @@ def create_elementary_errorgen_dual_pauli(typ, p, q=None, sparse=False): assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ - if typ == 'H': - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = _np.zeros((d,d), dtype=_np.complex128) - rho1[:, j] = -1j*p[:, i] - rho1[i, :] += 1j*p[j, :] - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() - elif typ == 'S': - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() - elif typ == 'C': - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or 
(q is not None and not isinstance(q, _np.ndarray)): + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * + elif typ == 'S': + rho1 = (p @ rho0 @ p) # 1 / d2 * + elif typ == 'C': + rho1 = (p @ rho0 @ q + q @ rho0 @ p) # 1 / (2 * d2) * + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ q - q @ rho0 @ p) # 1j / (2 * d2) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() else: - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + if typ == 'H': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'S': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'C': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 
1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() if typ in 'HCA': normalization_factor = 1/(2*d2) @@ -261,29 +289,45 @@ def create_elementary_errorgen(typ, p, q=None, sparse=False): pq_plus_qp = pdag @ q + qdag @ p pq_minus_qp = pdag @ q - qdag @ p - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + # Loop through the standard basis as all possible input density matrices + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx # Only difference between H/S/C/A is how they transform input density matrices if typ == 'H': - rho1 = _np.zeros((d,d), dtype=_np.complex128) - rho1[:, j] = -1j*p[:, i] - rho1[i, :] += 1j*p[j, :] + rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi elif typ == 'S': pdag_p = (pdag @ p) - rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) - rho1[:, j] += -.5*pdag_p[:, i] - rho1[i, :] += -.5*pdag_p[j, :] + rho1 = p @ rho0 @ pdag - 0.5 * (pdag_p @ rho0 + rho0 @ pdag_p) elif typ == 'C': - rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) - rho1[:, j] += -.5*pq_plus_qp[:, i] - rho1[i, :] += -.5*pq_plus_qp[j, :] + rho1 = p @ rho0 @ qdag + q @ rho0 @ pdag - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) elif typ == 'A': - rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) - rho1[:, j] += 1j*.5*pq_minus_qp[:, i] - rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard 
basis as all possible input density matrices + for i in range(d): + for j in range(d): + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + pdag_p = (pdag @ p) + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pdag_p[:, i] + rho1[i, :] += -.5*pdag_p[j, :] + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elif typ == 'A': + rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() if sparse: elem_errgen = elem_errgen.tocsr() return elem_errgen @@ -344,37 +388,52 @@ def create_elementary_errorgen_pauli(typ, p, q=None, sparse=False): pq_plus_qp = pq + qp pq_minus_qp = pq - qp - if typ == 'H': - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = _np.zeros((d,d), dtype=_np.complex128) - rho1[:, j] = -1j*p[:, i] - rho1[i, :] += 1j*p[j, :] - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() - elif typ == 'S': + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) - rho1[i,j] += -1 - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() - elif typ == 'C': 
- # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) - rho1[:, j] += -.5*pq_plus_qp[:, i] - rho1[i, :] += -.5*pq_plus_qp[j, :] - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi + elif typ == 'S': + rho1 = p @ rho0 @ p - rho0 + elif typ == 'C': + rho1 = p @ rho0 @ q + q @ rho0 @ p - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ q - q @ rho0 @ p + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() else: - # Loop through the standard basis as all possible input density matrices - for i in range(d): - for j in range(d): - rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) - rho1[:, j] += 1j*.5*pq_minus_qp[:, i] - rho1[i, :] += 1j*.5*pq_minus_qp[j, :] - elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + if typ == 'H': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'S': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[i,j] += -1 + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ 
== 'C': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() if sparse: elem_errgen = elem_errgen.tocsr() return elem_errgen From b47a9c6300ff6c8dff3e974e00250aa1765bcbc0 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 5 Jan 2025 17:53:31 -0700 Subject: [PATCH 052/102] Restrict specialization to unnormalized Paulis Actually, it looks like the change I implemented specializing the error generator construction when using paulis is actually only valid when using unnormalized ones. Could get it working for the other case by tracking the additional scale factor, but for now just revert partially. --- pygsti/tools/lindbladtools.py | 4 ++-- pygsti/tools/optools.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 64632797b..a2dbdfa38 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -131,7 +131,7 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ def create_elementary_errorgen_dual_pauli(typ, p, q=None, sparse=False): """ Construct a "dual" elementary error generator matrix in the "standard" (matrix-unit) basis. - Specialized to p and q being elements of the pauli basis. 
+ Specialized to p and q being elements of the (unnormalized) pauli basis. The elementary error generator that is dual to the one computed by calling :func:`create_elementary_errorgen` with the same argument. This dual element @@ -337,7 +337,7 @@ def create_elementary_errorgen(typ, p, q=None, sparse=False): def create_elementary_errorgen_pauli(typ, p, q=None, sparse=False): """ Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis. - Specialized to the case where p and q are elements of the pauli basis. + Specialized to the case where p and q are elements of the (unnormalized) pauli basis. There are four different types of elementary error generators: 'H' (Hamiltonian), 'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928. diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index c9f5cf5d1..958493b56 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1988,8 +1988,7 @@ def _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, norm #See docstrings for `bulk_create_elementary_errorgen_nqudit` and `bulk_create_elementary_errorgen_nqudit_dual`. #check if we're using the pauli basis - is_pauli = set(basis_1q.name.split('*')) == set(['PP']) or set(basis_1q.name.split('*')) == set(['pp']) - + is_pauli = set(basis_1q.name.split('*')) == set(['PP']) if create_dual: if is_pauli: create_fn = _lt.create_elementary_errorgen_dual_pauli From d9e525b04112303927eac8c00327d87285a18370 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 16 Jan 2025 16:02:55 -0700 Subject: [PATCH 053/102] S-C and S-A Compositions Checkpoint composition implementation with the addition of S-C and S-A compositions. 
--- pygsti/tools/errgenproptools.py | 233 +++++++++++++++++++++++++++++++- 1 file changed, 232 insertions(+), 1 deletion(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index d9f5f145a..7a4289d9b 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1332,7 +1332,238 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE('S', [Q]),- w)) elif errorgen_1_type == 'S' and errorgen_2_type == 'C': - pass + #S_A[C_P,Q] A-> errorgen_1_bel_0, P->errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + + if P.commutes(Q): #Case 1: [P,Q] = 0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), 1j*APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + + elif errorgen_1_type == 'S' and errorgen_2_type == 'A': + #S_A[A_P,Q] A-> errorgen_1_bel_0, P->errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + + if P.commutes(Q): #Case 1: [P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, 
new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. return composed_errorgens From 668a5480bede2d094a5eb2d3a444ce2c047235aa Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 17 Jan 2025 15:33:02 -0700 Subject: [PATCH 054/102] Checkpoint C-H and C-S Checkpoint the composition implementation at the inclusion of C-H and C-S compositions. These will eventually get refactored and combined into one block each with H-C and S-C. 
--- pygsti/tools/errgenproptools.py | 299 ++++++++++++++++++++++++++++---- 1 file changed, 261 insertions(+), 38 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 7a4289d9b..c78bceaa2 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1067,6 +1067,10 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non P = errorgen_2_bel_0 Q = errorgen_2_bel_1 A = errorgen_1_bel_0 + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #Case 1: [P,Q]=0 if P.commutes(Q): #precompute some products we'll need. @@ -1074,15 +1078,13 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA = pauli_product(Q, A) PQ = pauli_product(P, Q) APQ = pauli_product(A, PQ[0]*PQ[1]) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) PQ_ident = (PQ[1] == identity) APQ_ident = (APQ[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. PA_eq_Q = (PA[1]==Q) QA_eq_P = (QA[1]==P) PQ_eq_A = (PQ[1]==A) @@ -1139,13 +1141,10 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #precompute some products we'll need. 
PA = pauli_product(P, A) QA = pauli_product(Q, A) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. PA_eq_Q = (PA[1]==Q) QA_eq_P = (QA[1]==P) #Case 2a: [A,P]=0, [A,Q]=0 @@ -1186,18 +1185,18 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non P = errorgen_2_bel_0 Q = errorgen_2_bel_1 A = errorgen_1_bel_0 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) #Case 1: P and Q commute. if P.commutes(Q): #precompute some products we'll need. PA = pauli_product(P, A) QA = pauli_product(Q, A) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. 
PA_eq_Q = (PA[1]==Q) QA_eq_P = (QA[1]==P) #Case 1a: [A,P]=0, [A,Q]=0 @@ -1238,9 +1237,6 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA = pauli_product(Q, A) PQ = pauli_product(P, Q) APQ = pauli_product(A, PQ[0]*PQ[1]) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) #also also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) @@ -1337,20 +1333,21 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non P = errorgen_2_bel_0 Q = errorgen_2_bel_1 + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + if P.commutes(Q): #Case 1: [P,Q] = 0 #precompute some products we'll need. PA = pauli_product(P, A) QA = pauli_product(Q, A) PQ = pauli_product(P, Q) APQ = pauli_product(A, PQ[0]*PQ[1]) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) APQ_ident = (APQ[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. PA_eq_QA = (PA[1]==QA[1]) #APQ can't equal A since that implies P==Q, which would be an invalid C term input. @@ -1403,13 +1400,10 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #precompute some products we'll need. 
PA = pauli_product(P, A) QA = pauli_product(Q, A) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. PA_eq_QA = (PA[1]==QA[1]) assert not PA_eq_QA #(I'm almost positive this should be true) @@ -1453,17 +1447,19 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non P = errorgen_2_bel_0 Q = errorgen_2_bel_1 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + if P.commutes(Q): #Case 1: [P,Q]=0 #precompute some products we'll need. PA = pauli_product(P, A) QA = pauli_product(Q, A) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. 
PA_eq_QA = (PA[1]==QA[1]) assert not PA_eq_QA #(I'm almost positive this should be true) @@ -1506,14 +1502,11 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA = pauli_product(Q, A) PQ = pauli_product(P, Q) APQ = pauli_product(A, PQ[0]*PQ[1]) - #also precompute whether pairs commute or anticommute - com_AP = A.commutes(P) - com_AQ = A.commutes(Q) - #also also precompute whether any of these products are the identity + #also precompute whether any of these products are the identity PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) APQ_ident = (APQ[1] == identity) - #also also also precompute whether certain relevant pauli pairs are equal. + #also also precompute whether certain relevant pauli pairs are equal. PA_eq_QA = (PA[1]==QA[1]) #APQ can't equal A since that implies P==Q, which would be an invalid C term input. @@ -1564,6 +1557,236 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_2 is not None: composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + + elif errorgen_1_type == 'C' and errorgen_2_type == 'H': + #C_P,Q[H_A]: P -> errorgen_1_bel_0, Q-> errorgen_1_bel_1, A -> errorgen_2_bel_0 + #TODO: This only differs from H-C by a few signs, should be able to combine the two implementations to save space. + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + A = errorgen_2_bel_0 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #[P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity (PQ can't be the identity if this is a valid C term). 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = 
_ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1*QA[0]*addl_sign_1*w)) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'S': #This differs from S-C by just a few signs. Should be able to combine and significantly compress code. + #C_P,Q[S_A] P-> errorgen_1_bel_0, Q -> errorgen_1_bel_1, A->errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_1 + A = errorgen_2_bel_0 + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q] = 0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_sign_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + if new_eg_type_1 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. return composed_errorgens From cad9d3e175db3a67aeadb74c680c789840ff2c91 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 17 Jan 2025 16:26:12 -0700 Subject: [PATCH 055/102] Minor typo fix --- pygsti/tools/errgenproptools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c78bceaa2..60df3bfaa 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1678,7 +1678,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non elif errorgen_1_type == 'C' and errorgen_2_type == 'S': #This differs from S-C by just a few signs. Should be able to combine and significantly compress code. #C_P,Q[S_A] P-> errorgen_1_bel_0, Q -> errorgen_1_bel_1, A->errorgen_2_bel_0 P = errorgen_1_bel_0 - Q = errorgen_2_bel_1 + Q = errorgen_1_bel_1 A = errorgen_2_bel_0 #also precompute whether pairs commute or anticommute com_AP = A.commutes(P) From 63488450cdce9140e72c8ea44b73e0ce4ad425fe Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 27 Jan 2025 23:03:56 -0700 Subject: [PATCH 056/102] C-C Compositions Completed implementation of the C-C error generator compositions. Tested for all pairs of C terms on 2-qubits. Also includes refactors to other compositions to reduce the distinction between C and A conventions for faster implementation. 
--- pygsti/tools/errgenproptools.py | 1539 +++++++++++++++++++++++++------ 1 file changed, 1279 insertions(+), 260 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 60df3bfaa..c78ad40c8 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1036,13 +1036,13 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non Q = errorgen_2_bel_0 P_eq_Q = (P==Q) if P.commutes(Q): - new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) - composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_scale*w)) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_factor*w)) else: PQ = pauli_product(P, Q) composed_errorgens.append((_LSE('H', [PQ[1]]), -1j*w*PQ[0])) - new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) - composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_scale*w)) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_factor*w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'S': #H_P[S_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 @@ -1052,14 +1052,14 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non PQ_ident = (PQ[1] == identity) PQ_eq_Q = (PQ[1]==Q) if P.commutes(Q): - new_eg_type, new_bels, addl_sign = _ordered_new_bels_A(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_A(PQ[1], Q, PQ_ident, False, PQ_eq_Q) if new_eg_type is not None: - composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_sign*w)) + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_factor*w)) composed_errorgens.append((_LSE('H', [P]), -w)) else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are 
equal (in which case they commute). - new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(PQ[1], Q, PQ_ident, False, PQ_eq_Q) if new_eg_type is not None: - composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_scale*w)) + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_factor*w)) composed_errorgens.append((_LSE('H', [P]), -w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'C': @@ -1091,52 +1091,52 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) #Case 1b: {A,P}=0, 
{A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) 
if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) else: #Case 2: {P,Q}=0 #precompute some products we'll need. 
PA = pauli_product(P, A) @@ -1149,36 +1149,36 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA_eq_P = (QA[1]==P) #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = 
_ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) elif errorgen_1_type == 'H' and errorgen_2_type == 'A': #H_A[A_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 @@ -1201,36 +1201,36 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA_eq_P = (QA[1]==P) #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = 
_ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) #Case 1b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + 
new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) else: #Case 2: {P,Q}=0 #precompute some products we'll need. 
PA = pauli_product(P, A) @@ -1249,52 +1249,52 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = 
_ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) #Note: This could be done by leveraging the commutator code, but that adds #additional overhead which I am opting to avoid. 
@@ -1306,14 +1306,14 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non PQ_ident = (PQ[1] == identity) PQ_eq_Q = (PQ[1]==Q) if P.commutes(Q): - new_eg_type, new_bels, addl_sign = _ordered_new_bels_A(PQ[1], P, PQ_ident, False, PQ_eq_Q) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_A(PQ[1], P, PQ_ident, False, PQ_eq_Q) if new_eg_type is not None: - composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_sign*w)) + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_factor*w)) composed_errorgens.append((_LSE('H', [Q]), -w)) else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). - new_eg_type, new_bels, addl_scale = _ordered_new_bels_C(PQ[1], P, PQ_ident, False, PQ_eq_Q) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(PQ[1], P, PQ_ident, False, PQ_eq_Q) if new_eg_type is not None: - composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_scale*w)) + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_factor*w)) composed_errorgens.append((_LSE('H', [Q]), -w)) elif errorgen_1_type == 'S' and errorgen_2_type == 'S': @@ -1353,48 +1353,48 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], 
QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. else: #Case 2: {P,Q}=0 #precompute some products we'll need. @@ -1409,36 +1409,36 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, 
False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not 
None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. elif errorgen_1_type == 'S' and errorgen_2_type == 'A': @@ -1465,36 +1465,36 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 1b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. 
else: #precompute some products we'll need. @@ -1512,50 +1512,50 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, 
new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif 
not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. 
elif errorgen_1_type == 'C' and errorgen_2_type == 'H': @@ -1586,52 +1586,52 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) #Case 1b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) if not APQ_ident: composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) - new_eg_type_2, new_bels_2, addl_sign_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_sign_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) else: #Case 2: {P,Q}=0 #precompute some products we'll need. 
PA = pauli_product(P, A) @@ -1644,38 +1644,38 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non QA_eq_P = (QA[1]==P) #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = 
_ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) - elif errorgen_1_type == 'C' and errorgen_2_type == 'S': #This differs from S-C by just a few signs. Should be able to combine and significantly compress code. + elif errorgen_1_type == 'C' and errorgen_2_type == 'S': #TODO: This differs from S-C by just a few signs. Should be able to combine and significantly compress code. 
#C_P,Q[S_A] P-> errorgen_1_bel_0, Q -> errorgen_1_bel_1, A->errorgen_2_bel_0 P = errorgen_1_bel_0 Q = errorgen_1_bel_1 @@ -1700,48 +1700,48 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 1a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #Case 1d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_sign_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) - new_eg_type_2, new_bels_2, addl_scale_2 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_sign_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_scale_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. else: #Case 2: {P,Q}=0 #precompute some products we'll need. 
@@ -1756,38 +1756,1057 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non #Case 2a: [A,P]=0, [A,Q]=0 if com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2b: {A,P}=0, {A,Q}=0 elif not com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_scale_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_scale_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2c: [A,P]=0, {A,Q}=0 elif com_AP and not com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, 
PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #Case 2d: {A,P}=0, [A,Q]=0 elif not com_AP and com_AQ: - new_eg_type_0, new_bels_0, addl_sign_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) - new_eg_type_1, new_bels_1, addl_scale_1 = _ordered_new_bels_C(P, Q, False, False, False) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_sign_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: - composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_scale_1*w)) + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + elif errorgen_1_type == 'C' and errorgen_2_type == 'C': + #C_A,B[C_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. 
+ com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + #There are 64 separate cases, so this is gonna suck... + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], 
P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(P, QAB[1], False, QAB_ident, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(A, BPQ[1], False, BPQ_ident, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + 
new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, 
addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif 
not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, 
PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + 
new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + + else: #[P,Q] !=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + ABP = pauli_product(AB[0]*AB[1], P) + ABQ = pauli_product(AB[0]*AB[1], Q) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + ABP_ident = (ABP[1] == identity) + ABQ_ident = (ABQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + ABP_eq_Q = (ABP[1] == Q) + ABQ_eq_P = (ABQ[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) 
+ if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, 
new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) 
+ elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and 
not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. 
+ PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + PQB = pauli_product(PQ[0]*PQ[1], B) + PQA = pauli_product(PQ[0]*PQ[1], A) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PQB_ident = (PQB[1] == identity) + PQA_ident = (PQA[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQB_eq_A = (PQB[1] == A) + PQA_eq_B = (PQA[1] == B) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, 
new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = 
_ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, 
PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not 
None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0),-1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and 
not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, 
new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + + return composed_errorgens #helper function for getting the new (properly ordered) basis element labels, error generator type (A can turn into H with certain index combinations), and additional signs. 
@@ -1805,16 +2824,16 @@ def _ordered_new_bels_A(pauli1, pauli2, first_pauli_ident, second_pauli_ident, p else: new_eg_type = 'H' new_bels = [pauli2] - addl_sign = 1 + addl_factor = 1 else: if second_pauli_ident: new_eg_type = 'H' new_bels = [pauli1] - addl_sign = -1 + addl_factor = -1 else: new_eg_type = 'A' - new_bels, addl_sign = ([pauli1, pauli2], 1) if stim_pauli_string_less_than(pauli1, pauli2) else ([pauli2, pauli1], -1) - return new_eg_type, new_bels, addl_sign + new_bels, addl_factor = ([pauli1, pauli2], 1) if stim_pauli_string_less_than(pauli1, pauli2) else ([pauli2, pauli1], -1) + return new_eg_type, new_bels, addl_factor def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq): """ @@ -1827,12 +2846,12 @@ def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, p if pauli_eq: new_eg_type = 'S' new_bels = [pauli1] - addl_scale_fac = 2 + addl_factor = 2 else: new_eg_type = 'C' - addl_scale_fac = 1 + addl_factor = 1 new_bels = [pauli1, pauli2] if stim_pauli_string_less_than(pauli1, pauli2) else [pauli2, pauli1] - return new_eg_type, new_bels, addl_scale_fac + return new_eg_type, new_bels, addl_factor def com(P1, P2): #P1 and P2 either commute or anticommute. From f7431f359c1fd45dc33dc5317bebc4a3882bf6d8 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 28 Jan 2025 23:19:06 -0700 Subject: [PATCH 057/102] Checkpoint C-A Compositions Checkpoint implementation of the C and A error generator compositions. Still a few more bugs to track down before this pair of sectors is down pat. 
--- pygsti/tools/errgenproptools.py | 1004 +++++++++++++++++++++++++++++++ 1 file changed, 1004 insertions(+) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c78ad40c8..c5cb7eac1 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -1816,6 +1816,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non PAB = pauli_product(P, AB[0]*AB[1]) QAB = pauli_product(Q, AB[0]*AB[1]) ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + #precompute whether any of these products are identities. PA_ident = (PA[1] == identity) QA_ident = (QA[1] == identity) @@ -2806,6 +2807,1009 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif errorgen_1_type == 'C' and errorgen_2_type == 'A': + #C_A,B[A_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if 
new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = 
_ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + 
new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, 
addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + elif not 
com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, 
QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + 
new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, 
new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + 
if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) return composed_errorgens From 57684b3b403c21b1f76cac90b48052797674acd5 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 29 Jan 2025 23:26:05 -0700 Subject: [PATCH 058/102] A-C Compositions Implement the compositions of A and C error generators. Somewhat unexpectedly the relationship between these and the C-A compositions is more complicated than I had anticipated (and more complicated than the comparable expressions that came out of things like H-C vs C-H, for example). --- pygsti/tools/errgenproptools.py | 1016 ++++++++++++++++++++++++++++++- 1 file changed, 1010 insertions(+), 6 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c5cb7eac1..407af47a0 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -3044,7 +3044,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) if new_eg_type_0 is not None: - composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) if new_eg_type_2 is not None: @@ -3105,7 +3105,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_5 is not None: composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) if new_eg_type_6 is not None: - composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) elif com_AP and 
com_AQ and com_BP and not com_BQ: new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) @@ -3235,7 +3235,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_1 is not None: composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) if new_eg_type_2 is not None: - composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) if new_eg_type_3 is not None: composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) if new_eg_type_4 is not None: @@ -3383,7 +3383,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_6 is not None: composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) elif not com_AP and not com_AQ and com_BP and not com_BQ: - new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) @@ -3405,7 +3405,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_6 is not None: composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) if not ABPQ_ident: - composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 
ABPQ[0]*w)) elif not com_AP and not com_AQ and not com_BP and com_BQ: new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) @@ -3427,7 +3427,7 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_5 is not None: composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) if new_eg_type_6 is not None: - composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) if not ABPQ_ident: composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) elif not com_AP and not com_AQ and not com_BP and not com_BQ: @@ -3810,6 +3810,1010 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) if new_eg_type_3 is not None: composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'C': + #A_A,B[C_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if 
new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 
-1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is 
not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 
= _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, 
PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if 
new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, 
new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, 
addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + 
if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if 
new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + else: + #precompute some products we'll need. 
+ PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + 
new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = 
_ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, 
QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + 
if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) return composed_errorgens From 57f8530dd86b31e84b8b8ec61b16868bcd84c30c Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 30 Jan 2025 22:34:05 -0700 Subject: [PATCH 059/102] A-A Compositions Add analytic compositions of A-A error generator pairs. --- pygsti/tools/errgenproptools.py | 1003 +++++++++++++++++++++++++++++++ 1 file changed, 1003 insertions(+) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 407af47a0..c080a0997 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -4815,6 +4815,1009 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non if new_eg_type_3 is not None: composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif errorgen_1_type == 'A' and errorgen_2_type == 'A': + #A_A,B[A_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, 
new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, 
new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
-1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
-1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if 
new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 
1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + else: + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = 
_ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, 
PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, 
addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = 
_ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, 
PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, 
addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], 
P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if 
new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, 
APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6),- 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is 
not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: 
+ composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + 
new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, 
addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and 
com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) 
+ new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = 
_ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + return composed_errorgens #helper function for getting the new (properly ordered) basis element labels, error generator type (A can turn into H with certain index combinations), and additional signs. From ea647169f2249f1e3105fef40db5f94356781bb5 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Feb 2025 17:33:30 -0700 Subject: [PATCH 060/102] Enable higher-order approximate stabilizer probs This commit contains the following: - Iterative error generator compositions and error generator taylor expansion functionality. - Arbitrary-order corrections to the stabilizer probabilities. - Bugfixes for the phi and alpha functions, and new numerical methods for computing these for use in testing infrastructure. 
--- pygsti/tools/errgenproptools.py | 436 +++++++++++++++++++++++++++++--- 1 file changed, 404 insertions(+), 32 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index c080a0997..27ae3c2d3 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -14,12 +14,15 @@ import numpy as _np from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL from pygsti.baseobjs import QubitSpace as _QubitSpace +from pygsti.baseobjs.basis import BuiltinBasis as _BuiltinBasis from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen from pygsti.circuits import Circuit as _Circuit +from pygsti.tools.optools import create_elementary_errorgen_nqudit, state_to_dmvec from functools import reduce from itertools import chain, product +from math import factorial def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): """ @@ -1005,8 +1008,10 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non Returns ------- - list of `LocalStimErrorgenLabel`s corresponding to the composition of the two input error generators, - weighted by the specified value of `weight`. + list of tuples. The first element of each tuple is a `LocalStimErrorgenLabel`s + corresponding to a component of the composition of the two input error generators. + The second element is the weight of that term, additionally weighted by the specified + value of `weight`. 
""" composed_errorgens = [] @@ -6295,7 +6300,7 @@ def amplitude_of_state(tableau, desired_state): return phase_factor*magnitude -def pauli_phase_update(pauli, bitstring): +def pauli_phase_update(pauli, bitstring, dual=False): """ Takes as input a pauli and a bit string and computes the output bitstring and the overall phase that bit string accumulates. @@ -6308,6 +6313,8 @@ def pauli_phase_update(pauli, bitstring): bitstring : str String of 0's and 1's representing the bit string to apply the pauli to. + dual : bool, optional (default False) + If True then then the pauli is acting to the left on a row vector. Returns ------- Tuple whose first element is the phase accumulated, and whose second element @@ -6318,14 +6325,23 @@ def pauli_phase_update(pauli, bitstring): pauli = stim.PauliString(pauli) bitstring = [False if bit=='0' else True for bit in bitstring] - #list of phase correction for each pauli (conditional on 0) - #Read [I, X, Y, Z] - pauli_phases_0 = [1, 1, -1j, 1] - - #list of the phase correction for each pauli (conditional on 1) - #Read [I, X, Y, Z] - pauli_phases_1 = [1, 1, 1j, -1] - + if not dual: + #list of phase correction for each pauli (conditional on 0) + #Read [I, X, Y, Z] + pauli_phases_0 = [1, 1, 1j, 1] + + #list of the phase correction for each pauli (conditional on 1) + #Read [I, X, Y, Z] + pauli_phases_1 = [1, 1, -1j, -1] + else: + #list of phase correction for each pauli (conditional on 0) + #Read [I, X, Y, Z] + pauli_phases_0 = [1, 1, -1j, 1] + + #list of the phase correction for each pauli (conditional on 1) + #Read [I, X, Y, Z] + pauli_phases_1 = [1, 1, 1j, -1] + #list of bools corresponding to whether each pauli flips the target bit pauli_flips = [False, True, True, False] @@ -6350,7 +6366,7 @@ def pauli_phase_update(pauli, bitstring): return overall_phase, output_bitstring #TODO: This function needs a more evocative name -def phi(tableau, desired_bitstring, P, Q): +def phi(tableau, desired_bitstring, P, Q, debug = False): """ This 
function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. @@ -6374,7 +6390,8 @@ def phi(tableau, desired_bitstring, P, Q): #start by getting the pauli string which maps the all-zeros string to the target bitstring. initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])) - + if debug: + print(f'{initial_pauli_string=}') #map P and Q to stim.PauliString if needed. if isinstance(P, str): P = stim.PauliString(P) @@ -6384,24 +6401,35 @@ def phi(tableau, desired_bitstring, P, Q): #combine this initial pauli string with the two input paulis eff_P = initial_pauli_string*P eff_Q = Q*initial_pauli_string - + if debug: + print(f'{eff_P=}') + print(f'{eff_Q=}') #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get #the corresponding phase corrections. all_zeros = '0'*len(eff_P) - phase1, bitstring1 = pauli_phase_update(eff_P, all_zeros) + phase1, bitstring1 = pauli_phase_update(eff_P, all_zeros, dual=True) phase2, bitstring2 = pauli_phase_update(eff_Q, all_zeros) + if debug: + print(f'{phase1=}') + print(f'{phase2=}') + print(f'{bitstring1=}') + print(f'{bitstring2=}') + #get the amplitude of these two bitstrings in the stabilizer state. amp1 = amplitude_of_state(tableau, bitstring1) - amp2 = amplitude_of_state(tableau, bitstring2) + amp2 = amplitude_of_state(tableau, bitstring2).conjugate() #The second amplitude also needs a complex conjugate applied + if debug: + print(f'{amp1=}') + print(f'{amp2=}') + #now apply the phase corrections. amp1*=phase1 amp2*=phase2 - + #calculate phi. - #The second amplitude also needs a complex conjugate applied - phi = amp1*amp2.conjugate() + phi = amp1*amp2 #phi should ultimately be either 0, +/-1 or +/-i, scaling might overflow #so avoid scaling and just identify which of these it should be. 
For really @@ -6420,7 +6448,61 @@ def phi(tableau, desired_bitstring, P, Q): else: return complex(0) -def alpha(errorgen, tableau, desired_bitstring): +#helper function for numerically computing phi, primarily used for testing. +def phi_numerical(tableau, desired_bitstring, P, Q): + """ + This function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. + (This version does this calculation numerically and is primarily intended for testing infrastructure.) + + Parameters + ---------- + tableau : stim.Tableau + A stim Tableau corresponding to the input stabilizer state. + + desired_bitstring : str + A string of zeros and ones corresponding to the bit string being measured. + + P : str or stim.PauliString + The first pauli string index. + Q : str or stim.PauliString + The second pauli string index. + + Returns + ------- + A complex number corresponding to the value of the phi function. + """ + + #start by getting the pauli string which maps the all-zeros string to the target bitstring. + initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])).to_unitary_matrix(endian = 'big') + + + #map P and Q to stim.PauliString if needed. + if isinstance(P, str): + P = stim.PauliString(P) + if isinstance(Q, str): + Q = stim.PauliString(Q) + + stabilizer_state = tableau.to_state_vector(endian = 'big') + stabilizer_state.reshape((len(stabilizer_state),1)) + #combine this initial pauli string with the two input paulis + eff_P = initial_pauli_string@P.to_unitary_matrix(endian = 'big') + eff_Q = Q.to_unitary_matrix(endian = 'big')@initial_pauli_string + + #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get + #the corresponding phase corrections. + #all_zeros = '0'*len(eff_P) + all_zeros = _np.zeros((2**len(desired_bitstring),1)) + all_zeros[0] = 1 + #calculate phi. 
+ #The second amplitude also needs a complex conjugate applied + phi = (all_zeros.T@eff_P@stabilizer_state) * (stabilizer_state.conj().T@eff_Q@all_zeros) + + num_random = random_support(tableau) + scale = 2**(num_random) + + return phi*scale + +def alpha(errorgen, tableau, desired_bitstring, debug=False): """ First-order error generator sensitivity function for probability. @@ -6451,17 +6533,92 @@ def alpha(errorgen, tableau, desired_bitstring): sensitivity = phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \ - phi(tableau, desired_bitstring, identity_pauli, identity_pauli) elif errgen_type == 'C': #TODO simplify this logic + if debug: + print(f'{2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1])=}') first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) - second_term = phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) \ - + phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) - sensitivity = first_term.real - second_term.real + #print(f'C: ({basis_element_labels[0], basis_element_labels[1]}) {phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1])=}', flush=True) + sensitivity = first_term.real + if basis_element_labels[0].commutes(basis_element_labels[1]): + #print(f'C: ({basis_element_labels[0], basis_element_labels[1]}) {phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli)=}', flush=True) + second_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) + if debug: + print(f'{2*phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli)=}') + sensitivity -= second_term.real + + #first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) + #second_term = phi(tableau, 
desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) \ + # + phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) + #test_sensitivity = first_term.real - second_term.real + #assert abs(sensitivity-test_sensitivity)<1e-10 else: #A - first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[1], basis_element_labels[0]) - second_term = phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) \ - - phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) - sensitivity = first_term.imag + second_term.imag + first_term = phi(tableau, desired_bitstring, basis_element_labels[1], basis_element_labels[0]) + if not basis_element_labels[0].commutes(basis_element_labels[1]): + second_term = phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) + sensitivity = 2*((first_term + second_term).imag) + else: + sensitivity = 2*first_term.imag return sensitivity +def alpha_numerical(errorgen, tableau, desired_bitstring): + """ + First-order error generator sensitivity function for probability. This implementation calculates + this quantity numerically, and as such is primarily intended for used as parting of testing + infrastructure. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + desired_bitstring : str + Bit string to calculate the sensitivity for. + """ + + #get the stabilizer state corresponding to the tableau. 
+ stabilizer_state = tableau.to_state_vector(endian='big') + #print(f'{stabilizer_state=}') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + #also get the superoperator (in the standard basis) corresponding to the elementary error generator + if isinstance(errorgen, _LSE): + local_eel = errorgen.to_local_eel() + elif isinstance(errorgen, _GEEL): + local_eel = _LEEL.cast(errorgen) + else: + local_eel = errorgen + + errgen_type = local_eel.errorgen_type + basis_element_labels = local_eel.basis_element_labels + basis_1q = _BuiltinBasis('PP', 4) + errorgen_superop = create_elementary_errorgen_nqudit(errgen_type, basis_element_labels, basis_1q, normalize=False, sparse=False, + tensorprod_basis=False) + + #also need a superbra for the desired bitstring. + desired_bitstring_vec = _np.zeros(2**len(desired_bitstring)) + desired_bitstring_vec[_bitstring_to_int(desired_bitstring)] = 1 + desired_bitstring_dmvec = state_to_dmvec(desired_bitstring_vec) + desired_bitstring_dmvec.reshape((1, len(desired_bitstring_dmvec))) + num_random = random_support(tableau) + scale = 2**(num_random) + + #compute the needed trace inner product. + alpha = _np.real_if_close(scale*(desired_bitstring_dmvec.conj().T@errorgen_superop@stabilizer_state_dmvec)) + + return alpha + +def _bitstring_to_int(bitstring) -> int: + if isinstance(bitstring, str): + # If the input is a string, convert it directly + return int(bitstring, 2) + elif isinstance(bitstring, tuple): + # If the input is a tuple, join the elements to form a string + return int(''.join(bitstring), 2) + else: + raise ValueError("Input must be either a string or a tuple of '0's and '1's") + def first_order_probability_correction(errorgen_dict, tableau, desired_bitstring): """ Compute the first-order correction to the probability of the specified bit string. 
@@ -6494,9 +6651,146 @@ def first_order_probability_correction(errorgen_dict, tableau, desired_bitstring for i, (lbl, rate) in enumerate(errorgen_dict.items()): alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate + #print(f'{alpha_errgen_prods=}') correction = scale*sum(alpha_errgen_prods) return correction +def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order = 1, truncation_threshold = 1e-14): + """ + Compute the kth-order correction to the probability of the specified bit string. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + tableau : stim.Tableau + Stim tableau corresponding to a particular stabilizer state being measured. + + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + correction : float + float corresponding to the correction to the output probability for the + desired bitstring induced by the error generator (to specified order). + """ + + num_random = random_support(tableau) + scale = 1/2**(num_random) #TODO: This might overflow + + #do the first order correction separately since it doesn't require composition logic: + #now get the sum over the alphas and the error generator rate products needed. 
+ alpha_errgen_prods = _np.zeros(len(errorgen_dict)) + + + for i, (lbl, rate) in enumerate(errorgen_dict.items()): + if abs(rate) > truncation_threshold: + alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate + #print(f'{alpha_errgen_prods=}') + correction = scale*_np.sum(alpha_errgen_prods) + #print(f'{correction=}') + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. (given by cartesian products of labels in errorgen_dict). + labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + #print(f'{labels_by_order=}') + #print(f'{rates_by_order=}') + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + current_order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #print(f'{composition_results=}') + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + #print(f'{composition_results_dict=}') + alpha_errgen_prods = _np.zeros(len(composition_results_dict)) + for i, (lbl, rate) in enumerate(composition_results_dict.items()): + if current_order_scale*abs(rate) > truncation_threshold: + sensitivity = alpha(lbl, tableau, desired_bitstring) + #print(f'{lbl}: alpha(lbl, tableau, desired_bitstring)= {sensitivity}') + alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) + #print(f'{alpha_errgen_prods=}') + 
#print(f'{_np.sum(alpha_errgen_prods)=}') + #print(f'{current_order_scale*scale*_np.sum(alpha_errgen_prods)=}') + correction += current_order_scale*scale*_np.sum(alpha_errgen_prods) + + return correction + + +def iterative_error_generator_composition(errorgen_labels, rates): + """ + Iteratively compute error generator compositions. Each error generator + composition in general returns a list of multiple new error generators, + so this function manages the distribution and recursive application + of the compositions for two-or-more error generator labels. + + Parameters + ---------- + errorgen_labels : tuple of `LocalStimErrorgenLabel` + A tuple of the elementary error generator labels to be composed. + + rates : tuple of float + A tuple of corresponding error generator rates of the same length as the tuple + of error generator labels. + + Returns + ------- + List of tuples, the first element of each tuple is a `LocalStimErrorgenLabel`. + The second element of each tuple is the final rate for that term. + """ + + if len(errorgen_labels) == 1: + return [(errorgen_labels[0], rates[0])] + else: + labels_to_process = [errorgen_labels] + rates_to_process = [rates] + + fully_processed_labels = [] + + while labels_to_process: + new_labels_to_process = [] + new_rates_to_process = [] + #loop through the labels to process + for label_tup, rate_tup in zip(labels_to_process, rates_to_process): + #grab the last two elements of each of these and do the composition. 
+ new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) + for new_label_rate_tup in new_labels_and_rates: + #print(f'{new_label_rate_tup=}') + new_label_tup = label_tup[:-2] + (new_label_rate_tup[0],) + new_rate_tup = rate_tup[:-2] + (new_label_rate_tup[1],) + #print(f'{new_label_tup=}') + #print(f'{new_rate_tup=}') + + if len(new_label_tup) == 1: + fully_processed_labels.append(new_label_rate_tup) + else: + new_labels_to_process.append(new_label_tup) + new_rates_to_process.append(new_rate_tup) + + labels_to_process = new_labels_to_process + rates_to_process = new_rates_to_process + + return fully_processed_labels + def stabilizer_probability(tableau, desired_bitstring): """ Calculate the output probability for the specifed output bitstring. @@ -6520,7 +6814,7 @@ def stabilizer_probability(tableau, desired_bitstring): #compute what Gidney calls the tableau fidelity (which in this case gives the probability). return tableau_fidelity(tableau, bitstring_to_tableau(desired_bitstring)) -def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring): +def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring, order=1, truncation_threshold=1e-14): """ Calculate the approximate probability of a desired bit string using a first-order approximation. @@ -6537,6 +6831,14 @@ def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring desired_bitstring : str String of 0's and 1's corresponding to the output bitstring being measured. + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. 
(Used internally in computation of probability corrections) + Returns ------- p : float @@ -6556,10 +6858,10 @@ def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} ideal_prob = stabilizer_probability(tableau, desired_bitstring) - first_order_correction = first_order_probability_correction(errorgen_dict, tableau, desired_bitstring) + first_order_correction = stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order, truncation_threshold) return ideal_prob + first_order_correction -def approximate_stabilizer_probabilities(errorgen_dict, circuit): +def approximate_stabilizer_probabilities(errorgen_dict, circuit, order=1, truncation_threshold=1e-14): """ Calculate the approximate probability distribution over all bitstrings using a first-order approximation. Note the size of this distribtion scales exponentially in the qubit count, so this is very inefficient for @@ -6574,6 +6876,14 @@ def approximate_stabilizer_probabilities(errorgen_dict, circuit): circuit : `Circuit` or `stim.Tableau` A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either case this should be a Clifford circuit and convertable to a stim.Tableau. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. 
(Used internally in computation of probability corrections) Returns ------- @@ -6595,6 +6905,68 @@ def approximate_stabilizer_probabilities(errorgen_dict, circuit): probs = _np.zeros(2**num_qubits) for i, bitstring in enumerate(bitstrings): - probs[i] = approximate_stabilizer_probability(errorgen_dict, tableau, bitstring) + probs[i] = approximate_stabilizer_probability(errorgen_dict, tableau, bitstring, order, truncation_threshold) + + return probs + +def error_generator_taylor_expansion(errorgen_dict, order = 1, truncation_threshold = 1e-14): + """ + Compute the nth-order taylor expansion for the exponentiation of the error generator described by the input + error generator dictionary. (Excluding the zeroth-order identity). + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + list of dictionaries + List of dictionaries whose keys are error generator labels and whose values are rates (including + whatever scaling comes from order of taylor expansion). Each list corresponds to an order + of the taylor expansion. + """ + + + taylor_order_terms = [dict() for _ in range(order)] + + for lbl, rate in errorgen_dict.items(): + if abs(rate) > truncation_threshold: + taylor_order_terms[0][lbl] = rate + + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. (given by cartesian products of labels in errorgen_dict). 
+ labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + #print(f'{labels_by_order=}') + #print(f'{rates_by_order=}') + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #print(f'{composition_results=}') + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + #print(f'{composition_results_dict=}') + for lbl, rate in composition_results_dict.items(): + if order_scale*abs(rate) > truncation_threshold: + taylor_order_terms[current_order-1][lbl] = order_scale*rate - return probs \ No newline at end of file + return taylor_order_terms \ No newline at end of file From f46c7e8a43ddc250d8401304d059c9fa0e33c916 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Feb 2025 18:48:06 -0700 Subject: [PATCH 061/102] Minor bugfix The S term phi function could in some cases be a complex 0j instead of a real 0, which mucked with stuff. 
---
 pygsti/tools/errgenproptools.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py
index 27ae3c2d3..71b43b612 100644
--- a/pygsti/tools/errgenproptools.py
+++ b/pygsti/tools/errgenproptools.py
@@ -6530,8 +6530,8 @@ def alpha(errorgen, tableau, desired_bitstring, debug=False):
         sensitivity = 2*phi(tableau, desired_bitstring, basis_element_labels[0], identity_pauli).imag
     elif errgen_type == 'S':
-        sensitivity = phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \
-                      - phi(tableau, desired_bitstring, identity_pauli, identity_pauli)
+        sensitivity = (phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \
+                      - phi(tableau, desired_bitstring, identity_pauli, identity_pauli)).real
     elif errgen_type == 'C': #TODO simplify this logic
         if debug:
             print(f'{2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1])=}')

From 2b9ca213e58d60161bd44631fc0c5b06fef7a185 Mon Sep 17 00:00:00 2001
From: Corey Ostrove
Date: Tue, 4 Feb 2025 22:23:11 -0700
Subject: [PATCH 062/102] Unit tests for errorgenproptools

New test module for functionality in errorgenproptools. There are still a few
functions missing coverage, so this is just the first stab.
--- test/unit/tools/test_errgenproptools.py | 431 ++++++++++++++++++++++++ 1 file changed, 431 insertions(+) create mode 100644 test/unit/tools/test_errgenproptools.py diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py new file mode 100644 index 000000000..9dd904d27 --- /dev/null +++ b/test/unit/tools/test_errgenproptools.py @@ -0,0 +1,431 @@ +import numpy as np +from pygsti.baseobjs import Label, QubitSpace +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis +from pygsti.algorithms.randomcircuit import create_random_circuit +from pygsti.models.modelconstruction import create_crosstalk_free_model +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as LEEL +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from pygsti.tools import errgenproptools as _eprop +from pygsti.tools.matrixtools import print_mx +from ..util import BaseCase +from itertools import product +import random +import stim +from pygsti.processors import QubitProcessorSpec +from pygsti.errorgenpropagation.errorpropagator_dev import ErrorGeneratorPropagator + +#TODO: BCH approximation, errorgen_layer_to_matrix, stim_pauli_string_less_than + +class ErrgenCompositionCommutationTester(BaseCase): + + def test_errorgen_commutators(self): + #confirm we get the correct analytic commutators by comparing to numerics. + + #create an error generator basis. + errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + #loop through all of the pairs of indices. 
+ errorgen_label_pairs = list(product(errorgen_lbls, repeat=2)) + + #also get a version of this list where the labels are local stim ones + local_stim_errorgen_lbls = [_LSE.cast(lbl) for lbl in errorgen_lbls] + stim_errorgen_label_pairs = list(product(local_stim_errorgen_lbls, repeat=2)) + + #for each pair compute the commutator directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. + for pair1, pair2 in zip(errorgen_label_pairs, stim_errorgen_label_pairs): + numeric_commutator = _eprop.error_generator_commutator_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict) + analytic_commutator = _eprop.error_generator_commutator(pair2[0], pair2[1]) + analytic_commutator_mat = _eprop.errorgen_layer_to_matrix(analytic_commutator, errorgen_lbl_matrix_dict, 2) + + norm_diff = np.linalg.norm(numeric_commutator-analytic_commutator_mat) + if norm_diff > 1e-10: + print(f'Difference in commutators for pair {pair1} is greater than 1e-10.') + print(f'{np.linalg.norm(numeric_commutator-analytic_commutator_mat)=}') + print('numeric_commutator=') + print_mx(numeric_commutator) + + #Decompose the numerical commutator into rates. + for lbl, dual in zip(errorgen_lbls, errorgen_basis.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_commutator) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_commutator=}') + print('analytic_commutator_mat=') + print_mx(analytic_commutator_mat) + raise ValueError() + + def test_errorgen_composition(self): + + #create an error generator basis. + complete_errorgen_basis_2Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') + complete_errorgen_basis_3Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(3), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. 
+ errorgen_lbls_2Q = complete_errorgen_basis_2Q.labels + errorgen_lbl_matrix_dict_2Q = {lbl: mat for lbl, mat in zip(errorgen_lbls_2Q, complete_errorgen_basis_2Q.elemgen_matrices)} + + #augment testing with random selection of 3Q labels (some commutation relations for C and A terms require a minimum of 3 qubits). + errorgen_lbls_3Q, errorgen_mats_3Q = select_random_items_from_multiple_lists([complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_matrices], 1000, seed= 1234) + errorgen_lbl_matrix_dict_3Q = {lbl: mat for lbl, mat in zip(errorgen_lbls_3Q, errorgen_mats_3Q)} + + complete_errorgen_lbl_matrix_dict_3Q = {lbl: mat for lbl, mat in zip(complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_matrices)} + + #loop through all of the pairs of indices. + errorgen_label_pairs_2Q = list(product(errorgen_lbls_2Q, repeat=2)) + errorgen_label_pairs_3Q = list(product(errorgen_lbls_3Q, repeat=2)) + + #also get a version of this list where the labels are local stim ones + local_stim_errorgen_lbls_2Q = [_LSE.cast(lbl) for lbl in errorgen_lbls_2Q] + local_stim_errorgen_lbls_3Q = [_LSE.cast(lbl) for lbl in errorgen_lbls_3Q] + + stim_errorgen_label_pairs_2Q = list(product(local_stim_errorgen_lbls_2Q, repeat=2)) + stim_errorgen_label_pairs_3Q = list(product(local_stim_errorgen_lbls_3Q, repeat=2)) + + #for each pair compute the composition directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. 
+ for pair1, pair2 in zip(errorgen_label_pairs_2Q, stim_errorgen_label_pairs_2Q): + numeric_composition = _eprop.error_generator_composition_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict_2Q) + analytic_composition = _eprop.error_generator_composition(pair2[0], pair2[1]) + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, 2, errorgen_matrix_dict = errorgen_lbl_matrix_dict_2Q) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for pair {pair1} is greater than 1e-10.') + print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis_2Q.labels, complete_errorgen_basis_2Q.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') + + for pair1, pair2 in zip(errorgen_label_pairs_3Q, stim_errorgen_label_pairs_3Q): + numeric_composition = _eprop.error_generator_composition_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict_3Q) + analytic_composition = _eprop.error_generator_composition(pair2[0], pair2[1]) + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, 3, errorgen_matrix_dict = complete_errorgen_lbl_matrix_dict_3Q) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for pair {pair1} is greater than 1e-10.') + 
print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') + + +class ApproxStabilizerProbTester(BaseCase): + def setUp(self): + num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + self.circuit_alt = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + max_strengths = {1: {'S': .0005, 'H': .0001}, + 2: {'S': .0005, 'H': .0001}} + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + self.error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + self.propagated_errorgen_layer = self.error_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) + self.circuit_tableau = self.circuit.convert_to_stim_tableau() + self.circuit_tableau_alt = self.circuit_alt.convert_to_stim_tableau() + + #also create a 3-qubit pspec for making some tests faster. 
+ num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model_3Q = create_crosstalk_free_model(processor_spec = pspec) + self.circuit_3Q = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + self.circuit_tableau_3Q = self.circuit_3Q.convert_to_stim_tableau() + + + def test_random_support(self): + num_random = _eprop.random_support(self.circuit_tableau) + self.assertEqual(num_random, 8) + + #This unit test for tableau fidelity is straight out of Craig Gidney's stackexchange post. + def test_tableau_fidelity(self): + def _assert_correct_tableau_fidelity(u, v): + expected = abs(np.dot(u, np.conj(v)))**2 + ut = stim.Tableau.from_state_vector(u, endian='little') + vt = stim.Tableau.from_state_vector(v, endian='little') + actual = _eprop.tableau_fidelity(ut, vt) + np.testing.assert_allclose(actual, expected, rtol=1e-5) + + s = 0.5**0.5 + _assert_correct_tableau_fidelity([1, 0], [0, 1]) + _assert_correct_tableau_fidelity([1, 0], [1, 0]) + _assert_correct_tableau_fidelity([0, 1], [1, 0]) + _assert_correct_tableau_fidelity([s, s], [s, s]) + _assert_correct_tableau_fidelity([s, s], [s, -s]) + _assert_correct_tableau_fidelity([s, -s], [s, s]) + _assert_correct_tableau_fidelity([s, 1j * s], [s, s]) + _assert_correct_tableau_fidelity([s, s], [s, s]) + _assert_correct_tableau_fidelity([1, 0], [s, s]) + _assert_correct_tableau_fidelity([0, 1], [s, s]) + _assert_correct_tableau_fidelity([1, 0, 0, 0], [0, 0, s, s]) + _assert_correct_tableau_fidelity([0, 0, 1, 0], [0, 0, s, s]) + _assert_correct_tableau_fidelity([0, 0, 1, 0], [0, 0, 1j * s, s]) + for n in range(6): + for _ in range(10): + _assert_correct_tableau_fidelity( + stim.Tableau.random(n).to_state_vector(), + stim.Tableau.random(n).to_state_vector(), + ) + + def test_amplitude_of_state(self): + amp0000 = 
_eprop.amplitude_of_state(self.circuit_tableau, '0000') + amp1111 = _eprop.amplitude_of_state(self.circuit_tableau, '1111') + + self.assertTrue(abs(amp0000)<1e-7) + self.assertTrue(abs(amp1111 - np.sqrt(.125))<1e-7) + + amp0000 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '0000') + amp1111 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '1111') + + self.assertTrue(abs(amp0000)<1e-7) + self.assertTrue(abs(amp1111 - np.sqrt(.125))<1e-7) + + def test_bitstring_to_tableau(self): + tableau = _eprop.bitstring_to_tableau('1010') + self.assertEqual(tableau, stim.PauliString('XIXI').to_tableau()) + + def test_pauli_phase_update(self): + test_paulis = ['YII', 'ZII', stim.PauliString('XYZ'), stim.PauliString('+iIII')] + test_bitstring = '100' + + correct_phase_updates_standard = [-1j, -1, -1j, 1j] + correct_phase_updates_dual = [1j, -1, 1j, 1j] + correct_output_bitstrings = ['000', '100', '010', '100'] + + for i, test_pauli in enumerate(test_paulis): + phase_update, output_bitstring = _eprop.pauli_phase_update(test_pauli, test_bitstring) + self.assertEqual(phase_update, correct_phase_updates_standard[i]) + self.assertEqual(output_bitstring, correct_output_bitstrings[i]) + + for i, test_pauli in enumerate(test_paulis): + phase_update, output_bitstring = _eprop.pauli_phase_update(test_pauli, test_bitstring, dual=True) + self.assertEqual(phase_update, correct_phase_updates_dual[i]) + self.assertEqual(output_bitstring, correct_output_bitstrings[i]) + + def test_phi(self): + bit_strings_3Q = list(product(['0','1'], repeat=3)) + for bit_string in bit_strings_3Q: + for pauli_1, pauli_2 in product(stim.PauliString.iter_all(3), stim.PauliString.iter_all(3)): + phi_num = _eprop.phi_numerical(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2) + phi_analytic = _eprop.phi(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2) + if abs(phi_num-phi_analytic) > 1e-4: + _eprop.phi(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2, debug=True) + raise 
ValueError(f'{pauli_1}, {pauli_2}, {bit_string}, {phi_num=}, {phi_analytic=}') + + def test_alpha(self): + bit_strings_3Q = list(product(['0','1'], repeat=3)) + complete_errorgen_basis_3Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(3), default_label_type='local') + for bit_string in bit_strings_3Q: + for lbl in complete_errorgen_basis_3Q.labels: + alpha_num = _eprop.alpha_numerical(lbl, self.circuit_tableau_3Q, bit_string) + assert abs(alpha_num - _eprop.alpha(lbl, self.circuit_tableau_3Q, bit_string)) <1e-4 + + def test_stabilizer_probability_correction(self): + #The corrections testing here will just be integration testing, we'll + #check for correctness with the probability functions instead. + bitstrings = ['0000', '1000'] + orders = [1,2,3] + for bitstring in bitstrings: + for order in orders: + _eprop.stabilizer_probability_correction(self.propagated_errorgen_layer, self.circuit_tableau, bitstring, order) + + #def test_iterative_error_generator_composition(self): + # test_labels = [(_LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')])), + # (_LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')])), + # (_LSE('S', [stim.PauliString('YY')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')]))] + # rates = [(1,1,1), (1,1,1), (1,1,1)] + # + # correct_iterative_compositions = [ [], {_LSE('A', [stim.PauliString('IX'), stim.PauliString('XX')])} + # ] + + def test_approximate_stabilizer_probability(self): + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit, use_bch=True, bch_order=1) + first_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001') + second_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001', 
order=2) + third_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001', order=3) + + assert abs(first_order_diff) > abs(second_order_diff) + assert abs(second_order_diff) > abs(third_order_diff) + + first_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111') + second_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111', order=2) + third_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111', order=3) + + assert abs(first_order_diff) > abs(second_order_diff) + assert abs(second_order_diff) > abs(third_order_diff) + + def test_approximate_stabilizer_probabilities(self): + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit, use_bch=True, bch_order=1) + approx_stab_prob_vec_order_1 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau) + approx_stab_prob_vec_order_2 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau, order=2) + + tvd_order_1 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_1, ord=1) + tvd_order_2 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_2, ord=1) + + assert tvd_order_1 > tvd_order_2 + + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit_alt, use_bch=True, bch_order=1) + approx_stab_prob_vec_order_1 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau_alt) + approx_stab_prob_vec_order_2 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau_alt, order=2) + + tvd_order_1 = 
np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_1, ord=1) + tvd_order_2 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_2, ord=1) + + assert tvd_order_1 > tvd_order_2 + + def test_error_generator_taylor_expansion(self): + #this is just an integration test atm. + _eprop.error_generator_taylor_expansion(self.propagated_errorgen_layer, order=2) + +class ErrorGenPropUtilsTester(BaseCase): + pass +#helper functions + +def select_random_items_from_multiple_lists(input_lists, num_items, seed=None): + """ + Select a specified number of items at random from multiple lists without replacement. + + Parameters: + input_lists (list of lists): The lists from which to select items. + num_items (int): The number of items to select. + seed (int, optional): The seed for the random number generator. Defaults to None. + + Returns: + list of lists: A list of lists containing the randomly selected items from each input list. + """ + if not input_lists: + raise ValueError("input_lists cannot be empty") + + list_length = len(input_lists[0]) + for lst in input_lists: + if len(lst) != list_length: + raise ValueError("All input lists must have the same length") + + if num_items > list_length: + raise ValueError("num_items cannot be greater than the length of the input lists") + + if seed is not None: + random.seed(seed) + + indices = random.sample(range(list_length), num_items) + + return [[lst[i] for i in indices] for lst in input_lists] + +def sample_error_rates_dict(pspec, strengths, seed=None): + """ + For example: + strengths = {1: {'S':0.001, 'H':0.01}, + 2: {'S':0.01,'H':0.1}} + + The 'S' and 'H' entries in the strengths dictionary give + the maximum possible contribution to the infidelity from a given gate. 
+ """ + qubits = pspec.qubit_labels + errors_rates_dict = {} + for gate, availability in pspec.availability.items(): + n = pspec.gate_num_qubits(gate) + if availability == 'all-edges': + assert(n == 1), "Currently require all 2-qubit gates have a specified availability!" + qubits_for_gate = qubits + else: + qubits_for_gate = availability + for qs in qubits_for_gate: + label = Label(gate, qs) + # First, check if there's a strength specified for this specific gate. + max_stength = strengths.get(label, None) # to get highly biased errors can set generic error rates to be low, then set it to be high for one or two particular gates. + # Next, check if there's a strength specified for all gates with this name + if max_stength is None: + max_stength = strengths.get(gate, None) + # Finally, get error rate for all gates on this number of qubits. + if max_stength is None: + max_stength = strengths[n] + # Sample error rates. + errors_rates_dict[label] = sample_error_rates(max_stength, n, seed) + return errors_rates_dict + +def sample_error_rates(strengths, n, seed = None): + ''' + Samples an error rates dictionary for dependent gates. + ''' + error_rates_dict = {} + + #create a basis to get the basis element labels. + basis = BuiltinBasis('pp', 4**n) + + #set the rng + rng = np.random.default_rng(seed) + + # Sample stochastic error rates. First we sample the overall stochastic error rate. 
+ # Then we sample (and normalize) the individual stochastic error rates + stochastic_strength = strengths['S'] * rng.random() + s_error_rates = rng.random(4 ** n - 1) + s_error_rates = s_error_rates / np.sum(s_error_rates) * stochastic_strength + + hamiltonian_strength = strengths['H'] * rng.random() + h_error_rates = rng.random(4 ** n - 1) + h_error_rates = h_error_rates * np.sqrt(hamiltonian_strength) / np.sqrt(np.sum(h_error_rates**2)) + + error_rates_dict.update({('S', basis.labels[i + 1]): s_error_rates[i] for i in range(4 ** n - 1)}) + error_rates_dict.update({('H', basis.labels[i + 1]): h_error_rates[i] for i in range(4 ** n - 1)}) + + return error_rates_dict + +def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + ideal_meas = target_model.circuit_layer_operator(Label('Mdefault'), typ='povm').copy() + #calculate the probabilities. 
+ prob_vec = np.zeros(len(ideal_meas)) + for i, effect in enumerate(ideal_meas.values()): + dense_effect = effect.to_dense().copy() + dense_prep = ideal_prep.to_dense().copy() + prob_vec[i] = np.linalg.multi_dot([dense_effect.reshape((1,len(dense_effect))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]) + return prob_vec \ No newline at end of file From 6d584828976a214a1eda2b85813ebb9a4a4bbc64 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Feb 2025 22:27:18 -0700 Subject: [PATCH 063/102] Cleanup unused code and debug statements Remove unused function and debugging/print statements --- pygsti/tools/errgenproptools.py | 104 ++++---------------------------- 1 file changed, 12 insertions(+), 92 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 71b43b612..0c9bbc780 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -6021,7 +6021,7 @@ def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=No return mat -#Helper function for doing numeric commutators and compositions. +#Helper functions for doing numeric commutators and compositions. def error_generator_commutator_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None): """ @@ -6366,20 +6366,21 @@ def pauli_phase_update(pauli, bitstring, dual=False): return overall_phase, output_bitstring #TODO: This function needs a more evocative name -def phi(tableau, desired_bitstring, P, Q, debug = False): +def phi(tableau, desired_bitstring, P, Q): """ This function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. Parameters ---------- tableau : stim.Tableau - A stim Tableau corresponding to the input stabilizer state. - + A stim Tableau corresponding to the input stabilizer state. + desired_bitstring : str A string of zeros and ones corresponding to the bit string being measured. 
- + P : str or stim.PauliString The first pauli string index. + Q : str or stim.PauliString The second pauli string index. @@ -6390,8 +6391,6 @@ def phi(tableau, desired_bitstring, P, Q, debug = False): #start by getting the pauli string which maps the all-zeros string to the target bitstring. initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])) - if debug: - print(f'{initial_pauli_string=}') #map P and Q to stim.PauliString if needed. if isinstance(P, str): P = stim.PauliString(P) @@ -6401,28 +6400,16 @@ def phi(tableau, desired_bitstring, P, Q, debug = False): #combine this initial pauli string with the two input paulis eff_P = initial_pauli_string*P eff_Q = Q*initial_pauli_string - if debug: - print(f'{eff_P=}') - print(f'{eff_Q=}') + #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get #the corresponding phase corrections. all_zeros = '0'*len(eff_P) phase1, bitstring1 = pauli_phase_update(eff_P, all_zeros, dual=True) phase2, bitstring2 = pauli_phase_update(eff_Q, all_zeros) - if debug: - print(f'{phase1=}') - print(f'{phase2=}') - - print(f'{bitstring1=}') - print(f'{bitstring2=}') - + #get the amplitude of these two bitstrings in the stabilizer state. amp1 = amplitude_of_state(tableau, bitstring1) amp2 = amplitude_of_state(tableau, bitstring2).conjugate() #The second amplitude also needs a complex conjugate applied - - if debug: - print(f'{amp1=}') - print(f'{amp2=}') #now apply the phase corrections. amp1*=phase1 @@ -6502,7 +6489,7 @@ def phi_numerical(tableau, desired_bitstring, P, Q): return phi*scale -def alpha(errorgen, tableau, desired_bitstring, debug=False): +def alpha(errorgen, tableau, desired_bitstring): """ First-order error generator sensitivity function for probability. 
@@ -6528,28 +6515,15 @@ def alpha(errorgen, tableau, desired_bitstring, debug=False): if errgen_type == 'H': sensitivity = 2*phi(tableau, desired_bitstring, basis_element_labels[0], identity_pauli).imag - elif errgen_type == 'S': sensitivity = (phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \ - phi(tableau, desired_bitstring, identity_pauli, identity_pauli)).real - elif errgen_type == 'C': #TODO simplify this logic - if debug: - print(f'{2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1])=}') + elif errgen_type == 'C': first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) - #print(f'C: ({basis_element_labels[0], basis_element_labels[1]}) {phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1])=}', flush=True) sensitivity = first_term.real if basis_element_labels[0].commutes(basis_element_labels[1]): - #print(f'C: ({basis_element_labels[0], basis_element_labels[1]}) {phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli)=}', flush=True) second_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) - if debug: - print(f'{2*phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli)=}') sensitivity -= second_term.real - - #first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) - #second_term = phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) \ - # + phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) - #test_sensitivity = first_term.real - second_term.real - #assert abs(sensitivity-test_sensitivity)<1e-10 else: #A first_term = phi(tableau, desired_bitstring, basis_element_labels[1], basis_element_labels[0]) if not basis_element_labels[0].commutes(basis_element_labels[1]): @@ 
-6579,7 +6553,6 @@ def alpha_numerical(errorgen, tableau, desired_bitstring): #get the stabilizer state corresponding to the tableau. stabilizer_state = tableau.to_state_vector(endian='big') - #print(f'{stabilizer_state=}') stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) #also get the superoperator (in the standard basis) corresponding to the elementary error generator @@ -6619,42 +6592,6 @@ def _bitstring_to_int(bitstring) -> int: else: raise ValueError("Input must be either a string or a tuple of '0's and '1's") -def first_order_probability_correction(errorgen_dict, tableau, desired_bitstring): - """ - Compute the first-order correction to the probability of the specified bit string. - - Parameters - ---------- - errorgen_dict : dict - Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding - rates. - - tableau : stim.Tableau - Stim tableau corresponding to a particular stabilizer state being measured. - - desired_bitstring : str - String of 0's and 1's corresponding to the output bitstring being measured. - - Returns - ------- - correction : float - float corresponding to the correction to the output probability for the - desired bitstring induced by the error generator (to first order). - """ - - num_random = random_support(tableau) - scale = 1/2**(num_random) #TODO: This might overflow - - #now get the sum over the alphas and the error generator rate products needed. - alpha_errgen_prods = [0]*len(errorgen_dict) - - for i, (lbl, rate) in enumerate(errorgen_dict.items()): - alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate - - #print(f'{alpha_errgen_prods=}') - correction = scale*sum(alpha_errgen_prods) - return correction - def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order = 1, truncation_threshold = 1e-14): """ Compute the kth-order correction to the probability of the specified bit string. 
@@ -6697,23 +6634,18 @@ def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, for i, (lbl, rate) in enumerate(errorgen_dict.items()): if abs(rate) > truncation_threshold: alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate - #print(f'{alpha_errgen_prods=}') correction = scale*_np.sum(alpha_errgen_prods) - #print(f'{correction=}') if order > 1: #The order of the approximation determines the combinations of error generators #which need to be composed. (given by cartesian products of labels in errorgen_dict). labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] #Get a similar structure for the corresponding rates rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] - #print(f'{labels_by_order=}') - #print(f'{rates_by_order=}') for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): current_order_scale = 1/factorial(current_order) composition_results = [] for label_tup, rate_tup in zip(current_order_labels, current_order_rates): composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) - #print(f'{composition_results=}') #aggregate together any overlapping terms in composition_results composition_results_dict = dict() for lbl, rate in composition_results: @@ -6721,16 +6653,11 @@ def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, composition_results_dict[lbl] = rate else: composition_results_dict[lbl] += rate - #print(f'{composition_results_dict=}') alpha_errgen_prods = _np.zeros(len(composition_results_dict)) for i, (lbl, rate) in enumerate(composition_results_dict.items()): if current_order_scale*abs(rate) > truncation_threshold: sensitivity = alpha(lbl, tableau, desired_bitstring) - #print(f'{lbl}: alpha(lbl, tableau, desired_bitstring)= {sensitivity}') alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) - 
#print(f'{alpha_errgen_prods=}') - #print(f'{_np.sum(alpha_errgen_prods)=}') - #print(f'{current_order_scale*scale*_np.sum(alpha_errgen_prods)=}') correction += current_order_scale*scale*_np.sum(alpha_errgen_prods) return correction @@ -6774,11 +6701,8 @@ def iterative_error_generator_composition(errorgen_labels, rates): #grab the last two elements of each of these and do the composition. new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) for new_label_rate_tup in new_labels_and_rates: - #print(f'{new_label_rate_tup=}') new_label_tup = label_tup[:-2] + (new_label_rate_tup[0],) new_rate_tup = rate_tup[:-2] + (new_label_rate_tup[1],) - #print(f'{new_label_tup=}') - #print(f'{new_rate_tup=}') if len(new_label_tup) == 1: fully_processed_labels.append(new_label_rate_tup) @@ -6858,8 +6782,8 @@ def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} ideal_prob = stabilizer_probability(tableau, desired_bitstring) - first_order_correction = stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order, truncation_threshold) - return ideal_prob + first_order_correction + correction = stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order, truncation_threshold) + return ideal_prob + correction def approximate_stabilizer_probabilities(errorgen_dict, circuit, order=1, truncation_threshold=1e-14): """ @@ -6949,14 +6873,11 @@ def error_generator_taylor_expansion(errorgen_dict, order = 1, truncation_thresh labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] #Get a similar structure for the corresponding rates rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] - #print(f'{labels_by_order=}') - #print(f'{rates_by_order=}') for current_order, (current_order_labels, current_order_rates) in 
enumerate(zip(labels_by_order, rates_by_order), start=2): order_scale = 1/factorial(current_order) composition_results = [] for label_tup, rate_tup in zip(current_order_labels, current_order_rates): composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) - #print(f'{composition_results=}') #aggregate together any overlapping terms in composition_results composition_results_dict = dict() for lbl, rate in composition_results: @@ -6964,7 +6885,6 @@ def error_generator_taylor_expansion(errorgen_dict, order = 1, truncation_thresh composition_results_dict[lbl] = rate else: composition_results_dict[lbl] += rate - #print(f'{composition_results_dict=}') for lbl, rate in composition_results_dict.items(): if order_scale*abs(rate) > truncation_threshold: taylor_order_terms[current_order-1][lbl] = order_scale*rate From f4674acd84f0be747bd933eb6282aa60716afb15 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Feb 2025 23:07:51 -0700 Subject: [PATCH 064/102] More unit tests Add more unit tests, main additions are additional tests for `LocalStimErrorgenLabel` --- .../errorgenpropagation/localstimerrorgen.py | 1 - test/unit/objects/test_errorgenpropagation.py | 79 ++++++++----------- test/unit/tools/test_errgenproptools.py | 2 +- 3 files changed, 34 insertions(+), 48 deletions(-) diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index dda77e34c..247fbbcf6 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -1,6 +1,5 @@ from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel as _ElementaryErrorgenLabel, GlobalElementaryErrorgenLabel as _GEEL,\ LocalElementaryErrorgenLabel as _LEEL -from .utilspygstistimtranslator import * import stim import numpy as _np from pygsti.tools import change_basis diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 
03c3e7731..5ccea2ce1 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -6,12 +6,11 @@ from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel from pygsti.tools import errgenproptools as _eprop -from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.tools.matrixtools import print_mx from itertools import product - - import numpy as np +import stim class ErrorgenPropTester(BaseCase): @@ -58,48 +57,34 @@ def test_approx_propagation_probabilities_BCH(self): self.assertTrue(np.linalg.norm(probabilities_BCH_order_3 - probabilities_forward_simulation, ord=1) < 1e-2) self.assertTrue(np.linalg.norm(probabilities_BCH_order_4 - probabilities_forward_simulation, ord=1) < 1e-2) - def test_errorgen_commutators(self): - #confirm we get the correct analytic commutators by comparing to numerics. - - #create an error generator basis. - errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') - - #use this basis to construct a dictionary from error generator labels to their - #matrices. - errorgen_lbls = errorgen_basis.labels - errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} - - #loop through all of the pairs of indices. 
- errorgen_label_pairs = list(product(errorgen_lbls, repeat=2)) - - #also get a version of this list where the labels are local stim ones - local_stim_errorgen_lbls = [LocalStimErrorgenLabel.cast(lbl) for lbl in errorgen_lbls] - stim_errorgen_label_pairs = list(product(local_stim_errorgen_lbls, repeat=2)) - - #for each pair compute the commutator directly and compute it analytically (then converting it to - #a numeric array) and see how they compare. - for pair1, pair2 in zip(errorgen_label_pairs, stim_errorgen_label_pairs): - numeric_commutator = error_generator_commutator_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict) - analytic_commutator = _eprop.error_generator_commutator(pair2[0], pair2[1]) - analytic_commutator_mat = comm_list_to_matrix(analytic_commutator, errorgen_lbl_matrix_dict, 2) - - norm_diff = np.linalg.norm(numeric_commutator-analytic_commutator_mat) - if norm_diff > 1e-10: - print(f'Difference in commutators for pair {pair1} is greater than 1e-10.') - print(f'{np.linalg.norm(numeric_commutator-analytic_commutator_mat)=}') - print('numeric_commutator=') - print_mx(numeric_commutator) - - #Decompose the numerical commutator into rates. 
- for lbl, dual in zip(errorgen_lbls, errorgen_basis.elemgen_dual_matrices): - rate = np.trace(dual.conj().T@numeric_commutator) - if abs(rate) >1e-3: - print(f'{lbl}: {rate}') - - print(f'{analytic_commutator=}') - print('analytic_commutator_mat=') - print_mx(analytic_commutator_mat) - raise ValueError() + +class LocalStimErrorgenLabelTester(BaseCase): + def setUp(self): + self.local_eel = LocalElementaryErrorgenLabel('C', ['XX', 'YY']) + self.global_eel = GlobalElementaryErrorgenLabel('C', ['XX', 'YY'], (0,1)) + self.sslbls = [0,1] + self.tableau = stim.PauliString('XI').to_tableau() + + def test_cast(self): + correct_lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) + + self.assertEqual(correct_lse, _LSE.cast(self.local_eel)) + self.assertEqual(correct_lse, _LSE.cast(self.global_eel, self.sslbs)) + + def test_to_local_global_eel(self): + lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) + + self.assertEqual(lse.to_local_eel(), self.local_eel) + self.assertEqual(lse.to_global_eel(), self.global_eel) + + def test_propagate_error_gen_tableau(self): + lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) + propagated_lse = lse.propagate_error_gen_tableau(self.tableau, 1) + self.assertEqual(propagated_lse, (_LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]), -1)) + + lse = _LSE('S', [stim.PauliString('ZI')]) + propagated_lse = lse.propagate_error_gen_tableau(self.tableau, 1) + self.assertEqual(propagated_lse, (_LSE('S', [stim.PauliString('ZI')]), 1)) #Helper Functions: def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch=False, bch_order=1, truncation_threshold=1e-14): @@ -219,4 +204,6 @@ def comm_list_to_matrix(comm_list, errorgen_matrix_dict, num_qubits): def error_generator_commutator_numerical(errorgen_1, errorgen_2, errorgen_matrix_dict): return errorgen_matrix_dict[errorgen_1]@errorgen_matrix_dict[errorgen_2] - errorgen_matrix_dict[errorgen_2]@errorgen_matrix_dict[errorgen_1] - + + 
+ diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index 9dd904d27..60a1eb0a7 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -14,7 +14,7 @@ from pygsti.processors import QubitProcessorSpec from pygsti.errorgenpropagation.errorpropagator_dev import ErrorGeneratorPropagator -#TODO: BCH approximation, errorgen_layer_to_matrix, stim_pauli_string_less_than +#TODO: BCH approximation, errorgen_layer_to_matrix, stim_pauli_string_less_than, iterative_error_generator_composition class ErrgenCompositionCommutationTester(BaseCase): From 0f1ca5a490a487210f2a254e9cf936690a655860 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Feb 2025 19:50:16 -0700 Subject: [PATCH 065/102] Clean up unneeded code Remove some code that is no longer needed for reference. --- .../errorpropagator_dev.py | 262 +++--------------- 1 file changed, 38 insertions(+), 224 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index 847e6bf87..a0fdcf66c 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -9,7 +9,8 @@ from .utilserrorgenpropagation import * import copy as _copy from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis -from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel import pygsti.tools.errgenproptools as _eprop import pygsti.tools.basistools as _bt import pygsti.tools.matrixtools as _mt @@ -17,9 +18,42 @@ class ErrorGeneratorPropagator: - def __init__(self, model, multi_gate_dict=None, bch_order=1,nonmarkovian=False, multi_gate=False): - 
self.model = model - self.bch_order = bch_order + def __init__(self, model, state_space=None, multi_gate_dict=None, nonmarkovian=False, multi_gate=False): + """ + Initialize an instance of `ErrorGeneratorPropagator`. This class is instantiated with a noise model + and manages operations related to propagating error generators through circuits, and constructing + effective end-of-circuit error generators. + + Parameters + ---------- + model: `OpModel` or dict + If an `OpModel` this model is used to construct error generators for each layer of a circuit + through which error generators are to be propagated. If a dictionary is passed in then this + dictionary should be an error generator coefficient dictionary, with keys that are + `ElementaryErrorgenLabel`s and values that are rates. This dictionary is then used as the + fixed per-circuit error generator independent of the circuit layers. (Dictionary support in development). + + state_space: `StateSpace`, optional (default None) + Only used if specifying a dictionary for `model` whose keys are + `GlobalElementaryErrorgenLabel`s. + """ + if isinstance(model, dict): + #convert this to one where the keys are `LocalStimErrorgenLabel`s. + if isinstance(next(iter(model)), _GlobalElementaryErrorgenLabel): + if state_space is None: + msg = 'When specifying a fixed error generator dictionary as the noise model using keys which are'\ + + '`GlobalElementaryErrorgenLabel` a corresponding `StateSpace` much be specified too.' 
+ raise ValueError(msg) + else: + sslbls = state_space.qubit_labels + lse_dict = {_LSE.cast(lbl, sslbls): rate for lbl, rate in model.items()} + elif isinstance(next(iter(model)), _LocalElementaryErrogenLabel): + lse_dict = {_LSE.cast(lbl): rate for lbl, rate in model.items()} + else: + lse_dict = model + self.model = lse_dict + else: + self.model = model def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, use_bch=False, bch_kwargs=None, mx_basis='pp'): @@ -525,81 +559,6 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ fully_propagated_layers.extend(errorgen_layers[stopping_idx:]) return fully_propagated_layers - #TODO: Add an option to return the results with the different BCH order combined. - #def _propagate_errorgen_layers_bch(self, errorgen_layers, propagation_layers, bch_order=1, include_spam=True, truncation_threshold=1e-14): - # """ - # Propagates the error generator layers through each of the corresponding propagation layers - # (i.e. the clifford operations for the remainder of the circuit). In this version we - # perform a layerwise application of the BCH approximation following each propagation to - # recombine the propaged error generator layer with the layer proceeding it before each - # successive propagation step. -# - # Parameters - # ---------- - # errorgen_layers : list of lists of dicts - # Each sublist corresponds to a circuit layer, with these sublists containing dictionaries - # of the error generator coefficients and rates for a circuit layer. Each dictionary corresponds - # to a different order of the BCH approximation (when not using the BCH approximation this list will - # be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. 
-# - # propagation_layers : list of `stim.Tableau` - # A list of `stim.Tableau` objects, each corresponding to a cumulative product of - # ideal Clifford operations for a set of circuit layers, each corresponding to a layer - # of operations which we will be propagating error generators through. -# - # bch_order : int, optional (default 1) - # Order of the BCH approximation to use. - # - # include_spam : bool, optional (default True) - # If True then include the error generators for state preparation and measurement - # are included in errogen_layers, and the state preparation error generator should - # be propagated through (the measurement one is simply appended at the end). -# - # truncation_threshold : float, optional (default 1e-14) - # Threshold below which any error generators with magnitudes below this value - # are truncated during the BCH approximation. - # - # Returns - # ------- - # fully_propagated_layer : dict - # Dictionart corresponding to the results of having propagated each of the error generator - # layers through the circuit to the end while combining the layers in a layerwise fashion - # using the BCH approximation. - # """ - # #TODO: Refactor this and _propagate_errorgen_layers to reduce code repetition as their current - # #implementations are very close to each other. - # #initialize a variable as temporary storage of the result - # #of performing BCH on pairwise between a propagated errorgen - # #layer and an unpropagated layer for layerwise BCH. - # if len(errorgen_layers)>0: - # combined_err_layer = errorgen_layers[0] -# - # #the stopping index in errorgen_layers will depend on whether the measurement error - # #generator is included or not. 
- # if include_spam: - # stopping_idx = len(errorgen_layers)-2 - # else: - # stopping_idx = len(errorgen_layers)-1 -# - # for i in range(stopping_idx): - # #err_layer = errorgen_layers[i] - # prop_layer = propagation_layers[i] - # new_error_dict = dict() - # #iterate through dictionary of error generator coefficients and propagate each one. - # for errgen_coeff_lbl in combined_err_layer: - # propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, combined_err_layer[errgen_coeff_lbl]) - # new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] - # #next use BCH to combine new_err_layer with the now adjacent layer of errorgen_layers[i+1] - # #combined_err_layer = _eprop.bch_approximation(new_error_dict, errorgen_layers[i+1], bch_order=bch_order, truncation_threshold=truncation_threshold) - # combined_err_layer = _eprop.bch_approximation(errorgen_layers[i+1], new_error_dict, bch_order=bch_order, truncation_threshold=truncation_threshold) - # - # #If we are including spam then there will be one last error generator which we doesn't have an associated propagation - # #which needs to be combined using BCH. 
- # if include_spam: - # #combined_err_layer = _eprop.bch_approximation(combined_err_layer, errorgen_layers[-1], bch_order=bch_order, truncation_threshold=truncation_threshold) - # combined_err_layer = _eprop.bch_approximation(errorgen_layers[-1], combined_err_layer, bch_order=bch_order, truncation_threshold=truncation_threshold) -# - # return combined_err_layer def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_dense=False): """ @@ -737,151 +696,6 @@ def InvertedNumericMap(errorMap,errorValues): return numeric_map -def ErrorPropagator(circ,errorModel,multi_gate_dict=None,bch_order=1,bch_layerwise=False, - nonmarkovian=False,multi_gate=False,error_layer_def=False): - if multi_gate and multi_gate_dict is None: - multi_gate_dict=dict() - stim_dict=standard_gatenames_stim_conversions() - if multi_gate: - for key in multi_gate_dict: - stim_dict[key]=stim_dict[multi_gate_dict[key]] - stim_layers=circ.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) - stim_layers.pop(0) #Immediatly toss the first layer because it is not important, - - propagation_layers=[] - if not bch_layerwise or nonmarkovian: - while len(stim_layers) != 0: - top_layer=stim_layers.pop(0) - for layer in stim_layers: - top_layer = layer*top_layer - propagation_layers.append(top_layer) - else: - propagation_layers = stim_layers - - if not error_layer_def: - errorLayers=buildErrorlayers(circ,errorModel, len(circ.line_labels)) - else: - errorLayers=[[[_copy.deepcopy(eg) for eg in errorModel]] for i in range(circ.depth)] - - num_error_layers=len(errorLayers) - - fully_propagated_layers=[] - for _ in range(0,num_error_layers-1): - err_layer=errorLayers.pop(0) - layer=propagation_layers.pop(0) - new_error_layer=[] - for err_order in err_layer: - new_error_dict=dict() - for key in err_order: - propagated_error_gen=key.propagate_error_gen_tableau(layer,err_order[key]) - new_error_dict[propagated_error_gen[0]]=propagated_error_gen[1] - 
new_error_layer.append(new_error_dict) - if bch_layerwise and not nonmarkovian: - following_layer = errorLayers.pop(0) - new_errors=BCH_Handler(err_layer,following_layer,bch_order) #This should be new_error_layer as the first arg. - errorLayers.insert(new_errors,0) - else: - fully_propagated_layers.append(new_error_layer) - - fully_propagated_layers.append(errorLayers.pop(0)) - - - if bch_layerwise and not nonmarkovian: - final_error=dict() - for order in errorLayers[0]: - for error in order: - if error in final_error: - final_error[error]=final_error[error]+order[error] - else: - final_error[error]=order[error] - return final_error - - elif not bch_layerwise and not nonmarkovian: - simplified_EOC_errors=dict() - if bch_order == 1: - for layer in fully_propagated_layers: - for order in layer: - for error in order: - if error in simplified_EOC_errors: - simplified_EOC_errors[error]=simplified_EOC_errors[error]+order[error] - else: - simplified_EOC_errors[error]=order[error] - - else: - Exception("Higher propagated through Errors are not Implemented Yet") - return simplified_EOC_errors - - else: - return fully_propagated_layers - - - -def buildErrorlayers(circ,errorDict,qubits): - ErrorGens=[] - #For the jth layer of each circuit - for j in range(circ.depth): - l = circ.layer(j) # get the layer - errorLayer=dict() - for _, g in enumerate(l): # for gate in layer l - gErrorDict = errorDict[g.name] #get the errors for the gate - p1=qubits*'I' # make some paulis why? 
- p2=qubits*'I' - for errs in gErrorDict: #for an error in the accompanying error dictionary - errType=errs[0] - paulis=[] - for ind,el in enumerate(g): #enumerate the gate ind =0 is name ind = 1 is first qubit ind = 2 is second qubit - if ind !=0: #if the gate element of concern is not the name - p1=p1[:el] + errs[1][ind-1] +p1[(el+1):] - - paulis.append(stim.PauliString(p1)) - if errType in "CA": - for ind,el in enumerate(g): - if ind !=0: - p2=p2[:el] + errs[2][ind-1] +p2[(el+1):] - paulis.append(stim.PauliString(p2)) - errorLayer[_LSE(errType,paulis)]=gErrorDict[errs] - ErrorGens.append([errorLayer]) - return ErrorGens -''' - -Inputs: -_______ -err_layer (list of dictionaries) -following_layer (list of dictionaries) -bch_order: - -''' -def BCH_Handler(err_layer,following_layer,bch_order): - new_errors=[] - for curr_order in range(0,bch_order): - working_order=dict() - #add first order terms into new layer - if curr_order == 0: - for error_key in err_layer[curr_order]: - working_order[error_key]=err_layer[curr_order][error_key] - for error_key in following_layer[curr_order]: - working_order[error_key]=following_layer[curr_order[error_key]] - new_errors.append(working_order) - - elif curr_order ==1: - working_order={} - for error1 in err_layer[curr_order-1]: - for error2 in following_layer[curr_order-1]: - errorlist = commute_errors(error1,error2,BCHweight=1/2*err_layer[error1]*following_layer[error2]) - for error_tuple in errorlist: - working_order[error_tuple[0]]=error_tuple[1] - if len(err_layer)==2: - for error_key in err_layer[1]: - working_order[error_key]=err_layer[1][error_key] - if len(following_layer)==2: - for error_key in following_layer[1]: - working_order[error_key]=following_layer[1][error_key] - new_errors.append(working_order) - - else: - Exception("Higher Orders are not Implemented Yet") - return new_errors - # There's a factor of a half missing in here. 
def nm_propagators(corr, Elist,qubits): Kms = [] From 8a9c0f507890208031afa76be4d1d10da3cad58d Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Feb 2025 19:53:52 -0700 Subject: [PATCH 066/102] More Clean Up Remove files containing older versions of the error generator propagation code that are no longer needed. --- .../errordict_deprecated.py | 10 - pygsti/errorgenpropagation/errorpropagator.py | 239 ----------- .../errorpropagator_dev.py | 1 - .../propagatableerrorgen.py | 379 ------------------ .../utilserrorgenpropagation.py | 196 --------- .../utilspygstistimtranslator.py | 68 ---- 6 files changed, 893 deletions(-) delete mode 100644 pygsti/errorgenpropagation/errordict_deprecated.py delete mode 100644 pygsti/errorgenpropagation/errorpropagator.py delete mode 100644 pygsti/errorgenpropagation/propagatableerrorgen.py delete mode 100644 pygsti/errorgenpropagation/utilserrorgenpropagation.py delete mode 100644 pygsti/errorgenpropagation/utilspygstistimtranslator.py diff --git a/pygsti/errorgenpropagation/errordict_deprecated.py b/pygsti/errorgenpropagation/errordict_deprecated.py deleted file mode 100644 index 95dadea0b..000000000 --- a/pygsti/errorgenpropagation/errordict_deprecated.py +++ /dev/null @@ -1,10 +0,0 @@ -from pygsti.extras.errorgenpropagation.propagatableerrorgen import propagatableerrorgen -from numpy import complex128 - -class errordict(dict): - - def __setitem__(self, __key: any, __value: any) -> None: - if __key in self : - super().__setitem__(__key,self[__key]+__value) - else: - super().__setitem__(__key,__value) \ No newline at end of file diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py deleted file mode 100644 index f58115d3e..000000000 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ /dev/null @@ -1,239 +0,0 @@ -import stim -from pygsti.extras.errorgenpropagation.propagatableerrorgen import * -from pygsti.extras.errorgenpropagation.utilspygstistimtranslator import * 
-from numpy import abs,zeros, complex128 - -from numpy.linalg import multi_dot -from scipy.linalg import expm -from pygsti.tools.internalgates import standard_gatenames_stim_conversions - - -''' -takes a pygsti circuit where each gate has a defined error model and returns the errorgenerators necessary to create an -end of circuit error generator under a variety of scenarios - -circ: pygsti circuit -errorModel: Dictionary defined the small markovian error generators and their rates for each gate -BCHOrder: in cases where the BCH approximation is used, carries it out to the desired order (can currently only handle order 1 or 2) -BCHLayerWise: If true will push the errors through one layer of gatesand then combines them using the bch approximation at each layer -If false will simply push all errors to the end -NonMarkovian: Pushes the error generators to the end and then formats them to work with the cumulant expansion code -MultiGateDict: Containts the translation between a numbered gate Gxpi22 and the PyGSTi standard gate used when a singular gate has -multiple error iterations -MultiGate: lets the code know -returns: list of propagatableerrorgens -''' -def ErrorPropagator(circ,errorModel,MultiGateDict={},BCHOrder=1,BCHLayerwise=False,NonMarkovian=False,MultiGate=False,ErrorLayerDef=False): - stim_dict=standard_gatenames_stim_conversions() - if MultiGate: - for key in MultiGateDict: - stim_dict[key]=stim_dict[MultiGateDict[key]] - stim_layers=circ.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) - stim_layers.pop(0) #Immeditielty toss the first layer because it is not important, - - propagation_layers=[] - if not BCHLayerwise or NonMarkovian: - while len(stim_layers) != 0: - top_layer=stim_layers.pop(0) - for layer in stim_layers: - top_layer = layer*top_layer - propagation_layers.append(top_layer) - else: - propagation_layers = stim_layers - - if not ErrorLayerDef: - errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) - else: - 
errorLayers=[[errorModel]]*circ.depth #this doesn't work - - - num_error_layers=len(errorLayers) - fully_propagated_layers=[] - for _ in range(0,num_error_layers-1): - err_layer=errorLayers.pop(0) - layer=propagation_layers.pop(0) - for err_order in err_layer: - for errorGenerator in err_order: - errorGenerator.propagate_error_gen_inplace_tableau(layer) - - if BCHLayerwise and not NonMarkovian: - following_layer = errorLayers.pop(0) - new_errors=BCH_Handler(err_layer,following_layer,BCHOrder) - errorLayers.insert(new_errors,0) - else: - fully_propagated_layers.append(err_layer) - - fully_propagated_layers.append(errorLayers.pop(0)) - if BCHLayerwise and not NonMarkovian: - for order in errorLayers: - for error in order: - if len(fully_propagated_layers)==0: - fully_propagated_layers.append(error) - elif error in fully_propagated_layers: - idy=fully_propagated_layers.index(error) - new_error=error+fully_propagated_layers[idy] - fully_propagated_layers.pop(idy) - fully_propagated_layers.append(new_error) - else: - fully_propagated_layers.append(error) - return fully_propagated_layers - - elif not BCHLayerwise and not NonMarkovian: - simplified_EOC_errors=[] - if BCHOrder == 1: - for layer in fully_propagated_layers: - for order in layer: - for error in order: - if len(simplified_EOC_errors)==0: - simplified_EOC_errors.append(error) - elif error in simplified_EOC_errors: - idy=simplified_EOC_errors.index(error) - new_error=error+simplified_EOC_errors[idy] - simplified_EOC_errors.pop(idy) - if not (abs(new_error.get_Error_Rate()) <.000001): - simplified_EOC_errors.append(new_error) - else: - if not (abs(error.get_Error_Rate())<.000001): - simplified_EOC_errors.append(error) - else: - Exception("Higher propagated through Errors are not Implemented Yet") - return simplified_EOC_errors - - else: - return fully_propagated_layers - - -''' -takes two error layers (list of propagatableerrorgens) and find the bch combination of the two -err_layer: list lists of 
propagatableerrorgens -following_layer: list of propagatableerrorgens -BCHOrder: Order to carry the bch expansion out to, can currently be set to one or two -returns list of lists of propagatableerrorgens. The outer list contains each of them individual list denote order -''' -def BCH_Handler(err_layer,following_layer,BCHOrder): - new_errors=[] - for curr_order in range(0,BCHOrder): - working_order=[] - if curr_order == 0: - used_indexes=[] - for error in err_layer[curr_order]: - try: - idy=following_layer[curr_order].index(error) - working_order.append(error+following_layer[curr_order][idy]) - used_indexes.append(idy) - except: - working_order.append(error) - for idy,error in enumerate(following_layer[curr_order]): - if idy in used_indexes: - continue - else: - working_order.append(error) - - new_errors.append(working_order) - elif curr_order ==1: - working_order=[] - for error1 in err_layer[curr_order-1]: - for error2 in following_layer[curr_order-1]: - errorlist = commute_errors(error1,error2,BCHweight=1/2) - for error3 in errorlist: - if len(working_order)==0: - working_order.append(error3) - elif error3 in working_order: - idy=working_order.index(error3) - new_error=error3+working_order[idy] - working_order.pop(idy) - working_order.append(new_error) - else: - working_order.append(error3) - if len(err_layer)==2: - for error3 in err_layer[1]: - if len(working_order)==0: - working_order.append(error3) - elif error3 in working_order: - idy=working_order.index(error3) - new_error=error3+working_order[idy] - working_order.pop(idy) - working_order.append(new_error) - else: - working_order.append(error3) - if len(following_layer)==2: - for error3 in following_layer[1]: - if len(working_order)==0: - working_order.append(error3) - elif error3 in working_order: - idy=working_order.index(error3) - new_error=error3+working_order[idy] - working_order.pop(idy) - if new_error.get_Error_Rate() != 0j: - working_order.append(new_error) - else: - working_order.append(error3) - 
new_errors.append(working_order) - - else: - Exception("Higher Orders are not Implemented Yet") - - -''' -takes a pygst circuit object and error Dictionary and creates error layers - -inputs -circ: pygsti circuit -errorDict: Dictionary defined the small markovian error generators and their rates for each gate -qubits: number of qubits in the circuit - -output -ErrorGens, a list of error gen layers (which are list of propagatable errorgens) - -''' -def buildErrorlayers(circ,errorDict,qubits): - ErrorGens=[] - #For the jth layer of each circuit - for j in range(circ.depth): - l = circ.layer(j) # get the layer - errorLayer=[] - for _, g in enumerate(l): # for gate in layer l - gErrorDict = errorDict[g.name] #get the errors for the gate - p1=qubits*'I' # make some paulis why? - p2=qubits*'I' - for errs in gErrorDict: #for an error in the accompanying error dictionary - errType=errs[0] - paulis=[] - for ind,el in enumerate(g): #enumerate the gate ind =0 is name ind = 1 is first qubit ind = 2 is second qubit - if ind !=0: #if the gate element of concern is not the name - p1=p1[:el] + errs[1][ind-1] +p1[(el+1):] - - paulis.append(p1) - if errType in "CA": - for ind,el in enumerate(g): - if ind !=0: - p2=p2[:el] + errs[2][ind-1] +p2[(el+1):] - paulis.append(p2) - errorLayer.append(propagatableerrorgen(errType,paulis,gErrorDict[errs])) - ErrorGens.append([errorLayer]) - - return ErrorGens - - - -# There's a factor of a half missing in here. 
-def nm_propagators(corr, Elist,qubits): - Kms = [] - for idm in range(len(Elist)): - Am=zeros([4**qubits,4**qubits],dtype=complex128) - for idmm in range(len(Elist[idm][0])): - Am += Elist[idm][0][idmm].toWeightedErrorBasisMatrix() - # This assumes that Elist is in reverse chronological order - partials = [] - for idn in range(idm, len(Elist)): - An=zeros([4**qubits,4**qubits],dtype=complex128) - for idnn in range(len(Elist[idn][0])): - An = Elist[idn][0][idnn].toWeightedErrorBasisMatrix() - partials += [corr[idm,idn] * Am @ An] - partials[0] = partials[0]/2 - Kms += [sum(partials,0)] - return Kms - -def averaged_evolution(corr, Elist,qubits): - Kms = nm_propagators(corr, Elist,qubits) - return multi_dot([expm(Km) for Km in Kms]) \ No newline at end of file diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator_dev.py index a0fdcf66c..88c742f49 100644 --- a/pygsti/errorgenpropagation/errorpropagator_dev.py +++ b/pygsti/errorgenpropagation/errorpropagator_dev.py @@ -6,7 +6,6 @@ from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions -from .utilserrorgenpropagation import * import copy as _copy from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel diff --git a/pygsti/errorgenpropagation/propagatableerrorgen.py b/pygsti/errorgenpropagation/propagatableerrorgen.py deleted file mode 100644 index 239446d0e..000000000 --- a/pygsti/errorgenpropagation/propagatableerrorgen.py +++ /dev/null @@ -1,379 +0,0 @@ -from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel -from .utilspygstistimtranslator import * -import stim -from numpy import array,kron -from pygsti.tools import change_basis -from pygsti.tools.lindbladtools import create_elementary_errorgen -''' -Similar to 
errorgenlabel but has an errorrate included as well as additional classes -''' -# Create a new pygsti-ish method where we use a modified dictionary and a modified local error generator where the keys are -# stim PauliStrings -class propagatableerrorgen(ElementaryErrorgenLabel): - ''' - Labels an elementary errorgen by a type, pauli and error rate - ''' - - @classmethod - def cast(cls, obj, sslbls=None, identity_label='I'): - raise NotImplementedError("TODO: Implement casts for this method") - - - ''' - Initiates the errorgen object - Inputs - errorgen_type: charecture can be set to 'H' Hamiltonian, 'S' Stochastic, 'C' Correlated or 'A' active following the conventions - of the taxonomy of small markovian errorgens paper - - Outputs: - propagatableerrorgen object - ''' - def __init__(self,errorgen_type,basis_element_labels,error_rate): - self.errorgen_type=str(errorgen_type) - self.basis_element_labels=tuple(basis_element_labels) - self.error_rate=error_rate - - ''' - hashes the error gen object - ''' - def __hash__(self): - return hash((self.errorgen_type,self.basis_element_labels)) - - ''' - checks and if two error gens have the same type and labels - ''' - def __eq__(self, other): - return (self.errorgen_type == other.errorgen_type - and self.basis_element_labels == other.basis_element_labels) - - ''' - displays the errorgens as strings - ''' - def __str__(self): - return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")" + ": " + str(self.error_rate) - - - def __repr__(self): - return str((self.errorgen_type, self.basis_element_labels, self.error_rate)) - - ''' - adds the error rates together oftwo error generators of the same type and label - ''' - def __add__(self,other): - if self.errorgen_type == other.errorgen_type and self.basis_element_labels == other.basis_element_labels: - return propagatableerrorgen(self.errorgen_type,self.basis_element_labels,self.error_rate + other.error_rate) - else: - raise Exception("ErrorGens are not 
equal") - - ''' - returns the dictionary representation of the error generator inline with pygsti notation - ''' - def to_dict(self): - return {self: self.error_rate} - - - ''' - returns the error rate - ''' - def get_Error_Rate(self): - return self.error_rate - - ''' - returns the string representation of the first pauli label - ''' - def getP1(self): - return self.basis_element_labels[0] - - ''' - returns the string representation of the second pauli label - ''' - def getP2(self): - return self.basis_element_labels[1] - - def ErrorWeight(self): - def Weight(pauli): - weight=0 - for char in pauli: - if char == 'I': - continue - else: - weight+=1 - return weight - if len(self.basis_element_labels)==1 or Weight(self.basis_element_labels[0]) > Weight(self.basis_element_labels[1]): - return Weight(self.basis_element_labels[0]) - else: - return Weight(self.basis_element_labels[1]) - - - ''' - propagates a propagatableerrorgen object through a clifford layer, returns the created error gen - ''' - def propagate_error_gen_inplace(self, player): - slayer = pyGSTiLayer_to_stimLayer(player) - new_basis_labels = [] - weightmod = 1 - for pauli in self.basis_element_labels: - temp=pyGSTiPauli_2_stimPauli(pauli) - temp = slayer(temp) - weightmod=weightmod*temp.sign - new_basis_labels.append(stimPauli_2_pyGSTiPauli(temp)) - - if self.errorgen_type in 'HCA': - self.error_rate=self.error_rate*weightmod - self.basis_element_labels =tuple(new_basis_labels) - - ''' - using stim propagates the associated pauli labels through a stim tableu object, the object is modified inplace - ''' - def propagate_error_gen_inplace_tableau(self, slayer): - new_basis_labels = [] - weightmod = 1 - for pauli in self.basis_element_labels: - temp=pyGSTiPauli_2_stimPauli(pauli) - temp = slayer(temp) - weightmod=weightmod*temp.sign - new_basis_labels.append(stimPauli_2_pyGSTiPauli(temp)) - if self.errorgen_type in 'HCA': - self.error_rate=self.error_rate*weightmod - self.basis_element_labels 
=tuple(new_basis_labels) - - - - ''' - returns the strings representing the pauli labels in the pygsti representation of paulis as stim PauliStrings - ''' - def returnStimPaulis(self): - paulis_string=[] - for pauli in self.basis_element_labels: - paulis_string.append(stim.PauliString(pauli)) - return tuple(paulis_string) - - ''' - Returns the errorbasis matrix for the associated errorgenerator mulitplied by its error rate - - input: A pygsti defined matrix basis by default can be pauli-product, gellmann 'gm' or then pygsti standard basis 'std' - functions defaults to pauli product if not specified - ''' - def toWeightedErrorBasisMatrix(self,matrix_basis='pp'): - PauliDict={ - 'I' : array([[1.0,0.0],[0.0,1.0]]), - 'X' : array([[0.0j, 1.0+0.0j], [1.0+0.0j, 0.0j]]), - 'Y' : array([[0.0, -1.0j], [1.0j, 0.0]]), - 'Z' : array([[1.0, 0.0j], [0.0j, -1.0]]) - } - paulis=[] - for paulistring in self.basis_element_labels: - for idx,pauli in enumerate(paulistring): - if idx == 0: - pauliMat = PauliDict[pauli] - else: - pauliMat=kron(pauliMat,PauliDict[pauli]) - paulis.append(pauliMat) - if self.errorgen_type in 'HS': - return self.error_rate*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0]),'std',matrix_basis) - else: - return self.error_rate*change_basis(create_elementary_errorgen(self.errorgen_type,paulis[0],paulis[1]),'std',matrix_basis) - - - - - -''' -Returns the Commutator of two errors -''' -def commute_errors(ErG1,ErG2, weightFlip=1.0, BCHweight=1.0): - def com(p1,p2): - P1 = pyGSTiPauli_2_stimPauli(p1) - P2=pyGSTiPauli_2_stimPauli(p2) - P3=P1*P2-P2*P1 - return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) - - def acom(p1,p2): - P1 = pyGSTiPauli_2_stimPauli(p1) - P2=pyGSTiPauli_2_stimPauli(p2) - P3=P1*P2+P2*P1 - return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) - - def labelMultiply(p1,p2): - P1 = pyGSTiPauli_2_stimPauli(p1) - P2=pyGSTiPauli_2_stimPauli(p2) - P3=P1*P2 - return (P3.weight,stimPauli_2_pyGSTiPauli(P3)) - - errorGens=[] - - 
wT=ErG1.getWeight()*ErG2.getWeight()*weightFlip*BCHweight - - if ErG1.getType()=='H' and ErG2.getType()=='H': - pVec=com(ErG1.getP1() , ErG2.getP2()) - errorGens.append( propagatableerrorgen( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) - - elif ErG1.getType()=='H' and ErG2.getType()=='S': - pVec=com(ErG2.getP1() , ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'C' , [ErG2.getP1() , pVec[1]] , 1j*wT*pVec[0] ) ) - - elif ErG1.getType()=='S' and ErG2.getType()=='H': - pVec=com(ErG2.getP1() , ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'C' , [ErG2.getP1() , pVec[1]] , -1j*wT *pVec[0] ) ) - - elif ErG1.getType()=='H' and ErG2.getType()=='C': - pVec1=com(ErG2.getP1() , ErG1.getP1()) - errorGens.append( propagatableerrorgen('C' , [pVec1[1], ErG2.getP2()] , 1j*wT*pVec1[0] ) ) - pVec2=com(ErG2.getP2() , ErG1.getP1()) - errorGens.append( propagatableerrorgen('C' , [pVec2[1] , ErG2.getP1()] , 1j*wT*pVec2[0] ) ) - - elif ErG1.getType()=='C' and ErG2.getType()=='H': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType()=='H' and ErG2.getType()=='A': - pVec1 = com(ErG1.getP1() , ErG2.getP1()) - errorGens.append( propagatableerrorgen('A' , [pVec1[1] , ErG2.getP2()] , -1j*wT*pVec1[0]) ) - pVec2 = com(ErG1.getP1() , ErG2.getP2()) - errorGens.append( propagatableerrorgen('A' , [ErG2.getP1(), pVec2[1]] , -1j*wT*pVec2[0] ) ) - - elif ErG1.getType()=='A' and ErG2.getType()=='H': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType()=='S' and ErG2.getType()=='S': - errorGens.append( propagatableerrorgen('H', ErG1.getP1(),0 )) - - elif ErG1.getType()=='S' and ErG2.getType()=='C': - pVec1=labelMultiply(ErG1.getP1() , ErG2.getP1()) - pVec2=labelMultiply(ErG2.getP2() , ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(ErG1.getP1() , ErG2.getP2()) - pVec2 = labelMultiply(ErG2.getP1() , ErG1.getP1()) - 
errorGens.append( propagatableerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 =acom(ErG2.getP1(), ErG2.getP2()) - pVec2 = labelMultiply(pVec1[1],ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'A' ,[pVec2[1], ErG1.getP1()] , -1j*.5*wT*pVec1[0]*pVec2[0])) - pVec1=acom(ErG2.getP1(), ErG2.getP2()) - pVec2=labelMultiply(ErG1.getP1(),pVec1[1]) - errorGens.append( propagatableerrorgen( 'A', [ErG1.getP1() ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'S': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'S' and ErG2.getType() == 'A': - pVec1 =labelMultiply(ErG1.getP1() , ErG2.getP1()) - pVec2=labelMultiply(ErG2.getP2() , ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) - pVec1=labelMultiply(ErG1.getP1() , ErG2.getP2()) - pVec2=labelMultiply(ErG2.getP1() , ErG1.getP1()) - errorGens.append( propagatableerrorgen( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = com(ErG2.getP1() , ErG2.getP2()) - pVec2 = com(ErG1.getP1(),pVec1[1]) - errorGens.append( propagatableerrorgen( 'A', [ErG1.getP1(), pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) - - elif ErG1.getType() == 'A' and ErG1.getType() == 'S': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'C': - A=ErG1.getP1() - B=ErG1.getP2() - P=ErG2.getP1() - Q=ErG2.getP2() - pVec1 = labelMultiply(A,P) - pVec2 =labelMultiply(Q,B) - errorGens.append( propagatableerrorgen( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(A,Q) - pVec2 =labelMultiply(P,B) - errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(B,P) - pVec2 =labelMultiply(Q,A) - errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = 
labelMultiply(B,Q) - pVec2 =labelMultiply(P,A) - errorGens.append( propagatableerrorgen( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=com(P,pVec1[1]) - errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=com(Q,pVec1[1]) - errorGens.append( propagatableerrorgen( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(P,Q) - pVec2=com(pVec1[1],A) - errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(P,Q) - pVec2=com(pVec1[1],B) - errorGens.append( propagatableerrorgen( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=acom(P,Q) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( propagatableerrorgen( 'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'A': - A=ErG1.getP1() - B=ErG1.getP2() - P=ErG2.getP1() - Q=ErG2.getP2() - pVec1 = labelMultiply(A,P) - pVec2 =labelMultiply(Q,B) - errorGens.append( propagatableerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(A,Q) - pVec2 =labelMultiply(P,B) - errorGens.append( propagatableerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(B,P) - pVec2 =labelMultiply(Q,A) - errorGens.append( propagatableerrorgen('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(P,A) - pVec2 =labelMultiply(B,Q) - errorGens.append( propagatableerrorgen('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = com(P,Q) - pVec2 =com(A,pVec1[1]) - errorGens.append( propagatableerrorgen('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) - pVec1 = com(P,Q) - pVec2 =com(B,pVec1[1]) - errorGens.append( propagatableerrorgen('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) - pVec1 = acom(A,B) - pVec2 =com(P,pVec1[1]) - errorGens.append( propagatableerrorgen('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) 
- pVec1 = acom(A,B) - pVec2 =com(Q,pVec1[1]) - errorGens.append( propagatableerrorgen('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = com(P,Q) - pVec2 =acom(A,B) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( propagatableerrorgen('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - elif ErG1.getType() == 'A' and ErG2.getType() == 'C': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'A' and ErG2.getType() == 'A': - A=ErG1.getP1() - B=ErG1.getP2() - P=ErG2.getP1() - Q=ErG2.getP2() - pVec1=labelMultiply(Q,B) - pVec2=labelMultiply(A,P) - errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) - pVec1=labelMultiply(P,A) - pVec2=labelMultiply(B,Q) - errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - pVec1=labelMultiply(B,P) - pVec2=labelMultiply(Q,A) - errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - pVec1=labelMultiply(A,Q) - pVec2=labelMultiply(P,B) - errorGens.append(propagatableerrorgen('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(B,pVec1[1]) - errorGens.append(propagatableerrorgen('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(A,pVec1[1]) - errorGens.append(propagatableerrorgen('C',[pVec2[1],B] ,-.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(A,B) - pVec2=com(P,pVec1[1]) - errorGens.append(propagatableerrorgen('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(A,B) - pVec2=com(Q,pVec1[1]) - errorGens.append(propagatableerrorgen('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(A,B) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( propagatableerrorgen('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - - return errorGens - - diff --git a/pygsti/errorgenpropagation/utilserrorgenpropagation.py b/pygsti/errorgenpropagation/utilserrorgenpropagation.py deleted file mode 100644 index 
0ad96e529..000000000 --- a/pygsti/errorgenpropagation/utilserrorgenpropagation.py +++ /dev/null @@ -1,196 +0,0 @@ - -from .localstimerrorgen import LocalStimErrorgenLabel as _LSE -from numpy import conjugate - -''' -Returns the Commutator of two errors -''' -def commute_errors(ErG1,ErG2, weightFlip=1.0, BCHweight=1.0): - def com(P1,P2): - P3=P1*P2-P2*P1 - return (P3.weight, P3*conjugate(P3.weight)) - # returns (sign - def acom(P1,P2): - P3=P1*P2+P2*P1 - return (P3.weight, P3*conjugate(P3.weight)) - - def labelMultiply(P1,P2): - P3=P1*P2 - return (P3.weight, P3*conjugate(P3.weight)) - - errorGens=[] - - wT=weightFlip*BCHweight - - if ErG1.getType()=='H' and ErG2.getType()=='H': - pVec=com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - if pVec[0] != 0: - errorGens.append( _LSE( 'H' , [pVec[1]] , -1j*wT *pVec[0] ) ) - - elif ErG1.getType()=='H' and ErG2.getType()=='S': - pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , 1j*wT*pVec[0] ) ) - - elif ErG1.getType()=='S' and ErG2.getType()=='H': - pVec=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'C' , [ErG2.basis_element_labels[0] , pVec[1]] , -1j*wT *pVec[0] ) ) - - elif ErG1.getType()=='H' and ErG2.getType()=='C': - pVec1=com(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE('C' , [pVec1[1], ErG2.basis_element_labels[1]] , 1j*wT*pVec1[0] ) ) - pVec2=com(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE('C' , [pVec2[1] , ErG2.basis_element_labels[0]] , 1j*wT*pVec2[0] ) ) - - elif ErG1.getType()=='C' and ErG2.getType()=='H': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType()=='H' and ErG2.getType()=='A': - pVec1 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - errorGens.append( _LSE('A' , [pVec1[1] , ErG2.basis_element_labels[1]] 
, -1j*wT*pVec1[0]) ) - pVec2 = com(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) - errorGens.append( _LSE('A' , [ErG2.basis_element_labels[0], pVec2[1]] , -1j*wT*pVec2[0] ) ) - - elif ErG1.getType()=='A' and ErG2.getType()=='H': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType()=='S' and ErG2.getType()=='S': - errorGens.append( _LSE('H', ErG1.basis_element_labels[0],0 )) - - elif ErG1.getType()=='S' and ErG2.getType()=='C': - pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'A' , [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) - pVec2 = labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 =acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) - pVec2 = labelMultiply(pVec1[1],ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'A' ,[pVec2[1], ErG1.basis_element_labels[0]] , -1j*.5*wT*pVec1[0]*pVec2[0])) - pVec1=acom(ErG2.basis_element_labels[0], ErG2.basis_element_labels[1]) - pVec2=labelMultiply(ErG1.basis_element_labels[0],pVec1[1]) - errorGens.append( _LSE( 'A', [ErG1.basis_element_labels[0] ,pVec2[1]],-1j*.5*wT*pVec1[0]*pVec2[0])) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'S': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'S' and ErG2.getType() == 'A': - pVec1 =labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[0]) - pVec2=labelMultiply(ErG2.basis_element_labels[1] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'C', [pVec1[1], pVec2[1]] ,1j*wT*pVec1[0]*pVec2[0] )) - pVec1=labelMultiply(ErG1.basis_element_labels[0] , ErG2.basis_element_labels[1]) - 
pVec2=labelMultiply(ErG2.basis_element_labels[0] , ErG1.basis_element_labels[0]) - errorGens.append( _LSE( 'C', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = com(ErG2.basis_element_labels[0] , ErG2.basis_element_labels[1]) - pVec2 = com(ErG1.basis_element_labels[0],pVec1[1]) - errorGens.append( _LSE( 'A', [ErG1.basis_element_labels[0], pVec2[1]] ,-.5*wT*pVec1[0]*pVec2[0])) - - elif ErG1.getType() == 'A' and ErG1.getType() == 'S': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'C': - A=ErG1.basis_element_labels[0] - B=ErG1.basis_element_labels[1] - P=ErG2.basis_element_labels[0] - Q=ErG2.basis_element_labels[1] - pVec1 = labelMultiply(A,P) - pVec2 =labelMultiply(Q,B) - errorGens.append( _LSE( 'A', [pVec1[1], pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(A,Q) - pVec2 =labelMultiply(P,B) - errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(B,P) - pVec2 =labelMultiply(Q,A) - errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = labelMultiply(B,Q) - pVec2 =labelMultiply(P,A) - errorGens.append( _LSE( 'A' , [pVec1[1] , pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=com(P,pVec1[1]) - errorGens.append( _LSE( 'A' , [pVec2[1] , Q ], -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=com(Q,pVec1[1]) - errorGens.append( _LSE( 'A' , [pVec2[1], P] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(P,Q) - pVec2=com(pVec1[1],A) - errorGens.append( _LSE( 'A' , [pVec2[1] , B] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(P,Q) - pVec2=com(pVec1[1],B) - errorGens.append( _LSE( 'A' , [pVec2[1] , A ] , -.5*1j*wT*pVec1[0]*pVec2[0])) - pVec1=acom(A,B) - pVec2=acom(P,Q) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( _LSE( 'H', [pVec3[1]] ,.25*1j*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - elif ErG1.getType() == 'C' and ErG2.getType() == 'A': - A=ErG1.basis_element_labels[0] 
- B=ErG1.basis_element_labels[1] - P=ErG2.basis_element_labels[0] - Q=ErG2.basis_element_labels[1] - pVec1 = labelMultiply(A,P) - pVec2 =labelMultiply(Q,B) - errorGens.append( _LSE('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(A,Q) - pVec2 =labelMultiply(P,B) - errorGens.append( _LSE('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(B,P) - pVec2 =labelMultiply(Q,A) - errorGens.append( _LSE('C' , [pVec1[1],pVec2[1]] , 1j*wT*pVec1[0]*pVec2[0])) - pVec1 = labelMultiply(P,A) - pVec2 =labelMultiply(B,Q) - errorGens.append( _LSE('C' ,[pVec1[1],pVec2[1]] , -1j*wT*pVec1[0]*pVec2[0])) - pVec1 = com(P,Q) - pVec2 =com(A,pVec1[1]) - errorGens.append( _LSE('A' , [pVec2[1] , B] , .5*wT*pVec1[0]*pVec2[0] )) - pVec1 = com(P,Q) - pVec2 =com(B,pVec1[1]) - errorGens.append( _LSE('A' , [pVec2[1], A ], .5*wT*pVec1[0]*pVec2[0] )) - pVec1 = acom(A,B) - pVec2 =com(P,pVec1[1]) - errorGens.append( _LSE('C', [pVec2[1] , Q ], .5*1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = acom(A,B) - pVec2 =com(Q,pVec1[1]) - errorGens.append( _LSE('C',[pVec2[1],P ],-.5*1j*wT*pVec1[0]*pVec2[0] )) - pVec1 = com(P,Q) - pVec2 =acom(A,B) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( _LSE('H',[pVec3[1]],-.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - elif ErG1.getType() == 'A' and ErG2.getType() == 'C': - errorGens = commute_errors(ErG2,ErG1,weightFlip=-1.0,BCHweight=BCHweight) - - elif ErG1.getType() == 'A' and ErG2.getType() == 'A': - A=ErG1.basis_element_labels[0] - B=ErG1.basis_element_labels[1] - P=ErG2.basis_element_labels[0] - Q=ErG2.basis_element_labels[1] - pVec1=labelMultiply(Q,B) - pVec2=labelMultiply(A,P) - errorGens.append(_LSE('A',[pVec1[1],pVec2[1]] ,-1j*wT*pVec1[0]*pVec2[0])) - pVec1=labelMultiply(P,A) - pVec2=labelMultiply(B,Q) - errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - pVec1=labelMultiply(B,P) - pVec2=labelMultiply(Q,A) - errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - 
pVec1=labelMultiply(A,Q) - pVec2=labelMultiply(P,B) - errorGens.append(_LSE('A',[pVec1[1],pVec2[1]],-1j*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(B,pVec1[1]) - errorGens.append(_LSE('C',[pVec2[1],A],.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(A,pVec1[1]) - errorGens.append(_LSE('C',[pVec2[1],B] ,-.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(A,B) - pVec2=com(P,pVec1[1]) - errorGens.append(_LSE('C', [pVec2[1],Q] ,.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(A,B) - pVec2=com(Q,pVec1[1]) - errorGens.append(_LSE('C', [pVec2[1],P] ,-.5*wT*pVec1[0]*pVec2[0])) - pVec1=com(P,Q) - pVec2=com(A,B) - pVec3=com(pVec1[1],pVec2[1]) - errorGens.append( _LSE('H',[pVec3[1]] ,.25*wT*pVec1[0]*pVec2[0]*pVec3[0])) - - - return errorGens \ No newline at end of file diff --git a/pygsti/errorgenpropagation/utilspygstistimtranslator.py b/pygsti/errorgenpropagation/utilspygstistimtranslator.py deleted file mode 100644 index b6473318f..000000000 --- a/pygsti/errorgenpropagation/utilspygstistimtranslator.py +++ /dev/null @@ -1,68 +0,0 @@ -import stim -from numpy import conjugate - - - -''' -returns a dictionary capable of translating pygsti standard gate labels to stim tablue representations of gates -''' -def Gate_Translate_Dict_p_2_s(): - pyGSTi_to_stim_GateDict={ - 'Gi' : stim.Tableau.from_named_gate('I'), - 'Gxpi' : stim.Tableau.from_named_gate('X'), - 'Gypi' : stim.Tableau.from_named_gate('Y'), - 'Gzpi' : stim.Tableau.from_named_gate('Z'), - 'Gxpi2' : stim.Tableau.from_named_gate('SQRT_X'), - 'Gypi2' : stim.Tableau.from_named_gate('SQRT_Y'), - 'Gzpi2' : stim.Tableau.from_named_gate('SQRT_Z'), - 'Gxmpi2': stim.Tableau.from_named_gate('SQRT_X_DAG'), - 'Gympi2': stim.Tableau.from_named_gate('SQRT_Y_DAG'), - 'Gzmpi2': stim.Tableau.from_named_gate('SQRT_Z_DAG'), - 'Gh' : stim.Tableau.from_named_gate('H'), - 'Gxx' : stim.Tableau.from_named_gate('SQRT_XX'), - 'Gzz' : stim.Tableau.from_named_gate('SQRT_ZZ'), - 'Gcnot' : stim.Tableau.from_named_gate('CNOT'), - 'Gswap' : 
stim.Tableau.from_named_gate('SWAP') - } - return pyGSTi_to_stim_GateDict - - -''' -returns a dict translating the stim tableu (gate) key to pyGSTi gate keys -TODO: change the stim tablues to tablues keys -''' -def Gate_Translate_Dict_s_2_p(): - dict = Gate_Translate_Dict_p_2_s() - return {v: k for k, v in dict.items()} - -''' -Takes a layer of pyGSTi gates and composes them into a single stim Tableu -''' -def pyGSTiLayer_to_stimLayer(player,qubits,MultiGateDict={},MultiGate=False): - slayer=stim.Tableau(qubits) - stimDict=Gate_Translate_Dict_p_2_s() - for sub_lbl in player: - if not MultiGate: - temp = stimDict[sub_lbl.name] - else: - temp = stimDict[MultiGateDict[sub_lbl.name]] - slayer.append(temp,sub_lbl.qubits) - return slayer - -''' -Takes the typical pygsti label for paulis and returns a stim PauliString object -''' -def pyGSTiPauli_2_stimPauli(pauli): - return stim.PauliString(pauli) - - -''' -Converts a stim paulistring to the string typically used in pysti to label paulis -warning: stim ofter stores a pauli phase in the string (i.e +1,-1,+i,-i) this is assumed positive -in this function, if the weight is needed please store paulistring::weight prior to applying this function -''' -def stimPauli_2_pyGSTiPauli(pauliString): - n=1 - if pauliString.sign==1j or pauliString.sign==-1j: - n=2 - return str(pauliString)[n:].replace('_',"I") \ No newline at end of file From ee64a04b1ee533c74783c15756aaa961584a90b4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 5 Feb 2025 19:55:50 -0700 Subject: [PATCH 067/102] Switch from dev to primary Switch the naming of the errorpropagator_dev module to errorpropagator. 
--- .../{errorpropagator_dev.py => errorpropagator.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pygsti/errorgenpropagation/{errorpropagator_dev.py => errorpropagator.py} (100%) diff --git a/pygsti/errorgenpropagation/errorpropagator_dev.py b/pygsti/errorgenpropagation/errorpropagator.py similarity index 100% rename from pygsti/errorgenpropagation/errorpropagator_dev.py rename to pygsti/errorgenpropagation/errorpropagator.py From 0f208aada5e8737d4567a793de1c4c14425d5182 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 8 Feb 2025 21:08:48 -0700 Subject: [PATCH 068/102] third and fifth order BCH bugfix Patch a bug in the third-order BCH implementation (zipping iterables of unequal length), and a sign error in the fifth-order BCH implementation. --- pygsti/tools/errgenproptools.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 0c9bbc780..e032d0e6b 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -180,9 +180,8 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #this at higher order if needed. 
commuted_errgen_list_1 = [] commuted_errgen_list_2 = [] - for (error1a, error1a_val), (error1b, error1b_val) in zip(errgen_layer_1.items(), errgen_layer_2.items()): + for error1a, error1a_val in errgen_layer_1.items(): for error2, error2_val in second_order_comm_dict.items(): - #only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator weighta = (1/6)*error1a_val*error2_val @@ -191,13 +190,16 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th commuted_errgen_sublist = error_generator_commutator(error1a, error2, weight=weighta, identity=identity) commuted_errgen_list_1.extend(commuted_errgen_sublist) - + + for error1b, error1b_val in errgen_layer_2.items(): + for error2, error2_val in second_order_comm_dict.items(): #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator weightb = -(1/6)*error1b_val*error2_val if not abs(weightb) < truncation_threshold: commuted_errgen_sublist = error_generator_commutator(error1b, error2, weight=weightb, identity=identity) - commuted_errgen_list_2.extend(commuted_errgen_sublist) + commuted_errgen_list_2.extend(commuted_errgen_sublist) + #turn the two new commuted error generator lists into dictionaries. #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. @@ -264,9 +266,9 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th # This gives 9 new commutators to calculate (7 if you used linearity, and even fewer would be needed # using the result from the paper above, but we won't here atm). elif curr_order == 4: - B = third_order_comm_dict_1 - C = third_order_comm_dict_2 - D = fourth_order_comm_dict + B = third_order_comm_dict_1 #has a factor of 1/12 folded in already. + C = third_order_comm_dict_2 #has a factor of -1/12 folded in already. + D = fourth_order_comm_dict #has a factor of -1/24 folded in already. #Compute the new commutators E, F and G as defined above. 
#Start with E: commuted_errgen_list_E = [] @@ -326,7 +328,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #We also need the following weight factors. F has a leading factor of (1/12) #E and G have a leading factor of (-1/12). D has a leading factor of (-1/24) #This gives the following additional weight multipliers: - #[X,F] = (-1/60); [Y,E] = (1/60); [Y,F]= (1/30); [X,E]= (1/30); [Y,G] = (-1/10); [X,D] = (1/5) + #[X,F] = (-1/60); [Y,E] = (-1/60); [Y,F]= (1/30); [X,E]= (1/30); [Y,G] = (-1/10); [X,D] = (1/5) #[X,F]: commuted_errgen_list_XF = [] @@ -344,7 +346,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th for error1, error1_val in errgen_layer_2.items(): for error2, error2_val in E_comm_dict.items(): #Won't add any weight adjustments at this stage, will do that for next commutator. - weight = (1/60)*error1_val*error2_val + weight = -(1/60)*error1_val*error2_val if abs(weight) < truncation_threshold: continue commuted_errgen_sublist = error_generator_commutator(error1, error2, From d631f81e5c14f8e9cd87b6cdc6ed3bc836bff47c Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 8 Feb 2025 21:11:26 -0700 Subject: [PATCH 069/102] Restore previous dense error gen construction Restore previous implementation of the constructor for dense error generator representations in the propagation class that had been previously commented out for future debugging. This version has since been debugged and is much faster than using the model member constructor path. 
--- pygsti/baseobjs/errorgenbasis.py | 2 +- pygsti/errorgenpropagation/errorpropagator.py | 95 ++++++++++--------- .../operations/lindbladerrorgen.py | 20 ++-- pygsti/tools/optools.py | 41 +++++++- 4 files changed, 99 insertions(+), 59 deletions(-) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index c349de51c..54da66548 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -105,7 +105,7 @@ def __init__(self, state_space, labels, basis_1q=None): elif isinstance(basis_1q, str): self._basis_1q = _Basis.cast(basis_1q, 4) else: - self._basis_1q = _Basis.cast('pp', 4) + self._basis_1q = _Basis.cast('PP', 4) self.state_space = state_space assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index 88c742f49..788e6fa05 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -7,13 +7,15 @@ from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions import copy as _copy -from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis +from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis, BuiltinBasis as _BuiltinBasis from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel import pygsti.tools.errgenproptools as _eprop import pygsti.tools.basistools as _bt import pygsti.tools.matrixtools as _mt +import pygsti.tools.optools as _ot from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from itertools import islice class ErrorGeneratorPropagator: @@ -161,8 +163,8 @@ def 
averaged_eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam for j in range(i+1): if i==j: # term: - - prop_contrib = amam + pass + #prop_contrib = amam else: pass @@ -587,49 +589,30 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_ #the elements necessary for the construction of the error generator matrix. #Construct a list of new errorgen coefficients by looping through the keys of errorgen_layer - #and converting them to LocalElementaryErrorgenLabels. + #and converting them to LocalElementaryErrorgenLabels. + local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()] + eg_types = [lbl.errorgen_type for lbl in local_errorgen_coeffs] + eg_bels = [lbl.basis_element_labels for lbl in local_errorgen_coeffs] + basis_1q = _BuiltinBasis('PP', 4) + num_qubits = len(self.model.state_space.qubit_labels) + errorgen = _np.zeros((4**num_qubits, 4**num_qubits), dtype=complex128) + #do this in blocks of 1000 to reduce memory requirements. + for eg_typ_batch, eg_bels_batch, eg_rates_batch in zip(_batched(eg_types, 1000), _batched(eg_bels, 1000), _batched(errorgen_layer.values(), 1000)): + elemgen_matrices = _ot.bulk_create_elementary_errorgen_nqudit(eg_typ_batch, eg_bels_batch, basis_1q, normalize=False, + sparse=False, tensorprod_basis=False) + + #Stack the arrays and then use broadcasting to weight them according to the rates + elemgen_matrices_array = _np.stack(elemgen_matrices, axis=-1) + weighted_elemgen_matrices_array = _np.array(eg_rates_batch)*elemgen_matrices_array + weighted_elemgen_matrices_array = _np.real_if_close(weighted_elemgen_matrices_array) + #The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis. + errorgen += _np.sum(weighted_elemgen_matrices_array, axis = 2) - #TODO: Debug this implementation, something weird is going on with the basis management and is only - #getting picked up for two or more qubits. 
- #local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()] - # - #errorgen_basis = _ExplicitElementaryErrorgenBasis(self.model.state_space, local_errorgen_coeffs, basis_1q='PP', elemgen_basis='pp') - #print(f'{errorgen_basis.elemgen_matrices=}') - ##Stack the arrays and then use broadcasting to weight them according to the rates - #elemgen_matrices_array = _np.stack(errorgen_basis.elemgen_matrices, axis=-1) - #weighted_elemgen_matrices_array = _np.fromiter(errorgen_layer.values(), dtype=_np.double)*elemgen_matrices_array - #weighted_elemgen_matrices_array = _np.real_if_close(weighted_elemgen_matrices_array) - ##The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis. - #errorgen = _np.sum(weighted_elemgen_matrices_array, axis = 2) - ##print(f'{errorgen=}') - # - ##finally need to change from the standard basis (which is what the error generator is currently in) - ##to the pauli basis. - #try: - # errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis)#, expect_real=False) - #except ValueError as err: - # print(f'{local_errorgen_coeffs=}') - # print(f'{errorgen_basis.labels=}') - # print(f'{_mt.is_hermitian(errorgen)=}') - # print(f'{errorgen_layer=}') - # _mt.print_mx(errorgen) - # raise err - - #if the model's basis is already the same as mx_basis then reuse the one from the model - if isinstance(mx_basis, str): - if set(self.model.basis.name.split('*')) == set([mx_basis]) or self.model.basis.name==mx_basis: - mx_basis = self.model.basis - global_errorgen_coeffs = [coeff_lbl.to_global_eel() for coeff_lbl in errorgen_layer.keys()] - coeff_dict = {lbl:_np.real_if_close(val) for lbl, val in zip(global_errorgen_coeffs, errorgen_layer.values())} - - errorgen = _LindbladErrorgen.from_elementary_errorgens(coeff_dict, parameterization='GLND', state_space=self.model.state_space, - mx_basis=mx_basis) - - if return_dense: - return errorgen.to_dense() - else: - return errorgen - + 
#finally need to change from the standard basis (which is what the error generator is currently in) + #to the pauli basis. + errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis)#, expect_real=False) + + return errorgen def ErrorPropagatorAnalytic(circ,errorModel,ErrorLayerDef=False,startingErrors=None): @@ -734,4 +717,24 @@ def error_stitcher(first_error,second_error): new_errors.append(new_layer) for layer in second_error: new_errors.append(layer) - return new_errors \ No newline at end of file + return new_errors + + + +def _batched(iterable, n): + """ + Yield successive n-sized batches from an iterable. + + Parameters: + iterable (iterable): The iterable to divide into batches. + n (int): The batch size. + + Yields: + iterable: An iterable containing the next batch of items. + """ + it = iter(iterable) + while True: + batch = list(islice(it, n)) + if not batch: + break + yield batch \ No newline at end of file diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index bb98a47ed..f24cbf7d3 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -348,6 +348,7 @@ def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis=" return cls(blocks, "auto", mx_basis, evotype, state_space) + #TODO: Need to make the construction robust to empty elementary_errorgens dictionaries. @classmethod def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto', elementary_errorgen_basis='PP', mx_basis="pp", truncate=True, evotype="default", state_space=None): @@ -357,15 +358,16 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' basis = _Basis.cast(elementary_errorgen_basis, dim) #check the first key, if local then no need to convert, otherwise convert from global. 
- first_key = next(iter(elementary_errorgens)) - if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): - #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - elementary_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v - for k, v in elementary_errorgens.items()} - else: - assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' + if elementary_errorgens: + first_key = next(iter(elementary_errorgens)) + if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): + #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elementary_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v + for k, v in elementary_errorgens.items()} + else: + assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' 
parameterization = LindbladParameterization.minimal_from_elementary_errorgens(elementary_errorgens) \ if parameterization == "auto" else LindbladParameterization.cast(parameterization) diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index 958493b56..ebee39c54 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1649,9 +1649,44 @@ def elementary_errorgens_dual(dim, typ, basis): return elem_errgens -def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_labels, elementary_errorgen_basis='pp', +def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_labels, elementary_errorgen_basis='PP', errorgen_basis='pp', return_projected_errorgen=False): - """ TODO: docstring """ + """ + Extract a dictionary of elemenary error generator coefficients and rates fromt he specified dense error generator + matrix. + + Parameters + ---------- + errorgen : numpy.ndarray + Error generator matrix + + elementary_errorgen_labels : list of `ElementaryErrorgenLabel`s + A list of `ElementaryErrorgenLabel`s corresponding to the coefficients + to extract from the input error generator. + + elementary_errorgen_basis : str or `Basis`, optional (default 'PP') + Basis used in construction of elementary error generator dual matrices. + + errorgen_basis : str or `Basis`, optional (default 'pp') + Basis of the input matrix specified in `errorgen`. + + return_projected_errorgen : bool, optional (default False) + If True return a new dense error generator matrix which has been + projected onto the subspace of error generators spanned by + `elementary_errorgen_labels`. + + Returns + ------- + projections : dict + Dictionary whose keys are the coefficients specified in `elementary_errorgen_labels` + (cast to `LocalElementaryErrorgenLabel`), and values are corresponding rates. 
+ + projected_errorgen : np.ndarray + Returned if return_projected_errorgen is True, a new dense error generator matrix which has been + projected onto the subspace of error generators spanned by + `elementary_errorgen_labels`. + + """ # the same as decompose_errorgen but given a dict/list of elementary errorgens directly instead of a basis and type if isinstance(errorgen_basis, _Basis): errorgen_std = _bt.change_basis(errorgen, errorgen_basis, errorgen_basis.create_equivalent('std')) @@ -1678,7 +1713,7 @@ def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_label bmx0 = elementary_errorgen_basis[bel_lbls[0]] bmx1 = elementary_errorgen_basis[bel_lbls[1]] if (len(bel_lbls) > 1) else None flat_projector = _lt.create_elementary_errorgen_dual(key.errorgen_type, bmx0, bmx1, sparse=False).flatten() - projections[key] = _np.real_if_close(_np.vdot(flat_projector, flat_errorgen_std), tol=1000) + projections[key] = _np.real_if_close(_np.vdot(flat_projector, flat_errorgen_std), tol=1000).item() if return_projected_errorgen: space_projector[:, i] = flat_projector From c335b01360a52b80f22d0c6441ceefedd899d7bd Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 8 Feb 2025 21:48:14 -0700 Subject: [PATCH 070/102] BCH approximation unit testing Add unit tests and related infrastructure for testing the BCH approximation's correctness. 
--- pygsti/tools/errgenproptools.py | 97 ++++++++++++++++++- test/unit/objects/test_errorgenpropagation.py | 2 +- test/unit/tools/test_errgenproptools.py | 84 +++++++++++++--- 3 files changed, 167 insertions(+), 16 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index e032d0e6b..2e8bedb48 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -15,7 +15,7 @@ from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL from pygsti.baseobjs import QubitSpace as _QubitSpace from pygsti.baseobjs.basis import BuiltinBasis as _BuiltinBasis -from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen from pygsti.circuits import Circuit as _Circuit @@ -6023,7 +6023,7 @@ def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=No return mat -#Helper functions for doing numeric commutators and compositions. +#Helper functions for doing numeric commutators, compositions and BCH. def error_generator_commutator_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None): """ @@ -6131,6 +6131,99 @@ def error_generator_composition_numerical(errorgen1, errorgen2, errorgen_matrix_ comp = errorgen_matrix_dict[_LSE.cast(errorgen1)]@errorgen_matrix_dict[_LSE.cast(errorgen2)] return comp +def bch_numerical(propagated_errorgen_layers, error_propagator, bch_order=1): + """ + Iteratively compute effective error generator layer produced by applying the BCH approximation + to the list of input error generator matrices. 
Note this is primarily intended + as part of testing and validation infrastructure. + + Parameters + ---------- + propagated_errorgen_layers : list of numpy.ndarrays + List of the error generator layers to combine using the BCH approximation (in circuit ordering) + + error_propagator : `ErrorGeneratorPropagator` + An `ErrorGeneratorPropagator` instance to use as part of the BCH calculation. + + bch_order : int, optional (default 1) + Order of the BCH approximation to apply (up to 5 is supported currently). + + Returns + ------- + numpy.ndarray + A dense numpy array corresponding to the result of the iterative application of the BCH + approximation. + """ + #Need to build an appropriate basis for getting the error generator matrices. + #accumulate the error generator coefficients needed. + collected_coeffs = [] + for layer in propagated_errorgen_layers: + for coeff in layer.keys(): + collected_coeffs.append(coeff.to_local_eel()) + #only want the unique ones. + unique_coeffs = list(set(collected_coeffs)) + + num_qubits = len(error_propagator.model.state_space.qubit_labels) + + errorgen_basis = _ExplicitElementaryErrorgenBasis(_QubitSpace(num_qubits), unique_coeffs, basis_1q=_BuiltinBasis('PP', 4)) + errorgen_lbl_matrix_dict = {lbl:mat for lbl,mat in zip(errorgen_basis.labels, errorgen_basis.elemgen_matrices)} + + #iterate through each of the propagated error generator layers and turn these into dense numpy arrays + errorgen_layer_mats = [] + for layer in propagated_errorgen_layers: + errorgen_layer_mats.append(error_propagator.errorgen_layer_dict_to_errorgen(layer, mx_basis='pp', return_dense=True)) + + #initialize a matrix for storing the result of doing BCH. 
+ bch_result = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128) + + if len(errorgen_layer_mats)==1: + return errorgen_layer_mats[0] + + #otherwise iterate through in reverse order (the propagated layers are + #in circuit ordering and not matrix multiplication ordering at the moment) + #and combine the terms pairwise + combined_err_layer = errorgen_layer_mats[-1] + for i in range(len(errorgen_layer_mats)-2, -1, -1): + combined_err_layer = pairwise_bch_numerical(combined_err_layer, errorgen_layer_mats[i], order=bch_order) + + return combined_err_layer + +def pairwise_bch_numerical(mat1, mat2, order=1): + """ + Helper function for doing the numerical BCH in a pairwise fashion. Note this function is primarily intended + for numerical validations as part of testing infrastructure. + """ + bch_result = _np.zeros(mat1.shape, dtype=_np.complex128) + if order >= 1: + bch_result += mat1 + mat2 + if order >= 2: + commutator12 = _matrix_commutator(mat1, mat2) + bch_result += .5*commutator12 + if order >= 3: + commutator112 = _matrix_commutator(mat1, commutator12) + commutator212 = _matrix_commutator(mat2, commutator12) + bch_result += (1/12)*(commutator112-commutator212) + if order >= 4: + commutator2112 = _matrix_commutator(mat2, commutator112) + bch_result += (-1/24)*commutator2112 + if order >= 5: + commutator1112 = _matrix_commutator(mat1, commutator112) + commutator2212 = _matrix_commutator(mat2, commutator212) + + commutator22212 = _matrix_commutator(mat2, commutator2212) + commutator11112 = _matrix_commutator(mat1, commutator1112) + commutator12212 = _matrix_commutator(mat1, commutator2212) + commutator21112 = _matrix_commutator(mat2, commutator1112) + commutator21212 = _matrix_commutator(mat2, _matrix_commutator(mat1, commutator212)) + commutator12112 = _matrix_commutator(mat1, commutator2112) + + bch_result += (-1/720)*(commutator11112 - commutator22212) + bch_result += (1/360)*(commutator21112 - commutator12212) + bch_result += (1/120)*(commutator21212 
- commutator12112) + return bch_result + +def _matrix_commutator(mat1, mat2): + return mat1@mat2 - mat2@mat1 #-----------First-Order Approximate Error Generator Probabilities---------------# diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 5ccea2ce1..22ca02a33 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -1,6 +1,6 @@ from ..util import BaseCase from pygsti.algorithms.randomcircuit import create_random_circuit -from pygsti.errorgenpropagation.errorpropagator_dev import ErrorGeneratorPropagator +from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator from pygsti.processors import QubitProcessorSpec from pygsti.models.modelconstruction import create_crosstalk_free_model from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index 60a1eb0a7..45297736b 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -1,5 +1,6 @@ import numpy as np -from pygsti.baseobjs import Label, QubitSpace +from scipy.linalg import logm +from pygsti.baseobjs import Label, QubitSpace, BuiltinBasis from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis from pygsti.algorithms.randomcircuit import create_random_circuit from pygsti.models.modelconstruction import create_crosstalk_free_model @@ -12,12 +13,26 @@ import random import stim from pygsti.processors import QubitProcessorSpec -from pygsti.errorgenpropagation.errorpropagator_dev import ErrorGeneratorPropagator +from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator #TODO: BCH approximation, errorgen_layer_to_matrix, stim_pauli_string_less_than, iterative_error_generator_composition class ErrgenCompositionCommutationTester(BaseCase): + def setUp(self): + num_qubits = 
4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + max_strengths = {1: {'S': 0, 'H': .0001}, + 2: {'S': 0, 'H': .0001}} + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + self.errorgen_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + self.propagated_errorgen_layers = self.errorgen_propagator.propagate_errorgens(self.circuit) + def test_errorgen_commutators(self): #confirm we get the correct analytic commutators by comparing to numerics. @@ -141,7 +156,59 @@ def test_errorgen_composition(self): print_mx(analytic_composition_mat) raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') - + def test_iterative_error_generator_composition(self): + test_labels = [(_LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')])), + (_LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')])), + (_LSE('S', [stim.PauliString('YY')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')]))] + rates = [(1,1,1), (1,1,1), (1,1,1)] + + correct_iterative_compositions = [[(_LSE('H', (stim.PauliString("+X"),)), (-2-0j)), (_LSE('H', (stim.PauliString("+X"),)), -2)], + [(_LSE('H', (stim.PauliString("+X_"),)), (-1+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), + (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), (_LSE('H', (stim.PauliString("+X_"),)), (-1+0j))], + [(_LSE('C', (stim.PauliString("+X_"), stim.PauliString("+YZ"))), 
(-1+0j)), (_LSE('C', (stim.PauliString("+_X"), stim.PauliString("+ZY"))), (-1+0j)), + (_LSE('A', (stim.PauliString("+XX"), stim.PauliString("+YY"))), (-1+0j)), (_LSE('H', (stim.PauliString("+ZZ"),)), (1+0j))] + ] + + for lbls, rates, correct_lbls in zip(test_labels, rates, correct_iterative_compositions): + iterated_composition = _eprop.iterative_error_generator_composition(lbls, rates) + self.assertEqual(iterated_composition, correct_lbls) + + def test_bch_approximation(self): + first_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=1) + propagated_errorgen_layers_bch_order_1 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) + first_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_1,mx_basis='pp', return_dense=True) + assert np.linalg.norm(first_order_bch_analytical-first_order_bch_numerical) < 1e-14 + + propagated_errorgen_layers_bch_order_2 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=2) + second_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=2) + second_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_2, mx_basis='pp', return_dense=True) + assert np.linalg.norm(second_order_bch_analytical-second_order_bch_numerical) < 1e-14 + + third_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=3) + propagated_errorgen_layers_bch_order_3 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=3) + third_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_3, mx_basis='pp', return_dense=True) + assert np.linalg.norm(third_order_bch_analytical-third_order_bch_numerical) < 1e-14 + + fourth_order_bch_numerical = 
_eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=4) + propagated_errorgen_layers_bch_order_4 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=4) + fourth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_4, mx_basis='pp', return_dense=True) + assert np.linalg.norm(fourth_order_bch_analytical-fourth_order_bch_numerical) < 1e-14 + + fifth_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=5) + propagated_errorgen_layers_bch_order_5 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=5, truncation_threshold=0) + fifth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_5, mx_basis='pp', return_dense=True) + assert np.linalg.norm(fifth_order_bch_analytical-fifth_order_bch_numerical) < 1e-14 + + exact_errorgen = logm(self.errorgen_propagator.eoc_error_channel(self.circuit)) + exact_vs_first_order_norm = np.linalg.norm(first_order_bch_analytical-exact_errorgen) + exact_vs_second_order_norm = np.linalg.norm(second_order_bch_analytical-exact_errorgen) + exact_vs_third_order_norm = np.linalg.norm(third_order_bch_analytical-exact_errorgen) + exact_vs_fourth_order_norm = np.linalg.norm(fourth_order_bch_analytical-exact_errorgen) + exact_vs_fifth_order_norm = np.linalg.norm(fifth_order_bch_analytical-exact_errorgen) + + self.assertTrue((exact_vs_first_order_norm > exact_vs_second_order_norm) and (exact_vs_second_order_norm > exact_vs_third_order_norm) + and (exact_vs_third_order_norm > exact_vs_fourth_order_norm) and (exact_vs_fourth_order_norm > exact_vs_fifth_order_norm)) + class ApproxStabilizerProbTester(BaseCase): def setUp(self): num_qubits = 4 @@ -161,7 +228,7 @@ def setUp(self): self.circuit_tableau_alt = self.circuit_alt.convert_to_stim_tableau() #also create a 3-qubit pspec for making some 
tests faster. - num_qubits = 4 + num_qubits = 3 gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] availability = {'Gcphase':[(0,1), (1,2)]} pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) @@ -266,15 +333,6 @@ def test_stabilizer_probability_correction(self): for order in orders: _eprop.stabilizer_probability_correction(self.propagated_errorgen_layer, self.circuit_tableau, bitstring, order) - #def test_iterative_error_generator_composition(self): - # test_labels = [(_LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')])), - # (_LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')])), - # (_LSE('S', [stim.PauliString('YY')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')]))] - # rates = [(1,1,1), (1,1,1), (1,1,1)] - # - # correct_iterative_compositions = [ [], {_LSE('A', [stim.PauliString('IX'), stim.PauliString('XX')])} - # ] - def test_approximate_stabilizer_probability(self): exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=1) From 86b40967c843c17ac15c2efee54ae6104f804db9 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 8 Feb 2025 22:06:17 -0700 Subject: [PATCH 071/102] Additional propagation related unit tests Add additional unit tests for correctness testing the calculation of end of circuit error channels. 
--- test/unit/objects/test_errorgenpropagation.py | 35 +++++++++++++++---- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 22ca02a33..29d23c446 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -48,16 +48,39 @@ def test_approx_propagation_probabilities_BCH(self): probabilities_BCH_order_2 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=2) probabilities_BCH_order_3 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=3) probabilities_BCH_order_4 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=4) - + probabilities_BCH_order_5 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=5) probabilities_forward_simulation = probabilities_fwdsim(self.error_model, self.circuit) #use a much looser constraint on the agreement between the BCH results and forward simulation. Mostly testing to catch things exploding. 
- self.assertTrue(np.linalg.norm(probabilities_BCH_order_1 - probabilities_forward_simulation, ord=1) < 1e-2) - self.assertTrue(np.linalg.norm(probabilities_BCH_order_2 - probabilities_forward_simulation, ord=1) < 1e-2) - self.assertTrue(np.linalg.norm(probabilities_BCH_order_3 - probabilities_forward_simulation, ord=1) < 1e-2) - self.assertTrue(np.linalg.norm(probabilities_BCH_order_4 - probabilities_forward_simulation, ord=1) < 1e-2) + TVD_order_1 = np.linalg.norm(probabilities_BCH_order_1 - probabilities_forward_simulation, ord=1) + TVD_order_2 = np.linalg.norm(probabilities_BCH_order_2 - probabilities_forward_simulation, ord=1) + TVD_order_3 = np.linalg.norm(probabilities_BCH_order_3 - probabilities_forward_simulation, ord=1) + TVD_order_4 = np.linalg.norm(probabilities_BCH_order_4 - probabilities_forward_simulation, ord=1) + TVD_order_5 = np.linalg.norm(probabilities_BCH_order_5 - probabilities_forward_simulation, ord=1) + + #loose bound is just to make sure nothing exploded. + self.assertTrue(TVD_order_1 < 1e-2) + self.assertTrue(TVD_order_2 < 1e-2) + self.assertTrue(TVD_order_3 < 1e-2) + self.assertTrue(TVD_order_4 < 1e-2) + self.assertTrue(TVD_order_5 < 1e-2) + + #also assert that the TVDs get smaller in general as you go up in order. 
+ self.assertTrue((TVD_order_1>TVD_order_2) and (TVD_order_2>TVD_order_3) and (TVD_order_3>TVD_order_4) and (TVD_order_4>TVD_order_5)) + + def test_eoc_error_channel(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + eoc_error_channel = error_propagator.eoc_error_channel(self.circuit) + + #manually compute end-of-circuit error generator + ideal_channel = self.target_model.sim.product(self.circuit) + noisy_channel_exact = self.error_model.sim.product(self.circuit) + eoc_error_channel_exact = noisy_channel_exact@ideal_channel.conj().T + + assert np.linalg.norm(eoc_error_channel - eoc_error_channel_exact) < 1e-10 + + - class LocalStimErrorgenLabelTester(BaseCase): def setUp(self): self.local_eel = LocalElementaryErrorgenLabel('C', ['XX', 'YY']) From 5188b1d0d6c4693f02940a3b5d3500ebfa6cae24 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 8 Feb 2025 23:02:30 -0700 Subject: [PATCH 072/102] Minor tweak Minor tweak to avoid constructing the hashable basis element labels twice. --- pygsti/errorgenpropagation/localstimerrorgen.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index 247fbbcf6..019e169d8 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -147,7 +147,7 @@ def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initi self._hashable_string_rep = self.errorgen_type.join(pauli_str_reps) else: self._hashable_basis_element_labels = self.bel_to_strings() - self._hashable_string_rep = self.errorgen_type.join(self.bel_to_strings()) + self._hashable_string_rep = self.errorgen_type.join(self._hashable_basis_element_labels) #additionally store a copy of the value of the original error generator label which will remain unchanged #during the course of propagation for later bookkeeping purposes. 
@@ -165,7 +165,7 @@ def bel_to_strings(self): """ Convert the elements of `basis_element_labels` to python strings (from stim.PauliString(s)) and return as a tuple. - """ + """ return tuple([str(ps)[1:].replace('_',"I") for ps in self.basis_element_labels]) From ebe8b089810d96cde18ad8127388462f6d8c11ec Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 9 Feb 2025 17:34:23 -0700 Subject: [PATCH 073/102] Spring cleaning and documentation updates Clean up and add missing documentation to the LindbladErrorgen module. Also bring up to date previously out-of-date docstrings referring to previous implementation details no longer relevant. Additionally remove old commented out debug statements and unused/vestigial code. Fix a few references to now deprecated parameterizations. --- pygsti/errorgenpropagation/errorpropagator.py | 7 +- .../operations/lindbladerrorgen.py | 825 +++++++----------- test/unit/objects/test_errorgenpropagation.py | 3 +- 3 files changed, 336 insertions(+), 499 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index 788e6fa05..544c0fda5 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -2,7 +2,7 @@ import numpy as _np import scipy.linalg as _spl from .localstimerrorgen import LocalStimErrorgenLabel as _LSE -from numpy import abs,zeros, complex128 +from numpy import zeros, complex128 from numpy.linalg import multi_dot from scipy.linalg import expm from pygsti.tools.internalgates import standard_gatenames_stim_conversions @@ -167,10 +167,7 @@ def averaged_eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam #prop_contrib = amam else: pass - - - - + #loop though the propagated error generator layers and construct their error generators. 
#Then exponentiate diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index f24cbf7d3..24073b817 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -11,29 +11,23 @@ #*************************************************************************************************** import warnings as _warnings -import collections as _collections -import copy as _copy import itertools as _itertools import numpy as _np import scipy.linalg as _spl import scipy.sparse as _sps -import scipy.sparse.linalg as _spsl from pygsti.baseobjs.opcalc import compact_deriv as _compact_deriv, \ bulk_eval_compact_polynomials_complex as _bulk_eval_compact_polynomials_complex, \ abs_sum_bulk_eval_compact_polynomials_complex as _abs_sum_bulk_eval_compact_polynomials_complex from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock -from pygsti.modelmembers import term as _term from pygsti.evotypes import Evotype as _Evotype from pygsti.baseobjs import statespace as _statespace -from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis -from pygsti.baseobjs.polynomial import Polynomial as _Polynomial +from pygsti.baseobjs.basis import Basis as _Basis from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel -from pygsti.tools import basistools as _bt from pygsti.tools import matrixtools as _mt from pygsti.tools import optools as _ot @@ -42,75 +36,67 @@ class LindbladErrorgen(_LinearOperator): """ - An Lindblad-form error generator. 
- - This error generator consisting of terms that, with appropriate constraints - ensurse that the resulting (after exponentiation) operation/layer operation - is CPTP. These terms can be divided into "Hamiltonian"-type terms, which - map rho -> i[H,rho] and "non-Hamiltonian"/"other"-type terms, which map rho - -> A rho B + 0.5*(ABrho + rhoAB). - - Parameters - ---------- - dim : int - The Hilbert-Schmidt (superoperator) dimension, which will be the - dimension of the created operator. - - lindblad_term_dict : dict - A dictionary specifying which Linblad terms are present in the - parameteriztion. Keys are `(termType, basisLabel1, )` - tuples, where `termType` can be `"H"` (Hamiltonian), `"S"` - (Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always - have a single basis label (so key is a 2-tuple) whereas Stochastic - tuples with 1 basis label indicate a *diagonal* term, and are the - only types of terms allowed when `nonham_mode != "all"`. Otherwise, - Stochastic term tuples can include 2 basis labels to specify - "off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be - strings or integers. Values are complex coefficients. - - basis : Basis, optional - A basis mapping the labels used in the keys of `lindblad_term_dict` to - basis matrices (e.g. numpy arrays or Scipy sparse matrices). - - param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - Describes how the Lindblad coefficients/projections relate to the - error generator's parameter values. Allowed values are: - `"unconstrained"` (coeffs are independent unconstrained parameters), - `"cptp"` (independent parameters but constrained so map is CPTP), - `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - - nonham_mode : {"diagonal", "diag_affine", "all"} - Which non-Hamiltonian Lindblad projections are potentially non-zero. - Allowed values are: `"diagonal"` (only the diagonal Lind. 
coeffs.), - `"diag_affine"` (diagonal coefficients + affine projections), and - `"all"` (the entire matrix of coefficients is allowed). - - truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in - order to meet constraints (e.g. to preserve CPTP) when necessary. - If False, then an error is thrown when the given dictionary of - Lindblad terms doesn't conform to the constrains. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - The basis for this error generator's linear mapping. Allowed - values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt) (or a custom basis object). - - evotype : {"densitymx","svterm","cterm"} - The evolution type of the error generator being constructed. - `"densitymx"` means the usual Lioville density-matrix-vector - propagation via matrix-vector products. `"svterm"` denotes - state-vector term-based evolution (action of operation is obtained by - evaluating the rank-1 terms up to some order). `"cterm"` is similar - but uses Clifford operation action on stabilizer states. + A class for representing noisy quantum operations using Lindblad error generators. """ - _generators_cache = {} # a custom cache for _init_generators method calls - @classmethod - def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks, lindblad_basis='auto', + def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks, elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): + """ + Creates a Lindblad-parameterized error generator from an operation and a set + of `LindbladCoefficientBlock`s. + + Here "operation" means the exponentiated error generator, so this method + essentially takes the matrix log of `op_matrix` and constructs an error + generator from this by subsequently projecting this constructed error generator + onto the specified `LindbladCoefficientBlock`s. 
Note that since these blocks are + user specified this projection may not be complete. E.g. passing in a general operation consisting + of non-trivial 'H', 'S', 'C' and 'A' generators together with a single `LindbladCoefficientBlock` + for storing 'H' terms will result in an 'H'-only generator. + + Parameters + ---------- + op_matrix : numpy array or SciPy sparse matrix + a square 2D array that gives the raw operation matrix, assumed to + be in the `mx_basis` basis, to parameterize. The shape of this + array sets the dimension of the operation. + + lindblad_coefficient_blocks : list of `LindbladCoefficientBlocks` + List of `LindbladCoefficientBlocks` for storing the input error generator data + given the projections onto these blocks. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... + + truncate : bool, optional + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. + + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). 
`"cterm"` is similar but uses Clifford operation + action on stabilizer states. + + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. + + Returns + ------- + `LindbladErrorgen` + """ + sparseOp = _sps.issparse(op_matrix) #Init base from error generator: sets basis members and ultimately @@ -130,10 +116,10 @@ def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks mx_basis, "logGTi") for blk in lindblad_coefficient_blocks: blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) - return cls(lindblad_coefficient_blocks, lindblad_basis, mx_basis, evotype, state_space) + return cls(lindblad_coefficient_blocks, elementary_errorgen_basis, mx_basis, evotype, state_space) @classmethod - def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basis='PP', + def from_operation_matrix(cls, op_matrix, parameterization='CPTPLND', elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ Creates a Lindblad-parameterized error generator from an operation. @@ -147,56 +133,54 @@ def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basi op_matrix : numpy array or SciPy sparse matrix a square 2D array that gives the raw operation matrix, assumed to be in the `mx_basis` basis, to parameterize. The shape of this - array sets the dimension of the operation. If None, then it is assumed - equal to `unitary_postfactor` (which cannot also be None). The - quantity `op_matrix inv(unitary_postfactor)` is parameterized via - projection onto the Lindblad terms. - - ham_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - The basis is used to construct the Hamiltonian-type lindblad error - Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt), list of numpy arrays, or a custom basis object. 
- - nonham_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - The basis is used to construct the non-Hamiltonian (generalized - Stochastic-type) lindblad error Allowed values are Matrix-unit - (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt), list of - numpy arrays, or a custom basis object. - - param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - Describes how the Lindblad coefficients/projections relate to the - operation's parameter values. Allowed values are: - `"unconstrained"` (coeffs are independent unconstrained parameters), - `"cptp"` (independent parameters but constrained so map is CPTP), - `"reldepol"` (all non-Ham. diagonal coeffs take the *same* value), - `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - - nonham_mode : {"diagonal", "diag_affine", "all"} - Which non-Hamiltonian Lindblad projections are potentially non-zero. - Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.), - `"diag_affine"` (diagonal coefficients + affine projections), and - `"all"` (the entire matrix of coefficients is allowed). + array sets the dimension of the operation. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. + See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. 
+ - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in order to meet constraints (e.g. to preserve CPTP) when necessary. - If False, then an error is thrown when the given `operation` cannot - be realized by the specified set of Lindblad projections. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - The source and destination basis, respectively. Allowed - values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt) (or a custom basis object). - - evotype : Evotype or str, optional - The evolution type. The special value `"default"` is equivalent - to specifying the value of `pygsti.evotypes.Evotype.default_evotype`. + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. 
`"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. - state_space : TODO docstring + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. Returns ------- - LindbladOp + `LindbladErrorgen` """ #Compute an errorgen from the given op_matrix. Works with both @@ -219,99 +203,142 @@ def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basi else: errgenMx = _ot.error_generator(op_matrix, _np.identity(op_matrix.shape[0], 'd'), mx_basis, "logGTi") - return cls.from_error_generator(errgenMx, parameterization, lindblad_basis, + return cls.from_error_generator(errgenMx, parameterization, elementary_errorgen_basis, mx_basis, truncate, evotype, state_space=state_space) @classmethod - def from_error_generator(cls, errgen_or_dim, parameterization="CPTP", lindblad_basis='PP', mx_basis='pp', + def from_error_generator(cls, errgen_or_dim, parameterization="CPTPLND", elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ - TODO: docstring - take from now-private version below Note: errogen_or_dim can be an integer => zero errgen + Construct a new `LindbladErrorgen` instance instantiated using a dense numpy array or sparse + scipy array representation. + + Parameters + ---------- + errgen_or_dim : numpy array or SciPy sparse matrix or int + A square 2D array that gives the full error generator, or an integer specifying the dimension + of an empty (all-zeros) 2D array to construct. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. 
This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. + See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. + - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... + + truncate : bool, optional + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. + + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. 
`"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. + + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. + + Returns + ------- + `LindbladErrorgen` """ errgen = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim - return cls._from_error_generator(errgen, parameterization, lindblad_basis, + return cls._from_error_generator(errgen, parameterization, elementary_errorgen_basis, mx_basis, truncate, evotype, state_space) @classmethod def from_error_generator_and_blocks(cls, errgen_or_dim, lindblad_coefficient_blocks, - lindblad_basis='PP', mx_basis='pp', + elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ - TODO: docstring - take from now-private version below Note: errogen_or_dim can be an integer => zero errgen - """ - errgenMx = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ - if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim - for blk in lindblad_coefficient_blocks: - blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) - return cls(lindblad_coefficient_blocks, lindblad_basis, mx_basis, evotype, state_space) - - @classmethod - def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis="PP", - mx_basis="pp", truncate=True, evotype="default", state_space=None): - """ - Create a Lindblad-form error generator from an error generator matrix and a basis. - TODO: fix docstring -- ham/nonham_basis ==> lindblad_basis + Creates a Lindblad-parameterized error generator from an operation and a set + of `LindbladCoefficientBlock`s. - The basis specifies how to decompose (project) the error generator. 
+ Here "operation" means the exponentiated error generator, so this method + essentially takes the matrix log of `op_matrix` and constructs an error + generator from this by subsequently projecting this constructed error generator + onto the specified `LindbladCoefficientBlock`s. Note that since these blocks are + user specified this projection may not be complete. E.g. passing in a general operation consisting + of non-trivial 'H', 'S', 'C' and 'A' generators together with a single `LindbladCoefficientBlock` + for storing 'H' terms will result in an 'H'-only generator. Parameters ---------- - errgen : numpy array or SciPy sparse matrix - a square 2D array that gives the full error generator. The shape of - this array sets the dimension of the operator. The projections of - this quantity onto the `ham_basis` and `nonham_basis` are closely - related to the parameters of the error generator (they may not be - exactly equal if, e.g `cptp=True`). - - ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - The basis is used to construct the Hamiltonian-type lindblad error - Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt), list of numpy arrays, or a custom basis object. - - nonham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object - The basis is used to construct the non-Hamiltonian-type lindblad error - Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt), list of numpy arrays, or a custom basis object. - - param_mode : {"unconstrained", "cptp", "depol", "reldepol"} - Describes how the Lindblad coefficients/projections relate to the - operation's parameter values. Allowed values are: - `"unconstrained"` (coeffs are independent unconstrained parameters), - `"cptp"` (independent parameters but constrained so map is CPTP), - `"reldepol"` (all non-Ham. 
diagonal coeffs take the *same* value), - `"depol"` (same as `"reldepol"` but coeffs must be *positive*) - - nonham_mode : {"diagonal", "diag_affine", "all"} - Which non-Hamiltonian Lindblad projections are potentially non-zero. - Allowed values are: `"diagonal"` (only the diagonal Lind. coeffs.), - `"diag_affine"` (diagonal coefficients + affine projections), and - `"all"` (the entire matrix of coefficients is allowed). - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object - The source and destination basis, respectively. Allowed - values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt) (or a custom basis object). + errgen_or_dim : numpy array or SciPy sparse matrix or int + A square 2D array that gives the full error generator, or an integer specifying the dimension + of an empty (all-zeros) 2D array to construct. + + lindblad_coefficient_blocks : list of `LindbladCoefficientBlocks` + List of `LindbladCoefficientBlocks` for storing the input error generator data + given the projections onto these blocks. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in order to meet constraints (e.g. to preserve CPTP) when necessary. - If False, then an error is thrown when the given `errgen` cannot - be realized by the specified set of Lindblad projections. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. 
- evotype : {"densitymx","svterm","cterm"} + evotype : str or `Evotype`, optional (default 'default') The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. `"densitymx"` means usual Lioville density-matrix-vector propagation via matrix-vector products. `"svterm"` denotes state-vector term- based evolution (action of operation is obtained by evaluating the rank-1 terms up to some order). `"cterm"` is similar but uses Clifford operation action on stabilizer states. - state_space : TODO docstring + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. Returns ------- - LindbladErrorgen + `LindbladErrorgen` + """ + errgenMx = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ + if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim + for blk in lindblad_coefficient_blocks: + blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) + return cls(lindblad_coefficient_blocks, elementary_errorgen_basis, mx_basis, evotype, state_space) + + @classmethod + def _from_error_generator(cls, errgen, parameterization="CPTPLND", elementary_errorgen_basis="PP", + mx_basis="pp", truncate=True, evotype="default", state_space=None): + """ + See `from_error_generator` for more details. """ dim = errgen.shape[0] @@ -322,12 +349,12 @@ def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis=" # given to us are sparse or not and make them all consistent # (maybe this is needed by lindblad_errorgen_projections call below?) 
sparse = None - if isinstance(lindblad_basis, _Basis): - sparse = lindblad_basis.sparse + if isinstance(elementary_errorgen_basis, _Basis): + sparse = elementary_errorgen_basis.sparse else: - if isinstance(lindblad_basis, str): sparse = _sps.issparse(errgen) - elif len(lindblad_basis) > 0: sparse = _sps.issparse(lindblad_basis[0]) - lindblad_basis = _Basis.cast(lindblad_basis, dim, sparse=sparse) + if isinstance(elementary_errorgen_basis, str): sparse = _sps.issparse(errgen) + elif len(elementary_errorgen_basis) > 0: sparse = _sps.issparse(elementary_errorgen_basis[0]) + elementary_errorgen_basis = _Basis.cast(elementary_errorgen_basis, dim, sparse=sparse) if sparse is None: sparse = False # the default @@ -342,7 +369,7 @@ def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis=" # Create blocks based on bases along - no specific errorgen labels blocks = [] for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): - blk = _LindbladCoefficientBlock(blk_type, lindblad_basis, param_mode=blk_param_mode) + blk = _LindbladCoefficientBlock(blk_type, elementary_errorgen_basis, param_mode=blk_param_mode) blk.set_from_errorgen_projections(errgen, matrix_basis, truncate=truncate) blocks.append(blk) @@ -352,7 +379,66 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' @classmethod def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto', elementary_errorgen_basis='PP', mx_basis="pp", truncate=True, evotype="default", state_space=None): - """TODO: docstring""" + """ + Construct a new `LindbladErrorgen` instance instantiated using a dictionary of elementary error generator + coefficients and rates. + + Parameters + ---------- + elementary_errorgens : dict + a dictionary whose keys are `ElementaryErrorgenLabel` instances (or tuples + castable to them), each wrapping the `(termType, basisLabel1, )`
 + information for an elementary error generator, and whose values are the + corresponding error generator rates/coefficients. The basis element + subscripts in these labels should be compatible with `elementary_errorgen_basis`. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. + See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. + - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... + + truncate : bool, optional + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. 
+ If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. + + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Liouville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. + + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. + + Returns + ------- + `LindbladErrorgen` + """ state_space = _statespace.StateSpace.cast(state_space) dim = state_space.dim # Store superop dimension basis = _Basis.cast(elementary_errorgen_basis, dim) @@ -388,7 +474,7 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' return cls(blocks, basis, mx_basis, evotype, state_space) - def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis='pp', + def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto', mx_basis='pp', evotype="default", state_space=None): if isinstance(lindblad_coefficient_blocks, dict): # backward compat warning @@ -414,15 +500,15 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= state_space = _statespace.StateSpace.cast(state_space) dim = state_space.dim # Store superop dimension - if lindblad_basis == "auto": + if elementary_errorgen_basis == "auto": assert(all([(blk._basis is not None) for blk in lindblad_coefficient_blocks])), \ - "When `lindblad_basis == 'auto'`, the supplied coefficient blocks must have valid bases!" 
- default_lindblad_basis = None + "When `elementary_errorgen_basis == 'auto'`, the supplied coefficient blocks must have valid bases!" + default_elementary_errorgen_basis = None else: - default_lindblad_basis = _Basis.cast(lindblad_basis, dim, sparse=sparse_bases) + default_elementary_errorgen_basis = _Basis.cast(elementary_errorgen_basis, dim, sparse=sparse_bases) for blk in lindblad_coefficient_blocks: - if blk._basis is None: blk._basis = default_lindblad_basis + if blk._basis is None: blk._basis = default_elementary_errorgen_basis elif blk._basis.sparse != sparse_bases: # update block bases to desired sparsity if needed blk._basis = blk._basis.with_sparsity(sparse_bases) @@ -483,139 +569,6 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= assert(self._onenorm_upbound is not None) # _update_rep should set this #Done with __init__(...) - #def _init_generators(self, dim): - # #assumes self.dim, self.ham_basis, self.other_basis, and self.matrix_basis are setup... - # sparse_bases = bool(self._rep_type == 'sparse superop') - # - # #HERE TODO - need to update this / MOVE to block class? 
- # #use caching to increase performance - cache based on all the self.XXX members utilized by this fn - # cache_key = (self._rep_type, self.matrix_basis, self.ham_basis, self.other_basis, self.parameterization) - # #print("cache key = ",self._rep_type, (self.matrix_basis.name, self.matrix_basis.dim), - # # (self.ham_basis.name, self.ham_basis.dim), (self.other_basis.name, self.other_basis.dim), - # # str(self.parameterization)) - # - # if cache_key not in self._generators_cache: - # - # d = int(round(_np.sqrt(dim))) - # assert(d * d == dim), "Errorgen dim must be a perfect square" - # - # # Get basis transfer matrix - # mxBasisToStd = self.matrix_basis.create_transform_matrix( - # _BuiltinBasis("std", self.matrix_basis.dim, sparse_bases)) - # # use BuiltinBasis("std") instead of just "std" in case matrix_basis is a TensorProdBasis - # leftTrans = _spsl.inv(mxBasisToStd.tocsc()).tocsr() if _sps.issparse(mxBasisToStd) \ - # else _np.linalg.inv(mxBasisToStd) - # rightTrans = mxBasisToStd - # - # hamBasisMxs = self.ham_basis.elements - # otherBasisMxs = self.other_basis.elements - # - # hamGens, otherGens = _ot.lindblad_error_generators( - # hamBasisMxs, otherBasisMxs, normalize=False, - # other_mode=self.parameterization.nonham_mode) # in std basis - # - # # Note: lindblad_error_generators will return sparse generators when - # # given a sparse basis (or basis matrices) - # - # if hamGens is not None: - # bsH = len(hamGens) + 1 # projection-basis size (not nec. 
== dim) - # _ot._assert_shape(hamGens, (bsH - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # hamGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)), - # inplace=True, check=True) for mx in hamGens] - # for mx in hamGens: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #hamGens = _np.einsum("ik,akl,lj->aij", leftTrans, hamGens, rightTrans) - # hamGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, hamGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2)) - # else: - # bsH = 0 - # assert(bsH == self.ham_basis_size) - # - # if otherGens is not None: - # - # if self.parameterization.nonham_mode == "diagonal": - # bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim) - # _ot._assert_shape(otherGens, (bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [_mt.safe_real(_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)), - # inplace=True, check=True) for mx in otherGens] - # for mx in otherGens: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,akl,lj->aij", leftTrans, otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 1)), rightTrans, (2, 0)), (1, 0, 2)) - # - # elif self.parameterization.nonham_mode == "diag_affine": - # # projection-basis size (not nec. 
== dim) [~shape[1] but works for lists too] - # bsO = len(otherGens[0]) + 1 - # _ot._assert_shape(otherGens, (2, bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) - # for mx in mxRow] for mxRow in otherGens] - # - # for mxRow in otherGens: - # for mx in mxRow: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans, - # # otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3)) - # - # else: - # bsO = len(otherGens) + 1 # projection-basis size (not nec. == dim) - # _ot._assert_shape(otherGens, (bsO - 1, bsO - 1, dim, dim), sparse_bases) - # - # # apply basis change now, so we don't need to do so repeatedly later - # if sparse_bases: - # otherGens = [[_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) - # for mx in mxRow] for mxRow in otherGens] - # #Note: complex OK here, as only linear combos of otherGens (like (i,j) + (j,i) - # # terms) need to be real - # - # for mxRow in otherGens: - # for mx in mxRow: mx.sort_indices() - # # for faster addition ops in _construct_errgen_matrix - # else: - # #otherGens = _np.einsum("ik,abkl,lj->abij", leftTrans, - # # otherGens, rightTrans) - # otherGens = _np.transpose(_np.tensordot( - # _np.tensordot(leftTrans, otherGens, (1, 2)), rightTrans, (3, 0)), (1, 2, 0, 3)) - # - # else: - # bsO = 0 - # assert(bsO == self.other_basis_size) - # - # if hamGens is not None: - # hamGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in hamGens], 'd') - # else: - # hamGens_1norms = None - # - # if otherGens is not None: - # if self.parameterization.nonham_mode == "diagonal": - # otherGens_1norms = _np.array([_mt.safe_onenorm(mx) for mx in otherGens], 'd') - # else: - # otherGens_1norms = 
_np.array([_mt.safe_onenorm(mx) - # for oGenRow in otherGens for mx in oGenRow], 'd') - # else: - # otherGens_1norms = None - # - # self._generators_cache[cache_key] = (hamGens, otherGens, hamGens_1norms, otherGens_1norms) - # - # cached_hamGens, cached_otherGens, cached_h1norms, cached_o1norms = self._generators_cache[cache_key] - # return (_copy.deepcopy(cached_hamGens), _copy.deepcopy(cached_otherGens), - # cached_h1norms.copy() if (cached_h1norms is not None) else None, - # cached_o1norms.copy() if (cached_o1norms is not None) else None) - def _init_terms(self, coefficient_blocks, max_polynomial_vars): Lterms = []; off = 0 @@ -635,21 +588,6 @@ def _init_terms(self, coefficient_blocks, max_polynomial_vars): ctape = _np.empty(0, complex) coeffs_as_compact_polys = (vtape, ctape) - #DEBUG TODO REMOVE (and make into test) - check norm of rank-1 terms - # (Note: doesn't work for Clifford terms, which have no .base): - # rho =OP=> coeff * A rho B - # want to bound | coeff * Tr(E Op rho) | = | coeff | * | | - # so A and B should be unitary so that | | <= 1 - # but typically these are unitaries / (sqrt(2)*nqubits) - #import bpdb; bpdb.set_trace() - #scale = 1.0 - #for t in Lterms: - # for op in t._rep.pre_ops: - # test = _np.dot(_np.conjugate(scale * op.base.T), scale * op.base) - # assert(_np.allclose(test, _np.identity(test.shape[0], 'd'))) - # for op in t._rep.post_ops: - # test = _np.dot(_np.conjugate(scale * op.base.T), scale * op.base) - # assert(_np.allclose(test, _np.identity(test.shape[0], 'd'))) return Lterms, coeffs_as_compact_polys def _set_params_from_matrix(self, errgen, truncate): @@ -669,7 +607,6 @@ def _set_params_from_matrix(self, errgen, truncate): off += blk.num_params self._update_rep() - #assert(_np.allclose(errgen, self.to_dense())) # DEBUG def _update_rep(self): """ @@ -722,11 +659,9 @@ def _update_rep(self): assert(_np.isclose(_np.linalg.norm(lnd_error_gen.imag), 0)), \ "Imaginary error gen norm: %g" % _np.linalg.norm(lnd_error_gen.imag) - 
#print("errgen pre-real = \n"); _mt.print_mx(lnd_error_gen,width=4,prec=1) self._rep.base[:, :] = lnd_error_gen.real self._onenorm_upbound = onenorm - #assert(self._onenorm_upbound >= _np.linalg.norm(self.to_dense(), ord=1) - 1e-6) #DEBUG def to_dense(self, on_space='minimal'): """ @@ -775,30 +710,6 @@ def to_sparse(self, on_space='minimal'): else: # dense rep return _sps.csr_matrix(self.to_dense(on_space)) - #def torep(self): - # """ - # Return a "representation" object for this error generator. - # - # Such objects are primarily used internally by pyGSTi to compute - # things like probabilities more efficiently. - # - # Returns - # ------- - # OpRep - # """ - # if self._evotype == "densitymx": - # if self._rep_type == 'sparse superop': - # A = self.err_gen_mx - # return replib.DMOpRepSparse( - # _np.ascontiguousarray(A.data), - # _np.ascontiguousarray(A.indices, _np.int64), - # _np.ascontiguousarray(A.indptr, _np.int64)) - # else: - # return replib.DMOpRepDense(_np.ascontiguousarray(self.err_gen_mx, 'd')) - # else: - # raise NotImplementedError("torep(%s) not implemented for %s objects!" % - # (self._evotype, self.__class__.__name__)) - def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False): """ Get the `order`-th order Taylor-expansion terms of this operation. 
@@ -902,23 +813,6 @@ def total_term_magnitude_deriv(self): assert(_np.linalg.norm(_np.imag(ret)) < 1e-8) return ret.real - #DEBUG - #ret2 = _np.empty(self.num_params,'d') - #eps = 1e-8 - #orig_vec = self.to_vector().copy() - #f0 = sum([abs(coeff) for coeff in coeff_values]) - #for i in range(self.num_params): - # v = orig_vec.copy() - # v[i] += eps - # new_coeff_values = _bulk_eval_compact_polynomials_complex(vtape, ctape, v, (len(self.Lterms),)) - # ret2[i] = ( sum([abs(coeff) for coeff in new_coeff_values]) - f0 ) / eps - - #test3 = _np.linalg.norm(ret-ret2) - #print("TEST3 = ",test3) - #if test3 > 10.0: - # import bpdb; bpdb.set_trace() - #return ret - @property def num_params(self): """ @@ -979,12 +873,11 @@ def from_vector(self, v, close=False, dirty_value=True): def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ - TODO: docstring Constructs a dictionary of the Lindblad-error-generator coefficients of this error generator. - Note that these are not necessarily the parameter values, as these - coefficients are generally functions of the parameters (so as to keep - the coefficients positive, for instance). + Note that these are not necessarily the parameter values as those parameter value + correspond to the internal representation utilized, which may be constructed to + enforce positivity constraints, for instance. Parameters ---------- @@ -1009,18 +902,16 @@ def coefficients(self, return_basis=False, logscale_nonham=False, label_type='gl Returns ------- - Ltermdict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are integers starting at 0. 
Values are complex - coefficients. + elem_errorgens : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. + basis : Basis A Basis mapping the basis labels used in the - keys of `Ltermdict` to basis matrices. + keys of `elem_errorgens` to basis matrices. """ assert label_type=='global' or label_type=='local', "Allowed values of label_type are 'global' and 'local'." @@ -1077,9 +968,12 @@ def coefficient_labels(self, label_type='global'): Returns ------- - tuple - A tuple of (, [,)` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ labels = [] for blk in self.coefficient_blocks: @@ -1117,7 +1011,6 @@ def coefficients_array(self): combination of standard error generators that is this error generator. """ # Note: ret will be complex if any block's data is - #ret = _np.concatenate([blk.block_data.flat for blk in self.coefficient_blocks]) ret = _np.concatenate([list(blk.elementary_errorgens.values()) for blk in self.coefficient_blocks]) if self._coefficient_weights is not None: ret *= self._coefficient_weights @@ -1184,15 +1077,12 @@ def error_rates(self, label_type='global'): Returns ------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. 
Values are real error rates except for the 2-basis-label - case. + elem_errorgens : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type) @@ -1200,21 +1090,19 @@ def set_coefficients(self, elementary_errorgens, action="update", logscale_nonha """ Sets the coefficients of elementary error generator terms in this error generator. - TODO: docstring update - The dictionary `lindblad_term_dict` has tuple-keys describing the type - of term and the basis elements used to construct it, e.g. `('H','X')`. + The dictionary `elementary_errorgens` has keys which are `ElementaryErrorgenLabel`s + describing the type of term and the basis elements used to construct it, e.g. `('H','X')`, + together with the corresponding rates. Parameters ---------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are the coefficients of these error generators, - and should be real except for the 2-basis-label case. + elementary_errorgens : dict + Dictionary whose keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient, and whose + values are corresponding error generator rates for each coefficient. 
+ Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -1293,22 +1181,23 @@ def set_error_rates(self, elementary_errorgens, action="update"): """ Sets the coeffcients of elementary error generator terms in this error generator. - TODO: update docstring Coefficients are set so that the contributions of the resulting - channel's error rate are given by the values in `lindblad_term_dict`. + channel's error rate are given by the values in `elementary_errorgens`. See :meth:`error_rates` for more details. Parameters ---------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are real error rates except for the 2-basis-label - case, when they may be complex. + elementary_errorgens : dict + Dictionary whose keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient, and whose + values are corresponding error generator rates for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. + + action : {"update","add","reset"} + How the values in `lindblad_term_dict` should be combined with existing + error-generator coefficients. 
action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -1320,9 +1209,13 @@ def set_error_rates(self, elementary_errorgens, action="update"): """ self.set_coefficients(elementary_errorgens, action, logscale_nonham=True) - def coefficient_weights(self, weights): + def coefficient_weights(self): """ - TODO: docstring + Return a dictionary whose keys are error generator coefficients, as given by + :meth:`coefficient_labels`, and whose values are the weights that have been specified + for those coefficients. Note that weight != rate! These weights are used in conjunction + with certain penalty factor options available in the construction of objective functions + for parameter estimation purposes, and are not generally used outside of that setting. """ coeff_labels = self.coefficient_labels() lbl_lookup = {i: lbl for i, lbl in enumerate(coeff_labels)} @@ -1338,7 +1231,12 @@ def set_coefficient_weights(self, weights): def set_coefficient_weights(self, weights): """ - TODO: docstring + Set the weights for the error generator coefficients in this error generator using a + dictionary whose keys are error generator coefficients, as given by + :meth:`coefficient_labels`, and whose values are the weights that have been specified + for those coefficients. Note that weight != rate! These weights are used in conjunction + with certain penalty factor options available in the construction of objective functions + for parameter estimation purposes, and are not generally used outside of that setting. """ coeff_labels = self.coefficient_labels() ilbl_lookup = {lbl: i for i, lbl in enumerate(coeff_labels)} @@ -1389,56 +1287,7 @@ def transform_inplace(self, s): raise ValueError("Invalid transform for this LindbladErrorgen: type %s" % str(type(s))) - #I don't think this is ever needed - #def spam_transform_inplace(self, s, typ): - # """ - # Update operation matrix `O` with `inv(s) * O` OR `O * s`, depending on the value of `typ`. 
- # - # This functions as `transform_inplace(...)` but is used when this - # Lindblad-parameterized operation is used as a part of a SPAM - # vector. When `typ == "prep"`, the spam vector is assumed - # to be `rho = dot(self, )`, which transforms as - # `rho -> inv(s) * rho`, so `self -> inv(s) * self`. When - # `typ == "effect"`, `e.dag = dot(e.dag, self)` (not that - # `self` is NOT `self.dag` here), and `e.dag -> e.dag * s` - # so that `self -> self * s`. - # - # Parameters - # ---------- - # s : GaugeGroupElement - # A gauge group element which specifies the "s" matrix - # (and it's inverse) used in the above similarity transform. - # - # typ : { 'prep', 'effect' } - # Which type of SPAM vector is being transformed (see above). - # - # Returns - # ------- - # None - # """ - # assert(typ in ('prep', 'effect')), "Invalid `typ` argument: %s" % typ - # - # if isinstance(s, _gaugegroup.UnitaryGaugeGroupElement) or \ - # isinstance(s, _gaugegroup.TPSpamGaugeGroupElement): - # U = s.transform_matrix - # Uinv = s.transform_matrix_inverse - # err_gen_mx = self.to_sparse() if self._rep_type == 'sparse superop' else self.to_dense() - # - # #just act on postfactor and Lindbladian exponent: - # if typ == "prep": - # err_gen_mx = _mt.safe_dot(Uinv, err_gen_mx) - # else: - # err_gen_mx = _mt.safe_dot(err_gen_mx, U) - # - # self._set_params_from_matrix(err_gen_mx, truncate=True) - # self.dirty = True - # #Note: truncate=True above because some unitary transforms seem to - # ## modify eigenvalues to be negative beyond the tolerances - # ## checked when truncate == False. - # else: - # raise ValueError("Invalid transform for this LindbladDenseOp: type %s" - # % str(type(s))) - + def deriv_wrt_params(self, wrt_filter=None): """ The element-wise derivative this operation. 
@@ -1669,10 +1518,8 @@ def minimal_from_elementary_errorgens(cls, errs): if any([lbl.errorgen_type == 'S' for lbl in errs]): paramtypes.append('S') if any([lbl.errorgen_type == 'C' for lbl in errs]): paramtypes.append('C') if any([lbl.errorgen_type == 'A' for lbl in errs]): paramtypes.append('A') - #if any([lbl.errorgen_type == 'S' and len(lbl.basis_element_labels) == 2 for lbl in errs]): - # # parameterization must be "CPTP" if there are any ('S',b1,b2) keys if 'C' in paramtypes or 'A' in paramtypes: - parameterization = "CPTP" + parameterization = "CPTPLND" else: parameterization = '+'.join(paramtypes) return cls.cast(parameterization) @@ -1740,12 +1587,6 @@ def __init__(self, block_types, param_modes, abbrev=None, meta=None): self.abbrev = abbrev self.meta = meta - #REMOVE - #self.nonham_block_type = nonham_block_type #nonham_mode - #self.nonham_param_mode = nonham_param_mode #param_mode - #self.include_ham_block = include_ham_block #ham_params_allowed = ham_params_allowed - #self.include_nonham_block = include_nonham_block #nonham_params_allowed = nonham_params_allowed - def __hash__(self): return hash((self.block_types, self.param_modes)) diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 29d23c446..9b5ca5535 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -78,8 +78,7 @@ def test_eoc_error_channel(self): eoc_error_channel_exact = noisy_channel_exact@ideal_channel.conj().T assert np.linalg.norm(eoc_error_channel - eoc_error_channel_exact) < 1e-10 - - + class LocalStimErrorgenLabelTester(BaseCase): def setUp(self): From ef5c7be548fde88dc69c166473a1f9e4357ca05b Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 9 Feb 2025 18:23:24 -0700 Subject: [PATCH 074/102] Unit tests for random error generator gen Add in comprehensive unit testing for new random error generator rate construction function. 
--- test/unit/tools/test_lindbladtools.py | 129 ++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index 0870714e8..95b59dce7 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -2,7 +2,9 @@ import scipy.sparse as sps from pygsti.tools import lindbladtools as lt +from pygsti.modelmembers.operations import LindbladErrorgen from pygsti.baseobjs import Basis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel from ..util import BaseCase @@ -88,3 +90,130 @@ def test_elementary_errorgen_bases(self): dot_mx[i,j] = np.vdot(dual.flatten(), primal.flatten()) self.assertTrue(np.allclose(dot_mx, np.identity(len(lbls), 'd'))) + +class RandomErrorgenRatesTester(BaseCase): + + def test_default_settings(self): + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234) + + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 240) + + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + + def test_sector_restrictions(self): + #H-only: + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H',), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 15) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
+ errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + + #S-only + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('S',), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 15) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + + #H+S + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 15) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + + #H + S + A + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S', 'A'), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 135) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
+ errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + + def test_error_metric_restrictions(self): + #test generator_infidelity + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_infidelity', + error_metric_value=.99, seed=1234) + #confirm this has the correct generator infidelity. + gen_infdl = 0 + for coeff, rate in random_errorgen_rates: + if coeff.errorgen_type == 'H': + gen_infdl+=rate**2 + elif coeff.errorgen_type == 'S': + gen_infdl+=rate + + assert abs(gen_infdl-.99)<1e-5 + + #test generator_error + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_error', + error_metric_value=.99, seed=1234) + #confirm this has the correct generator infidelity. + gen_error = 0 + for coeff, rate in random_errorgen_rates: + if coeff.errorgen_type == 'H': + gen_error+=abs(rate) + elif coeff.errorgen_type == 'S': + gen_error+=rate + + assert abs(gen_error-.99)<1e-5 + + #test relative_HS_contribution: + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_infidelity', + error_metric_value=.99, + relative_HS_contribution=(.5, .5), seed=1234) + #confirm this has the correct generator infidelity contributions. + gen_infdl_H = 0 + gen_infdl_S = 0 + for coeff, rate in random_errorgen_rates: + if coeff.errorgen_type == 'H': + gen_infdl_H+=rate**2 + elif coeff.errorgen_type == 'S': + gen_infdl_S+=rate + + assert abs(gen_infdl_S - gen_infdl_H)<1e-5 + + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_error', + error_metric_value=.99, + relative_HS_contribution=(.5, .5), seed=1234) + #confirm this has the correct generator error contributions. 
+ gen_error_H = 0 + gen_error_S = 0 + for coeff, rate in random_errorgen_rates: + if coeff.errorgen_type == 'H': + gen_error_H+=abs(rate) + elif coeff.errorgen_type == 'S': + gen_error_S+=rate + + assert abs(gen_error_S - gen_error_H)<1e-5 + + def test_fixed_errorgen_rates(self): + fixed_rates_dict = {GlobalElementaryErrorgenLabel('H', ('X',), (0,)): 1} + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + fixed_errorgen_rates=fixed_rates_dict, + seed=1234) + + self.assertEqual(random_errorgen_rates[GlobalElementaryErrorgenLabel('H', ('X',), (0,))], 1) + + def test_label_type(self): + + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + label_type='local', seed=1234) + assert isinstance(next(iter(random_errorgen_rates)), LocalElementaryErrorgenLabel) + + def test_sslbl_overlap(self): + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + sslbl_overlap=(0,), + seed=1234) + for coeff in random_errorgen_rates: + assert 0 in coeff.sslbls + From 17d1f73534bfcc946261ce6b0b2f3439c7a29471 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 9 Feb 2025 22:12:14 -0700 Subject: [PATCH 075/102] New test module for error generator bases Brand new test module providing coverage for the error generator basis classes in errorgenbasis.py. Also include a patch to the ExplicitElementaryErrorgenBasis class related to compatibility for LocalElementaryErrorgenLabel in create_subbasis. 
--- pygsti/baseobjs/errorgenbasis.py | 35 +++- test/unit/objects/test_errorgenbasis.py | 256 ++++++++++++++++++++++++ test/unit/tools/test_errgenproptools.py | 2 +- 3 files changed, 282 insertions(+), 11 deletions(-) create mode 100644 test/unit/objects/test_errorgenbasis.py diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 54da66548..7b3548498 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -61,7 +61,6 @@ def _all_elements_same_type(lst): return False return True -#TODO: Unit Testing class ExplicitElementaryErrorgenBasis(ElementaryErrorgenBasis): """ This basis object contains the information necessary for building, @@ -222,17 +221,33 @@ def create_subbasis(self, sslbl_overlap): one of these qudits) in order to be included in this subbasis. """ - sub_sslbls = set(sslbl_overlap) + #need different logic for LocalElementaryErrorgenLabels + if isinstance(self.labels[0], _GlobalElementaryErrorgenLabel): + sub_sslbls = set(sslbl_overlap) + def overlaps(sslbls): + ret = len(set(sslbls).intersection(sslbl_overlap)) > 0 + if ret: sub_sslbls.update(sslbls) # keep track of all overlaps + return ret - def overlaps(sslbls): - ret = len(set(sslbls).intersection(sslbl_overlap)) > 0 - if ret: sub_sslbls.update(sslbls) # keep track of all overlaps - return ret - - sub_labels, sub_indices = zip(*[(lbl, i) for i, lbl in enumerate(self._labels) - if overlaps(lbl[0])]) + sub_labels, sub_indices = zip(*[(lbl, i) for i, lbl in enumerate(self._labels) + if overlaps(lbl[0])]) + sub_state_space = self.state_space.create_subspace(sub_sslbls) + else: + sub_labels = [] + for lbl in self.labels: + non_trivial_bel_indices = [] + for bel in lbl.basis_element_labels: + for i,subbel in enumerate(bel): + if subbel != 'I': + non_trivial_bel_indices.append(i) + non_trivial_bel_indices = set(non_trivial_bel_indices) + for sslbl in sslbl_overlap: + if sslbl in non_trivial_bel_indices: + sub_labels.append(lbl) + break + #since 
using local labels keep the full original state space (the labels won't have gotten any shorter). + sub_state_space = self.state_space.copy() - sub_state_space = self.state_space.create_subspace(sub_sslbls) return ExplicitElementaryErrorgenBasis(sub_state_space, sub_labels, self._basis_1q) def union(self, other_basis): diff --git a/test/unit/objects/test_errorgenbasis.py b/test/unit/objects/test_errorgenbasis.py new file mode 100644 index 000000000..fec7caf1c --- /dev/null +++ b/test/unit/objects/test_errorgenbasis.py @@ -0,0 +1,256 @@ +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis, ExplicitElementaryErrorgenBasis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel +from pygsti.baseobjs import BuiltinBasis, QubitSpace +from ..util import BaseCase + +class CompleteElementaryErrorgenBasisTester(BaseCase): + + def setUp(self): + self.basis_1q = BuiltinBasis('PP', 4) + self.state_space_1Q = QubitSpace(1) + self.state_space_2Q = QubitSpace(2) + + #create a complete basis with default settings for reuse. + self.complete_errorgen_basis_default_1Q = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q) + + def test_default_construction(self): + assert len(self.complete_errorgen_basis_default_1Q.labels) == 12 + #may as well also test the __len__ method while we're here. 
+ assert len(self.complete_errorgen_basis_default_1Q) == 12 + + def test_sector_restrictions(self): + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + errorgen_basis_S = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('S',)) + errorgen_basis_C = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('C',)) + errorgen_basis_A = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('A',)) + + for lbl in errorgen_basis_H.labels: + assert lbl.errorgen_type == 'H' + for lbl in errorgen_basis_S.labels: + assert lbl.errorgen_type == 'S' + for lbl in errorgen_basis_C.labels: + assert lbl.errorgen_type == 'C' + for lbl in errorgen_basis_A.labels: + assert lbl.errorgen_type == 'A' + + assert len(errorgen_basis_H.labels) == 3 + assert len(errorgen_basis_S.labels) == 3 + assert len(errorgen_basis_C.labels) == 3 + assert len(errorgen_basis_A.labels) == 3 + + #confirm multiple sectors work right too. 
+ errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + for lbl in errorgen_basis_HSC.labels: + assert lbl.errorgen_type in ('H', 'S', 'C') + assert len(errorgen_basis_HSC.labels) == 9 + + def test_max_weights(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q, + max_weights = {'H':2, 'S':2, 'C':1, 'A':1}) + + for lbl in errorgen_basis.labels: + if lbl.errorgen_type in ('H', 'S'): + assert len(lbl.sslbls) in (1,2) + else: + assert len(lbl.sslbls)==1 + + def test_to_explicit_basis(self): + explicit_errorgen_basis = self.complete_errorgen_basis_default_1Q.to_explicit_basis() + + assert self.complete_errorgen_basis_default_1Q.labels == explicit_errorgen_basis.labels + + def test_global_local_labels(self): + global_labels = self.complete_errorgen_basis_default_1Q.global_labels() + local_labels = self.complete_errorgen_basis_default_1Q.local_labels() + + assert isinstance(global_labels[0], GlobalElementaryErrorgenLabel) + assert isinstance(local_labels[0], LocalElementaryErrorgenLabel) + + def test_sublabels(self): + H_labels = self.complete_errorgen_basis_default_1Q.sublabels('H') + S_labels = self.complete_errorgen_basis_default_1Q.sublabels('S') + C_labels = self.complete_errorgen_basis_default_1Q.sublabels('C') + A_labels = self.complete_errorgen_basis_default_1Q.sublabels('A') + + for lbl in H_labels: + assert lbl.errorgen_type == 'H' + for lbl in S_labels: + assert lbl.errorgen_type == 'S' + for lbl in C_labels: + assert lbl.errorgen_type == 'C' + for lbl in A_labels: + assert lbl.errorgen_type == 'A' + + def test_elemgen_supports(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q) + + #there should be 24 weight 1 and 216 weight 2 terms. 
+ elemgen_supports = errorgen_basis.elemgen_supports + num_weight_1 = 0 + num_weight_2 = 0 + for support in elemgen_supports: + if len(support) == 1: + num_weight_1+=1 + elif len(support) == 2: + num_weight_2+=1 + else: + raise ValueError('Invalid support length for two-qubit error gen basis.') + + assert num_weight_1==24 and num_weight_2==216 + + def test_elemgen_and_dual_construction(self): + #just test for running w/o failure. + elemgens = self.complete_errorgen_basis_default_1Q.elemgen_matrices + duals = self.complete_errorgen_basis_default_1Q.elemgen_dual_matrices + + def test_label_index(self): + labels = self.complete_errorgen_basis_default_1Q.labels + + test_eg = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (0,)) + test_eg_missing = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (1,)) + + lbl_idx = self.complete_errorgen_basis_default_1Q.label_index(test_eg) + + assert lbl_idx == labels.index(test_eg) + + with self.assertRaises(KeyError): + self.complete_errorgen_basis_default_1Q.label_index(test_eg_missing) + assert self.complete_errorgen_basis_default_1Q.label_index(test_eg_missing, ok_if_missing=True) is None + + def test_create_subbasis(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q) + subbasis = errorgen_basis.create_subbasis(sslbl_overlap=(0,)) + + #should have 12 weight-1 terms on zero and 216 weight 2, for 228 total in this subbasis. + assert len(subbasis) == 228 + + def test_union(self): + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + errorgen_basis_S = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('S',)) + + union_basis = errorgen_basis_H.union(errorgen_basis_S) + #should now have 6 items. 
+ assert len(union_basis) == 6 + for lbl in union_basis.labels: + assert lbl.errorgen_type in ('H', 'S') + + def test_intersection(self): + errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + + intersection_basis = errorgen_basis_HSC.intersection(errorgen_basis_H) + #should now have 3 items + assert len(intersection_basis) == 3 + for lbl in intersection_basis.labels: + assert lbl.errorgen_type == 'H' + + def test_difference(self): + errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + + intersection_basis = errorgen_basis_HSC.difference(errorgen_basis_H) + #should now have 6 items + assert len(intersection_basis) == 6 + for lbl in intersection_basis.labels: + assert lbl.errorgen_type in ('S', 'C') + +class ExplicitElementaryErrorgenBasisTester(BaseCase): + + def setUp(self): + self.basis_1q = BuiltinBasis('PP', 4) + self.state_space_1Q = QubitSpace(1) + self.state_space_2Q = QubitSpace(2) + + self.labels_1Q = [LocalElementaryErrorgenLabel('H', ['X']), + LocalElementaryErrorgenLabel('S', ['Y']), + LocalElementaryErrorgenLabel('C', ['X','Y']), + LocalElementaryErrorgenLabel('A', ['X','Y'])] + self.labels_2Q = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + self.labels_2Q_alt = [LocalElementaryErrorgenLabel('H', ['IX']), + LocalElementaryErrorgenLabel('S', ['ZZ']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + + + self.explicit_basis_1Q = 
ExplicitElementaryErrorgenBasis(self.state_space_1Q, self.labels_1Q, self.basis_1q) + self.explicit_basis_2Q = ExplicitElementaryErrorgenBasis(self.state_space_2Q, self.labels_2Q, self.basis_1q) + self.explicit_basis_2Q_alt = ExplicitElementaryErrorgenBasis(self.state_space_2Q, self.labels_2Q_alt, self.basis_1q) + + + + def test_elemgen_supports(self): + #there should be 1 weight 1 and 3 weight 2 terms. + elemgen_supports = self.explicit_basis_2Q.elemgen_supports + num_weight_1 = 0 + num_weight_2 = 0 + for support in elemgen_supports: + if len(support) == 1: + num_weight_1+=1 + elif len(support) == 2: + num_weight_2+=1 + else: + raise ValueError('Invalid support length for two-qubit error gen basis.') + + assert num_weight_1==1 and num_weight_2==3 + + def test_elemgen_and_dual_construction(self): + #just test for running w/o failure. + elemgens = self.explicit_basis_2Q.elemgen_matrices + duals = self.explicit_basis_2Q.elemgen_dual_matrices + + def test_label_index(self): + labels = self.explicit_basis_1Q.labels + + test_eg = LocalElementaryErrorgenLabel('C', ['X', 'Y']) + test_eg_missing = LocalElementaryErrorgenLabel('C', ['X', 'Z']) + + lbl_idx = self.explicit_basis_1Q.label_index(test_eg) + + assert lbl_idx == labels.index(test_eg) + + with self.assertRaises(KeyError): + self.explicit_basis_1Q.label_index(test_eg_missing) + assert self.explicit_basis_1Q.label_index(test_eg_missing, ok_if_missing=True) is None + + def test_create_subbasis(self): + subbasis = self.explicit_basis_2Q.create_subbasis(sslbl_overlap=(1,)) + + #should have 3 elements remaining in the subbasis. 
+ assert len(subbasis) == 3 + + def test_union(self): + union_basis = self.explicit_basis_2Q.union(self.explicit_basis_2Q_alt) + correct_union_labels = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY']), + LocalElementaryErrorgenLabel('H', ['IX']), + LocalElementaryErrorgenLabel('S', ['ZZ']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + #should now have 6 items. + assert len(union_basis) == 6 + for lbl in union_basis.labels: + assert lbl in correct_union_labels + + def test_intersection(self): + intersection_basis = self.explicit_basis_2Q.intersection(self.explicit_basis_2Q_alt) + correct_intersection_labels = [LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + #should now have 2 items. + assert len(intersection_basis) == 2 + for lbl in intersection_basis.labels: + assert lbl in correct_intersection_labels + + def test_difference(self): + difference_basis = self.explicit_basis_2Q.difference(self.explicit_basis_2Q_alt) + correct_difference_labels = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY'])] + #should now have 2 items. 
+ assert len(difference_basis) == 2 + for lbl in difference_basis.labels: + assert lbl in correct_difference_labels + + + diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index 45297736b..d901356de 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -209,7 +209,7 @@ def test_bch_approximation(self): self.assertTrue((exact_vs_first_order_norm > exact_vs_second_order_norm) and (exact_vs_second_order_norm > exact_vs_third_order_norm) and (exact_vs_third_order_norm > exact_vs_fourth_order_norm) and (exact_vs_fourth_order_norm > exact_vs_fifth_order_norm)) -class ApproxStabilizerProbTester(BaseCase): +class ApproxStabilizerMethodTester(BaseCase): def setUp(self): num_qubits = 4 gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] From 3ab46458658fd568fce8636dc63422d23167f5c5 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 10 Feb 2025 17:47:15 -0700 Subject: [PATCH 076/102] error generator label test module Add a new test module for the `LocalElementaryErrorgenLabel` and `GlobalElementaryErrorgenLabel` classes. Also flesh out some incomplete documentation for these classes and patch a minor bug that was discovered by the new unit tests. 
--- pygsti/baseobjs/errorgenbasis.py | 7 +- pygsti/baseobjs/errorgenlabel.py | 64 ++++++++++-- test/unit/objects/test_errorgenlabel.py | 127 ++++++++++++++++++++++++ 3 files changed, 186 insertions(+), 12 deletions(-) create mode 100644 test/unit/objects/test_errorgenlabel.py diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 7b3548498..89fc8989a 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -235,12 +235,7 @@ def overlaps(sslbls): else: sub_labels = [] for lbl in self.labels: - non_trivial_bel_indices = [] - for bel in lbl.basis_element_labels: - for i,subbel in enumerate(bel): - if subbel != 'I': - non_trivial_bel_indices.append(i) - non_trivial_bel_indices = set(non_trivial_bel_indices) + non_trivial_bel_indices = lbl.support_indices() for sslbl in sslbl_overlap: if sslbl in non_trivial_bel_indices: sub_labels.append(lbl) diff --git a/pygsti/baseobjs/errorgenlabel.py b/pygsti/baseobjs/errorgenlabel.py index 3e0324aba..717fa3b7e 100644 --- a/pygsti/baseobjs/errorgenlabel.py +++ b/pygsti/baseobjs/errorgenlabel.py @@ -34,9 +34,20 @@ def cast(cls, obj, sslbls=None, identity_label='I'): Parameters ---------- - obj : `LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, tuple or list - Object to cast. - + obj : `LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, str, tuple or list + Object to cast. If a `GlobalElementaryErrorgenLabel` then a value for the `sslbls` + argument should be passed with the full list of state space labels for the system. + Other castable options include: + + -str: A string formatted as '([,])'. E.g. 'H(XX)' or + 'C(X,Y)' + -tuple/list: These can be specified either in 'global-style' or 'local-style'. + - local-style: format is (, [,]) + - global-style:format is (, (,[]), ()) + Where sslbls above is specifically the subset of state space labels this error + generator acts on nontrivially. 
When specifying global-style tuple labels the sslbls kwarg of this method + which contains the complete set of state-space labels must also be specified. + sslbls : tuple or list, optional (default None) A complete set of state space labels. Used when casting from a GlobalElementaryErrorgenLabel or from a tuple of length 3 (wherein the final element is interpreted as the set of ssblbs the error @@ -97,7 +108,8 @@ def __init__(self, errorgen_type, basis_element_labels): This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' type. """ - + #TODO: Store non-standard identity labels with object so we don't need to specify this in + #support_indices. self.errorgen_type = str(errorgen_type) self.basis_element_labels = tuple(basis_element_labels) self._hash = hash((self.errorgen_type, self.basis_element_labels)) @@ -146,7 +158,47 @@ class GlobalElementaryErrorgenLabel(ElementaryErrorgenLabel): @classmethod def cast(cls, obj, sslbls=None, identity_label='I'): - """ TODO: docstring - lots in this module """ + """ + Method for casting an object to an instance of GlobalElementaryErrorgenLabel + + Parameters + ---------- + obj : `GlobalElementaryErrorgenLabel`, `LocalElementaryErrorgenLabel`, tuple or list + Object to cast. If a `LocalElementaryErrorgenLabel` then a value for the `sslbls` + argument should be passed with the full list of state space labels for the system. + Other castable options include: + + -str: Following formatting options are supported. + - A string formatted as '([,]:())' where + is the subset of state-space labels this error generator acts on nontrivially + specified as a comma-separated list. E.g. 'H(XX:0,1)' or 'S(XIY):0,2'. + - A string formatted as :, where + is the subset of state-space labels this error generator acts on nontrivially + specified as a comma-separated list. E.g. 'HXX:0,1' or 'SIX:1'. Note this style + is only compatible with basis element label error generators, and this only H and S. 
+ - A string formatted as . For this style the basis element label + is assumed to correspond to the entire state space, and as such the sslbls kwarg + for this method must also be specified. Like the previous example this is also + only compatible with H and S terms. + -tuple/list: These can be specified either in 'global-style' or 'local-style'. + - local-style: format is (, [,]) + - global-style:format is (, (,[]), ()) + Where sslbls above is specifically the subset of state space labels this error + generator acts on nontrivially. When specifying global-style tuple labels the sslbls kwarg of this method + which contains the complete set of state-space labels must also be specified. + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a LocalElementaryErrorgenLabel + or from a tuple of length 2 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). + + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. + + Returns + ------- + GlobalElementaryErrorgenLabel + """ if isinstance(obj, GlobalElementaryErrorgenLabel): return obj elif isinstance(obj, LocalElementaryErrorgenLabel): @@ -171,7 +223,7 @@ def cast(cls, obj, sslbls=None, identity_label='I'): return cls.cast(LocalElementaryErrorgenLabel.cast(obj), sslbls, identity_label) else: # no parenthesis, assume of form "HXX:Q0,Q1" or local label, e.g. 
"HXX" if ':' in obj: - typ_bel_str, sslbl_str = in_parens.split(':') + typ_bel_str, sslbl_str = obj.split(':') sslbls = [_to_int_or_strip(x) for x in sslbl_str.split(',')] return cls(typ_bel_str[0], (typ_bel_str[1:],), sslbls) else: # treat as a local label diff --git a/test/unit/objects/test_errorgenlabel.py b/test/unit/objects/test_errorgenlabel.py new file mode 100644 index 000000000..898b4d2f8 --- /dev/null +++ b/test/unit/objects/test_errorgenlabel.py @@ -0,0 +1,127 @@ +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as LEEL, GlobalElementaryErrorgenLabel as GEEL +from ..util import BaseCase + +class LocalElementaryErrorgenLabelTester(BaseCase): + + def test_cast(self): + #from local + leel_to_cast = LEEL('H', ['X']) + leel_cast = LEEL.cast(leel_to_cast) + assert leel_cast is leel_to_cast + + #from global + geel_to_cast = GEEL('H', ['X'], (0,)) + leel_cast = LEEL.cast(geel_to_cast, sslbls=(0,1)) + assert leel_cast.basis_element_labels == ('XI',) + + #from string + string_to_cast = 'H(XX)' + leel_cast = LEEL.cast(string_to_cast) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('XX',) + + #from tuple + #global style tuple + global_tup_to_cast = ('H', ('X',), (1,)) + leel_cast = LEEL.cast(global_tup_to_cast, sslbls=(0,1)) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('IX',) + + local_tup_to_cast = ('H', 'IX') + leel_cast = LEEL.cast(local_tup_to_cast) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('IX',) + + #different identity label + geel_to_cast = GEEL('H', ['X'], (0,)) + leel_cast = LEEL.cast(geel_to_cast, sslbls=(0,1), identity_label='F') + assert leel_cast.basis_element_labels == ('XF',) + + def test_eq(self): + assert LEEL('H', ('XX',)) == LEEL('H', ('XX',)) + assert LEEL('H', ('XX',)) != LEEL('S', ('XX',)) + assert LEEL('H', ('XX',)) != LEEL('H', ('XY',)) + + def test_support_indices(self): + assert LEEL('H', 
('XX',)).support_indices() == (0,1) + assert LEEL('C', ['IX', 'XI']).support_indices() == (0,1) + assert LEEL('C', ['IXI', 'XII']).support_indices() == (0,1) + #nonstandard identity label + assert LEEL('C', ['FXF', 'XFF']).support_indices(identity_label='F') == (0,1) + +class GlobalElementaryErrorgenLabelTester(BaseCase): + + def test_cast(self): + #from global + geel_to_cast = GEEL('H', ['X'], (0,)) + geel_cast = GEEL.cast(geel_to_cast) + assert geel_cast is geel_to_cast + + #from local + leel_to_cast = LEEL('H', ['XI']) + geel_cast = GEEL.cast(leel_to_cast, sslbls=(0,1)) + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (0,) + + #from string + string_to_cast = 'H(XX:0,1)' + geel_cast = GEEL.cast(string_to_cast) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + string_to_cast = 'SXX:0,1' + geel_cast = GEEL.cast(string_to_cast) + assert geel_cast.errorgen_type == 'S' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + string_to_cast = 'SXX' + geel_cast = GEEL.cast(string_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'S' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + #from tuple + #global style tuple + global_tup_to_cast = ('H', ('X',), (1,)) + geel_cast = GEEL.cast(global_tup_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (1,) + + local_tup_to_cast = ('H', 'IX') + geel_cast = GEEL.cast(local_tup_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (1,) + + def test_eq(self): + assert GEEL('H', ('X',), (0,)) == GEEL('H', ('X',), (0,)) + assert GEEL('H', ('X',), (0,)) != GEEL('H', ('X',), (1,)) + assert GEEL('H', ('X',), (0,)) != GEEL('H', ('Y',), (0,)) + + def 
test_padded_basis_element_labels(self): + assert GEEL('H', ('X',), (0,)).padded_basis_element_labels(all_sslbls=(0,1,2)) == ('XII',) + assert GEEL('C', ('XX','YY'), (1,2)).padded_basis_element_labels(all_sslbls=(0,1,2)) == ('IXX','IYY') + + def test_map_state_space_labels(self): + geel_to_test = GEEL('C', ['XX', 'YY'], (0,1)) + #dictionary mapper + mapper = {0:'Q0', 1:'Q1'} + mapped_geel = geel_to_test.map_state_space_labels(mapper) + assert mapped_geel.sslbls == ('Q0', 'Q1') + + #function mapper + mapper = lambda x:x+10 + mapped_geel = geel_to_test.map_state_space_labels(mapper) + assert mapped_geel.sslbls == (10, 11) + + def test_sort_sslbls(self): + geel_to_test = GEEL('C', ['XI', 'IX'], (1,0)) + sorted_sslbl_geel = geel_to_test.sort_sslbls() + + assert sorted_sslbl_geel.sslbls == (0,1) + assert sorted_sslbl_geel.basis_element_labels[0] == 'IX' and sorted_sslbl_geel.basis_element_labels[1] == 'XI' + + \ No newline at end of file From 6fe367733e15ac212a1ec5fb72d11b0cc3ef0d56 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 10 Feb 2025 21:36:02 -0700 Subject: [PATCH 077/102] Add in A-H and A-S compositions I'm an idiot and somehow managed to miss the addition of these in my initial implementation. Rectify that now. 
--- pygsti/tools/errgenproptools.py | 235 +++++++++++++++++++++++++++++++- 1 file changed, 234 insertions(+), 1 deletion(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 2e8bedb48..b81892d46 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -3817,7 +3817,240 @@ def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=Non composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) if new_eg_type_3 is not None: composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) - + + elif errorgen_1_type == 'A' and errorgen_2_type == 'H': + #A_{P,Q}[H_A] P->errorgen_1_bel_0, Q->errorgen_1_bel_1 A -> errorgen_2_bel_0 + A = errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #Case 1: P and Q commute. + if P.commutes(Q): + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'S': + #A_P,Q[S_A] P->errorgen_1_bel_0, Q->errorgen_1_bel_1, A -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + A = errorgen_2_bel_0 + + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = 
A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP 
and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + elif errorgen_1_type == 'A' and errorgen_2_type == 'C': #A_A,B[C_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 A = errorgen_1_bel_0 From 99bafde2f1aa2a31fbd3002aa9105358be08f189 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 10 Feb 2025 23:01:36 -0700 Subject: [PATCH 078/102] Unit test updates Update some unit tests which were failing/had typos or mistakes in their construction. 
--- .../operations/lindbladerrorgen.py | 8 ++---- test/unit/objects/test_errorgenpropagation.py | 2 +- test/unit/tools/test_errgenproptools.py | 20 ++++++------- test/unit/tools/test_lindbladtools.py | 28 +++++++++---------- 4 files changed, 28 insertions(+), 30 deletions(-) diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 24073b817..6f568241b 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -386,11 +386,7 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' Parameters ---------- elementary_errorgens : dict - a square 2D array that gives the full error generator. The shape of - this array sets the dimension of the operator. The projections of - this quantity onto the `ham_basis` and `nonham_basis` are closely - related to the parameters of the error generator (they may not be - exactly equal if, e.g `cptp=True`). + A dictionary whose keys are `ElementaryErrorgenLabel`s and whose values are corresponding error generator rates.
parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') Either an instance of `LindbladParameterization` or a string castable to a @@ -439,6 +435,8 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' ------- `LindbladErrorgen` """ + if state_space is None: + raise ValueError('Must specify a state space when using `from_elementary_errorgens`.') state_space = _statespace.StateSpace.cast(state_space) dim = state_space.dim # Store superop dimension basis = _Basis.cast(elementary_errorgen_basis, dim) diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 9b5ca5535..3fb1d52a5 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -91,7 +91,7 @@ def test_cast(self): correct_lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) self.assertEqual(correct_lse, _LSE.cast(self.local_eel)) - self.assertEqual(correct_lse, _LSE.cast(self.global_eel, self.sslbs)) + self.assertEqual(correct_lse, _LSE.cast(self.global_eel, self.sslbls)) def test_to_local_global_eel(self): lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index d901356de..6de3ad418 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -15,7 +15,7 @@ from pygsti.processors import QubitProcessorSpec from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator -#TODO: BCH approximation, errorgen_layer_to_matrix, stim_pauli_string_less_than, iterative_error_generator_composition +#TODO: errorgen_layer_to_matrix, stim_pauli_string_less_than class ErrgenCompositionCommutationTester(BaseCase): @@ -56,7 +56,7 @@ def test_errorgen_commutators(self): for pair1, pair2 in zip(errorgen_label_pairs, stim_errorgen_label_pairs): 
numeric_commutator = _eprop.error_generator_commutator_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict) analytic_commutator = _eprop.error_generator_commutator(pair2[0], pair2[1]) - analytic_commutator_mat = _eprop.errorgen_layer_to_matrix(analytic_commutator, errorgen_lbl_matrix_dict, 2) + analytic_commutator_mat = _eprop.errorgen_layer_to_matrix(analytic_commutator, 2, errorgen_lbl_matrix_dict) norm_diff = np.linalg.norm(numeric_commutator-analytic_commutator_mat) if norm_diff > 1e-10: @@ -165,8 +165,8 @@ def test_iterative_error_generator_composition(self): correct_iterative_compositions = [[(_LSE('H', (stim.PauliString("+X"),)), (-2-0j)), (_LSE('H', (stim.PauliString("+X"),)), -2)], [(_LSE('H', (stim.PauliString("+X_"),)), (-1+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), (_LSE('H', (stim.PauliString("+X_"),)), (-1+0j))], - [(_LSE('C', (stim.PauliString("+X_"), stim.PauliString("+YZ"))), (-1+0j)), (_LSE('C', (stim.PauliString("+_X"), stim.PauliString("+ZY"))), (-1+0j)), - (_LSE('A', (stim.PauliString("+XX"), stim.PauliString("+YY"))), (-1+0j)), (_LSE('H', (stim.PauliString("+ZZ"),)), (1+0j))] + [(_LSE('C', (stim.PauliString("+YZ"), stim.PauliString("+ZY"))), (1+0j)), (_LSE('C', (stim.PauliString("+YY"), stim.PauliString("+ZZ"))), (1+0j)), + (_LSE('C', (stim.PauliString("+_X"), stim.PauliString("+X_"))), -1)] ] for lbls, rates, correct_lbls in zip(test_labels, rates, correct_iterative_compositions): @@ -239,7 +239,7 @@ def setUp(self): def test_random_support(self): num_random = _eprop.random_support(self.circuit_tableau) - self.assertEqual(num_random, 8) + self.assertEqual(num_random, 3) #This unit test for tableau fidelity is straight out of Craig Gidney's stackexchange post. 
def test_tableau_fidelity(self): @@ -274,15 +274,14 @@ def _assert_correct_tableau_fidelity(u, v): def test_amplitude_of_state(self): amp0000 = _eprop.amplitude_of_state(self.circuit_tableau, '0000') amp1111 = _eprop.amplitude_of_state(self.circuit_tableau, '1111') - self.assertTrue(abs(amp0000)<1e-7) - self.assertTrue(abs(amp1111 - np.sqrt(.125))<1e-7) + self.assertTrue(abs(amp1111 -(-1j*np.sqrt(.125)))<1e-7) amp0000 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '0000') amp1111 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '1111') self.assertTrue(abs(amp0000)<1e-7) - self.assertTrue(abs(amp1111 - np.sqrt(.125))<1e-7) + self.assertTrue(abs(amp1111 - (-1j*np.sqrt(.125)))<1e-7) def test_bitstring_to_tableau(self): tableau = _eprop.bitstring_to_tableau('1010') @@ -292,11 +291,12 @@ def test_pauli_phase_update(self): test_paulis = ['YII', 'ZII', stim.PauliString('XYZ'), stim.PauliString('+iIII')] test_bitstring = '100' - correct_phase_updates_standard = [-1j, -1, -1j, 1j] - correct_phase_updates_dual = [1j, -1, 1j, 1j] + correct_phase_updates_standard = [-1j, -1, 1j, 1j] + correct_phase_updates_dual = [1j, -1, -1j, 1j] correct_output_bitstrings = ['000', '100', '010', '100'] for i, test_pauli in enumerate(test_paulis): + print(i) phase_update, output_bitstring = _eprop.pauli_phase_update(test_pauli, test_bitstring) self.assertEqual(phase_update, correct_phase_updates_standard[i]) self.assertEqual(output_bitstring, correct_output_bitstrings[i]) diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index 95b59dce7..f89df89ce 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -3,7 +3,7 @@ from pygsti.tools import lindbladtools as lt from pygsti.modelmembers.operations import LindbladErrorgen -from pygsti.baseobjs import Basis +from pygsti.baseobjs import Basis, QubitSpace from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel 
from ..util import BaseCase @@ -94,14 +94,14 @@ def test_elementary_errorgen_bases(self): class RandomErrorgenRatesTester(BaseCase): def test_default_settings(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234) + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234, label_type='local') #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 240) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. - errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) def test_sector_restrictions(self): #H-only: @@ -110,7 +110,7 @@ def test_sector_restrictions(self): self.assertEqual(len(random_errorgen_rates), 15) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. - errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #S-only random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('S',), seed=1234) @@ -118,15 +118,15 @@ def test_sector_restrictions(self): self.assertEqual(len(random_errorgen_rates), 15) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
- errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #H+S random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), seed=1234) #make sure that we get the expected number of rates: - self.assertEqual(len(random_errorgen_rates), 15) + self.assertEqual(len(random_errorgen_rates), 30) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. - errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #H + S + A random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S', 'A'), seed=1234) @@ -134,7 +134,7 @@ def test_sector_restrictions(self): self.assertEqual(len(random_errorgen_rates), 135) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. - errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False) + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) def test_error_metric_restrictions(self): #test generator_infidelity @@ -143,7 +143,7 @@ def test_error_metric_restrictions(self): error_metric_value=.99, seed=1234) #confirm this has the correct generator infidelity. 
gen_infdl = 0 - for coeff, rate in random_errorgen_rates: + for coeff, rate in random_errorgen_rates.items(): if coeff.errorgen_type == 'H': gen_infdl+=rate**2 elif coeff.errorgen_type == 'S': @@ -153,11 +153,11 @@ def test_error_metric_restrictions(self): #test generator_error random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), - error_metric= 'generator_error', + error_metric= 'total_generator_error', error_metric_value=.99, seed=1234) #confirm this has the correct generator infidelity. gen_error = 0 - for coeff, rate in random_errorgen_rates: + for coeff, rate in random_errorgen_rates.items(): if coeff.errorgen_type == 'H': gen_error+=abs(rate) elif coeff.errorgen_type == 'S': @@ -173,7 +173,7 @@ def test_error_metric_restrictions(self): #confirm this has the correct generator infidelity contributions. gen_infdl_H = 0 gen_infdl_S = 0 - for coeff, rate in random_errorgen_rates: + for coeff, rate in random_errorgen_rates.items(): if coeff.errorgen_type == 'H': gen_infdl_H+=rate**2 elif coeff.errorgen_type == 'S': @@ -182,13 +182,13 @@ def test_error_metric_restrictions(self): assert abs(gen_infdl_S - gen_infdl_H)<1e-5 random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), - error_metric= 'generator_error', + error_metric= 'total_generator_error', error_metric_value=.99, relative_HS_contribution=(.5, .5), seed=1234) #confirm this has the correct generator error contributions. 
gen_error_H = 0 gen_error_S = 0 - for coeff, rate in random_errorgen_rates: + for coeff, rate in random_errorgen_rates.items(): if coeff.errorgen_type == 'H': gen_error_H+=abs(rate) elif coeff.errorgen_type == 'S': From f8c1e64c3bebe3f715e54bdbe3eed1ecc3a319e4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 11 Feb 2025 22:12:41 -0700 Subject: [PATCH 079/102] Random errorgen testing and bugfixes Fix some bugs in the code for doing random error generator construction and add in some new related unit tests. --- pygsti/tools/lindbladtools.py | 96 ++++++++++++++++++++++----- test/unit/tools/test_lindbladtools.py | 32 ++++++--- 2 files changed, 102 insertions(+), 26 deletions(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index a2dbdfa38..60fac80e2 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -568,7 +568,7 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') one of these qudits) in order to be included in this basis. fixed_errorgen_rates : dict, optional (default None) - An optional dictionary whose keys are `GlobalElementaryErrorgenLabel` + An optional dictionary whose keys are `LocalElementaryErrorgenLabel` objects, and whose values are error generator rates. When specified, the rates in this dictionary will override any randomly selected values in the final returned error generator rate dictionary. The inclusion of these @@ -600,9 +600,9 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') raise ValueError('Unsupported error metric type. Currently supported options are generator_infidelity and total_generator_error') #Add a check that the desired error metric value is attainable given the values of fixed_errorgen_rates. if fixed_errorgen_rates: - #verify that all of the keys are GlobalElementaryErrorgenLabel objects. - msg = 'All keys of fixed_errorgen_rates must be GlobalElementaryErrorgenLabel.' 
- assert all([isinstance(key, _GlobalElementaryErrorgenLabel) for key in fixed_errorgen_rates.keys()]), msg + #verify that all of the keys are LocalElementaryErrorgenLabel objects. + msg = 'All keys of fixed_errorgen_rates must be LocalElementaryErrorgenLabel.' + assert all([isinstance(key, _LocalElementaryErrorgenLabel) for key in fixed_errorgen_rates.keys()]), msg #get the H and S rates from the dictionary. fixed_H_rates = _np.array([val for key, val in fixed_errorgen_rates.items() if key.errorgen_type == 'H']) @@ -626,8 +626,7 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') else: fixed_H_contribution = 0 fixed_S_contribution = 0 - - + if relative_HS_contribution is not None: assert ('H' in errorgen_types and 'S' in errorgen_types), 'Invalid relative_HS_contribution, one of either H or S is not in errorgen_types.' if error_metric is None: @@ -637,22 +636,45 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') if max_weights is not None: assert max_weights['C'] <= max_weights['S'] and max_weights['A'] <= max_weights['S'], 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' - + assert max_weights['C'] == max_weights['A'], 'Maximum weight of C and A terms must be the same at present.' rng = _np.random.default_rng(seed) + + if 'C' in errorgen_types or 'A' in errorgen_types: + assert 'C' in errorgen_types and 'A' in errorgen_types, 'Support only currently available for random C and A terms if both sectors present.' #create a state space with this dimension.
state_space = _QubitSpace.cast(num_qubits) #create an error generator basis according the our weight specs - errorgen_basis = _bo.CompleteElementaryErrorgenBasis('pp', state_space, elementary_errorgen_types=errorgen_types, - max_weights=max_weights, sslbl_overlap=sslbl_overlap) + errorgen_basis = _bo.CompleteElementaryErrorgenBasis('PP', state_space, elementary_errorgen_types=errorgen_types, + max_weights=max_weights, sslbl_overlap=sslbl_overlap, default_label_type='local') #Get the labels, broken out by sector, of each of the error generators in this basis. - errgen_labels_H = errorgen_basis.sublabels('H') - errgen_labels_S = errorgen_basis.sublabels('S') - errgen_labels_C = errorgen_basis.sublabels('C') - errgen_labels_A = errorgen_basis.sublabels('A') + errgen_labels_H = _sort_errorgen_labels(errorgen_basis.sublabels('H')) + errgen_labels_S = _sort_errorgen_labels(errorgen_basis.sublabels('S')) + errgen_labels_C = _sort_errorgen_labels(errorgen_basis.sublabels('C')) + errgen_labels_A = _sort_errorgen_labels(errorgen_basis.sublabels('A')) + #filter out any C or A terms which can't be present with CP constraints due to lack of correct S term. 
+ filtered_errgen_labels_C = [] + for lbl in errgen_labels_C: + first_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],)) + second_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],)) + if first_label not in errgen_labels_S or second_label not in errgen_labels_S: + continue + else: + filtered_errgen_labels_C.append(lbl) + filtered_errgen_labels_A = [] + for lbl in errgen_labels_A: + first_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],)) + second_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],)) + if first_label not in errgen_labels_S or second_label not in errgen_labels_S: + continue + else: + filtered_errgen_labels_A.append(lbl) + errgen_labels_C = filtered_errgen_labels_C + errgen_labels_A = filtered_errgen_labels_A + #Get the number of H and S error generators. These are stored in HSCA order in the labels num_H_rates = len(errgen_labels_H) num_S_rates = len(errgen_labels_S) @@ -663,15 +685,32 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') #Create a random matrix with complex gaussian entries which will be used to generator a PSD matrix for the SCA rates. random_SCA_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + \ - 1j* rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + 1j* rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + random_SCA_mat = random_SCA_gen_mat @ random_SCA_gen_mat.conj().T #The random S rates are just the diagonal of random_SCA_mat. random_rates_dicts['S'] = {lbl: val for lbl,val in zip(errgen_labels_S, _np.real(_np.diag(random_SCA_mat)).copy())} - #The random C rates are the real part of the off diagonal entries, and the A rates the imaginary part. 
random_rates_dicts['C'] = {lbl: val for lbl,val in zip(errgen_labels_C, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].real)} random_rates_dicts['A'] = {lbl: val for lbl,val in zip(errgen_labels_A, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].imag)} - + #manually check conditions on C and A + for lbl, rate in random_rates_dicts['C'].items(): + first_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],))] + second_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],))] + + if not (abs(rate) <= _np.sqrt(first_S_rate*second_S_rate)): + print(f'{lbl}: {rate}') + raise ValueError('Invalid C rate') + + #manually check conditions on C and A + for lbl, rate in random_rates_dicts['A'].items(): + first_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],))] + second_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],))] + + if not (abs(rate) <= _np.sqrt(first_S_rate*second_S_rate)): + print(f'{lbl}: {rate}') + raise ValueError('Invalid A rate') + #Add in/override the fixed rates for each of the sectors. H_fixed_keys = [] S_fixed_keys = [] @@ -762,9 +801,30 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') if label_type not in ['global', 'local']: raise ValueError('Unsupported label type {label_type}.') - if label_type == 'local': - errorgen_rates_dict = {_LocalElementaryErrorgenLabel.cast(lbl, state_space.state_space_labels): val + if label_type == 'global': + errorgen_rates_dict = {_GlobalElementaryErrorgenLabel.cast(lbl, sslbls=state_space.state_space_labels): val for lbl, val in errorgen_rates_dict.items()} return errorgen_rates_dict +def _sort_errorgen_labels(errgen_labels): + """ + This function sorts error generator coefficients in canonical way. + Helper function for random error generator rate construction. 
+ """ + if not errgen_labels: + return [] + + assert isinstance(errgen_labels[0], _LocalElementaryErrorgenLabel), 'Can only sort local labels at the moment' + + errorgen_types = [lbl.errorgen_type for lbl in errgen_labels] + assert len(set(errorgen_types))==1, 'only one error generator type at a time is supported presently' + + errorgen_type = errorgen_types[0] + if errorgen_type in ('H', 'S'): + sorted_errgen_labels = sorted(errgen_labels, key= lambda lbl:lbl.basis_element_labels[0]) + else: + sorted_errgen_labels = sorted(errgen_labels, key= lambda lbl:(lbl.basis_element_labels[0], lbl.basis_element_labels[1])) + + return sorted_errgen_labels + diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index f89df89ce..57b470827 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -128,14 +128,6 @@ def test_sector_restrictions(self): #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) - #H + S + A - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S', 'A'), seed=1234) - #make sure that we get the expected number of rates: - self.assertEqual(len(random_errorgen_rates), 135) - #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail - #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
- errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) - def test_error_metric_restrictions(self): #test generator_infidelity random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), @@ -217,3 +209,27 @@ def test_sslbl_overlap(self): for coeff in random_errorgen_rates: assert 0 in coeff.sslbls + def test_weight_restrictions(self): + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + label_type='local', seed=1234, + max_weights={'H':1, 'S':1, 'C':1, 'A':1}) + assert len(random_errorgen_rates) == 24 + #confirm still CPTP + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + label_type='local', seed=1234, + max_weights={'H':2, 'S':2, 'C':1, 'A':1}) + assert len(random_errorgen_rates) == 42 + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + def test_global_labels(self): + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234, label_type='global') + + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 240) + + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
+ errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + From 53ccbd1faba1cfd3b19a4b4202d6446e88a4564b Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 12 Feb 2025 18:31:04 -0700 Subject: [PATCH 080/102] Add error generator input output map functionality Add two new methods related to error generator propagation aimed at providing supporting functionality for RLGST-related stuff. The first new method is for generating a 'transform map', a map from input error generators and the layer they appear and the corresponding output error generator produced following propagation along with the associated phase factor accrued (this doesn't account for any higher-order stuff that might happen following the application of BCH or anything else). The second new method is more of a model-oriented method which takes as input an error generator label, a circuit and a layer index and returns which gates (if any) in that circuit layer could have contributed that particular error generator to the overall layer's error generator. Aside from that there is some spring cleaning removing the multi_gate_dict kwarg which never ended up being used in the end. Also a miscellaneous new unit test related to core propagation functionality. Finally, temporarily comment out averaged eoc stuff (will be revived on a forked off branch focused on non-Markovian related functionality). 
--- pygsti/errorgenpropagation/errorpropagator.py | 300 +++++++++++------- test/unit/objects/test_errorgenpropagation.py | 11 +- 2 files changed, 197 insertions(+), 114 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index 544c0fda5..f39e536bb 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -8,18 +8,20 @@ from pygsti.tools.internalgates import standard_gatenames_stim_conversions import copy as _copy from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis, BuiltinBasis as _BuiltinBasis -from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrogenLabel +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel import pygsti.tools.errgenproptools as _eprop import pygsti.tools.basistools as _bt import pygsti.tools.matrixtools as _mt import pygsti.tools.optools as _ot +from pygsti.models.model import OpModel as _OpModel +from pygsti.models import ExplicitOpModel as _ExplicitOpModel, ImplicitOpModel as _ImplicitOpModel from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen from itertools import islice class ErrorGeneratorPropagator: - def __init__(self, model, state_space=None, multi_gate_dict=None, nonmarkovian=False, multi_gate=False): + def __init__(self, model, state_space=None): """ Initialize an instance of `ErrorGeneratorPropagator`. 
This class is instantiated with a noise model and manages operations related to propagating error generators through circuits, and constructing @@ -48,7 +50,7 @@ def __init__(self, model, state_space=None, multi_gate_dict=None, nonmarkovian=F else: sslbls = state_space.qubit_labels lse_dict = {_LSE.cast(lbl, sslbls): rate for lbl, rate in model.items()} - elif isinstance(next(iter(model)), _LocalElementaryErrogenLabel): + elif isinstance(next(iter(model)), _LocalElementaryErrorgenLabel): lse_dict = {_LSE.cast(lbl): rate for lbl, rate in model.items()} else: lse_dict = model @@ -56,7 +58,7 @@ def __init__(self, model, state_space=None, multi_gate_dict=None, nonmarkovian=F else: self.model = model - def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, use_bch=False, + def eoc_error_channel(self, circuit, include_spam=True, use_bch=False, bch_kwargs=None, mx_basis='pp'): """ Propagate all of the error generators for each circuit layer to the end of the circuit @@ -68,10 +70,6 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us circuit : `Circuit` Circuit to construct a set of post gate error generators for. - multi_gate_dict : dict, optional (default None) - An optional dictionary mapping between gate name aliases and their - standard name counterparts. - include_spam : bool, optional (default True) If True then we include in the propagation the error generators associated with state preparation and measurement. 
@@ -96,13 +94,12 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us if use_bch: #should return a single dictionary of error generator rates - propagated_error_generator = self.propagate_errorgens_bch(circuit, multi_gate_dict=multi_gate_dict, - **bch_kwargs) + propagated_error_generator = self.propagate_errorgens_bch(circuit, **bch_kwargs) #convert this to a process matrix return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp', return_dense=True)) else: - propagated_error_generators = self.propagate_errorgens(circuit, multi_gate_dict, include_spam) + propagated_error_generators = self.propagate_errorgens(circuit, include_spam) #loop though the propagated error generator layers and construct their error generators. #Then exponentiate exp_error_generators = [] @@ -123,70 +120,66 @@ def eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, us eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='pp', to_basis=mx_basis) return eoc_error_channel - - def averaged_eoc_error_channel(self, circuit, multi_gate_dict=None, include_spam=True, mx_basis='pp'): - """ - Propagate all of the error generators for each circuit layer to the end of the circuit, - then apply a second order cumulant expansion to approximate the average of the end of circuit - error channel over the values error generator rates that are stochastic processes. - - Parameters - ---------- - circuit : `Circuit` - Circuit to construct a set of post gate error generators for. - - multi_gate_dict : dict, optional (default None) - An optional dictionary mapping between gate name aliases and their - standard name counterparts. - - include_spam : bool, optional (default True) - If True then we include in the propagation the error generators associated - with state preparation and measurement. 
- - mx_basis : Basis or str, optional (default 'pp') - Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the - basis in which to return the process matrix for the error channel. - - Returns - ------- - avg_eoc_error_channel : numpy.ndarray - A numpy array corresponding to the end-of-circuit error channel resulting - from the propagated error generators and averaging over the stochastic processes - for the error generator rates using a second order cumulant approximation. - """ - - #propagate_errorgens_nonmarkovian returns a list of list of - propagated_error_generators = self.propagate_errorgens_nonmarkovian(circuit, multi_gate_dict, include_spam) - - #construct the nonmarkovian propagators - for i in range(len(propagated_error_generators)): - for j in range(i+1): - if i==j: - # term: - pass - #prop_contrib = amam - else: - pass - - - #loop though the propagated error generator layers and construct their error generators. - #Then exponentiate - exp_error_generators = [] - for err_gen_layer_list in propagated_error_generators: - if err_gen_layer_list: #if not empty. Should be length one if not empty. - #Keep the error generator in the standard basis until after the end-of-circuit - #channel is constructed so we can reduce the overhead of changing basis. - exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='std'))) - #Next take the product of these exponentiated error generators. - #These are in circuit ordering, so reverse for matmul. 
- exp_error_generators.reverse() - eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) - eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') - - return eoc_error_channel - - - def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): + # + #def averaged_eoc_error_channel(self, circuit, include_spam=True, mx_basis='pp'): + # """ + # Propagate all of the error generators for each circuit layer to the end of the circuit, + # then apply a second order cumulant expansion to approximate the average of the end of circuit + # error channel over the values error generator rates that are stochastic processes. +# + # Parameters + # ---------- + # circuit : `Circuit` + # Circuit to construct a set of post gate error generators for. +# + # include_spam : bool, optional (default True) + # If True then we include in the propagation the error generators associated + # with state preparation and measurement. +# + # mx_basis : Basis or str, optional (default 'pp') + # Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + # basis in which to return the process matrix for the error channel. +# + # Returns + # ------- + # avg_eoc_error_channel : numpy.ndarray + # A numpy array corresponding to the end-of-circuit error channel resulting + # from the propagated error generators and averaging over the stochastic processes + # for the error generator rates using a second order cumulant approximation. + # """ +# + # #propagate_errorgens_nonmarkovian returns a list of list of + # propagated_error_generators = self.propagate_errorgens_nonmarkovian(circuit, include_spam) + # + # #construct the nonmarkovian propagators + # for i in range(len(propagated_error_generators)): + # for j in range(i+1): + # if i==j: + # # term: + # pass + # #prop_contrib = amam + # else: + # pass + # + # + # #loop though the propagated error generator layers and construct their error generators. 
+ # #Then exponentiate + # exp_error_generators = [] + # for err_gen_layer_list in propagated_error_generators: + # if err_gen_layer_list: #if not empty. Should be length one if not empty. + # #Keep the error generator in the standard basis until after the end-of-circuit + # #channel is constructed so we can reduce the overhead of changing basis. + # exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='std'))) + # #Next take the product of these exponentiated error generators. + # #These are in circuit ordering, so reverse for matmul. + # exp_error_generators.reverse() + # eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + # eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') +# + # return eoc_error_channel +# + + def propagate_errorgens(self, circuit, include_spam=True): """ Propagate all of the error generators for each circuit layer to the end without any recombinations or averaging. @@ -196,10 +189,6 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): circuit : `Circuit` Circuit to construct a set of post gate error generators for. - multi_gate_dict : dict, optional (default None) - An optional dictionary mapping between gate name aliases and their - standard name counterparts. - include_spam : bool, optional (default True) If True then we include in the propagation the error generators associated with state preparation and measurement. @@ -214,7 +203,7 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): #start by converting the input circuit into a list of stim Tableaus with the #first element dropped. 
- stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer = not include_spam) + stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) #We next want to construct a new set of Tableaus corresponding to the cumulative products #of each of the circuit layers with those that follow. These Tableaus correspond to the @@ -234,8 +223,7 @@ def propagate_errorgens(self, circuit, multi_gate_dict=None, include_spam=True): return propagated_errorgen_layers - def propagate_errorgens_bch(self, circuit, bch_order=1, multi_gate_dict=None, - include_spam=True, truncation_threshold=1e-14): + def propagate_errorgens_bch(self, circuit, bch_order=1, include_spam=True, truncation_threshold=1e-14): """ Propagate all of the error generators for each circuit to the end, performing approximation/recombination using the BCH approximation. @@ -249,10 +237,6 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, multi_gate_dict=None, Order of the BCH approximation to use. A maximum value of 4 is currently supported. - multi_gate_dict : dict, optional (default None) - An optional dictionary mapping between gate name aliases and their - standard name counterparts. - include_spam : bool, optional (default True) If True then we include in the propagation the error generators associated with state preparation and measurement. @@ -262,8 +246,7 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, multi_gate_dict=None, are truncated during the BCH approximation. """ - propagated_errorgen_layers = self.propagate_errorgens(circuit, multi_gate_dict, - include_spam=include_spam) + propagated_errorgen_layers = self.propagate_errorgens(circuit, include_spam=include_spam) #if length one no need to do anything. 
if len(propagated_errorgen_layers)==1: return propagated_errorgen_layers[0] @@ -279,7 +262,7 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, multi_gate_dict=None, return combined_err_layer - def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, include_spam=True): + def propagate_errorgens_nonmarkovian(self, circuit, include_spam=True): """ Propagate all of the error generators for each circuit layer to the end without any recombinations or averaging. This version also only track the overall modifier/weighting @@ -291,10 +274,6 @@ def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, includ circuit : `Circuit` Circuit to construct a set of post gate error generators for. - multi_gate_dict : dict, optional (default None) - An optional dictionary mapping between gate name aliases and their - standard name counterparts. - include_spam : bool, optional (default True) If True then we include in the propagation the error generators associated with state preparation and measurement. @@ -306,12 +285,9 @@ def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, includ an error generator layer through to the end of the circuit. """ - - #TODO: Check for proper handling of empty circuit and length 1 circuits. - #start by converting the input circuit into a list of stim Tableaus with the #first element dropped. - stim_layers = self.construct_stim_layers(circuit, multi_gate_dict, drop_first_layer = not include_spam) + stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) #We next want to construct a new set of Tableaus corresponding to the cumulative products #of each of the circuit layers with those that follow. 
These Tableaus correspond to the @@ -337,27 +313,130 @@ def propagate_errorgens_nonmarkovian(self, circuit, multi_gate_dict=None, includ return propagated_errorgen_layers + def errorgen_transform_map(self, circuit, include_spam=True): + """ + Construct a map giving the relationship between input error generators and their final + value following propagation through the circuit. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct error generator transform map for. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + """ + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. + stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. + propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, fixed_rate=1) + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. 
+ propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) + + #there should be a one-to-one mapping between the index into propagated_errorgen_layers and the + #index of the circuit layer where the error generators in that propagated layer originated. + #Moreover, LocalStimErrorgenLabels remember who they were at instantiation. + input_output_errgen_map = dict() + for i, output_layer in enumerate(propagated_errorgen_layers): + for output_label, output_rate in output_layer.items(): + original_label = _LSE.cast(output_label.initial_label) + input_output_errgen_map[(original_label, i)] = (output_label, output_rate) + + return input_output_errgen_map + + def errorgen_gate_contributors(self, errorgen, circuit, layer_idx, include_spam=True): + """ + Walks through the gates in the specified circuit layer and query the parent + model to figure out which gates could have given rise to a particular error generator + in a layer. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator layer to find instance of. + + circuit : `Circuit` + Circuit to identify potential gates in. + + layer_idx : int + Index of circuit layer. + + include_spam : bool, optional (default True) + If True include the spam circuit layers at the beginning and + end of the circuit. + + Returns + ------- + label_list_for_errorgen : list of `Label` + A list of gate labels contained within this circuit layer that could have + contributed this error generator. 
+ """ + + if not isinstance(self.model, _OpModel): + raise ValueError('This method does not work for non-OpModel models.') + + if include_spam: + circuit = self.model.complete_circuit(circuit) + + assert layer_idx < len(circuit), f'layer_idx {layer_idx} is out of range for circuit with length {len(circuit)}' + + if isinstance(errorgen, _GlobalElementaryErrorgenLabel): + errorgen = _LocalElementaryErrorgenLabel.cast(errorgen, sslbls = self.model.state_space.qubit_labels) + elif isinstance(errorgen, _LSE): + errorgen = errorgen.to_local_eel() + else: + assert isinstance(errorgen, _LocalElementaryErrorgenLabel), f'Unsupported `errorgen` type {type(errorgen)}.' + + circuit_layer = circuit.layer(layer_idx) - def propagate_errorgens_analytic(self, circuit): - pass + if isinstance(self.model, _ExplicitOpModel): + #check if this error generator is in the error generator coefficient dictionary for this layer, and if not return the empty dictionary. + layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients(label_type='local') + if errorgen in layer_errorgen_coeff_dict: + label_list_for_errorgen = [circuit_layer] + else: + label_list_for_errorgen = [] + + elif isinstance(self.model, _ImplicitOpModel): + #Loop through each label in this layer and ask for the circuit layer operator + #for each. Then query this for the error generator coefficients associated + #with that layer. + #Note: This may not be 100% robust, I'm assuming there aren't any exotic layer rules + #that would, e.g., add in totally new error generators when certain pairs of gates appear in a layer. 
+ label_list_for_errorgen = [] + for lbl in circuit_layer: + circuit_layer_operator = self.model.circuit_layer_operator(lbl) + label_errorgen_coeff_dict = circuit_layer_operator.errorgen_coefficients(label_type='local') + if errorgen in label_errorgen_coeff_dict: + label_list_for_errorgen.append(lbl) + else: + raise ValueError(f'Type of model {type(self.model)=} is not supported with this method.') + + return label_list_for_errorgen - def construct_stim_layers(self, circuit, multi_gate_dict=None, drop_first_layer=True): + def construct_stim_layers(self, circuit, drop_first_layer=True): """ Converts a `Circuit` to a list of stim Tableau objects corresponding to each gate layer. - TODO: Move to a tools module? Locality of behavior considerations. - Parameters ---------- circuit : `Circuit` Circuit to convert. - multi_gate_dict : dict, optional (default None) - If specified this augments the standard dictionary for conversion between - pygsti gate labels and stim (found in `pygsti.tools.internalgates.standard_gatenames_stim_conversions`) - with additional entries corresponding to aliases for the entries of the standard dictionary. - This is presently used in the context of non-Markovian applications where tracking - circuit time for gate labels is required. + drop_first_layer : bool, optional (default True) If True the first Tableau for the first gate layer is dropped in the returned output. 
This default setting is what is primarily used in the context of error generator @@ -371,9 +450,6 @@ def construct_stim_layers(self, circuit, multi_gate_dict=None, drop_first_layer= """ stim_dict=standard_gatenames_stim_conversions() - if multi_gate_dict is not None: - for key in multi_gate_dict: - stim_dict[key]=stim_dict[multi_gate_dict[key]] stim_layers=circuit.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) if drop_first_layer and len(stim_layers)>0: stim_layers = stim_layers[1:] @@ -485,7 +561,7 @@ def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, incl for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary #only track this error generator if its rate is not exactly zero. #TODO: Add more flexible initial truncation logic. if rate !=0 or fixed_rate is not None: - #if isinstance(errgen_coeff_lbl, _LocalElementaryErrogenLabel): + #if isinstance(errgen_coeff_lbl, _LocalElementaryErrorgenLabel): initial_label = errgen_coeff_lbl #else: # initial_label = None diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 3fb1d52a5..69005faef 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -1,4 +1,5 @@ from ..util import BaseCase +from pygsti.circuits import Circuit from pygsti.algorithms.randomcircuit import create_random_circuit from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator from pygsti.processors import QubitProcessorSpec @@ -22,7 +23,7 @@ def setUp(self): pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) self.target_model = create_crosstalk_free_model(processor_spec = pspec) self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) - + self.circuit_length_1 = create_random_circuit(pspec, 1, sampler='edgegrab', samplerargs=[0.4,], 
rand_state=12345) typ = 'H' max_stochastic = {'S': .0005, 'H': 0, 'H+S': .0001} max_hamiltonian = {'S': 0, 'H': .00005, 'H+S': .0001} @@ -78,7 +79,13 @@ def test_eoc_error_channel(self): eoc_error_channel_exact = noisy_channel_exact@ideal_channel.conj().T assert np.linalg.norm(eoc_error_channel - eoc_error_channel_exact) < 1e-10 - + + def test_propagation_length_zero_one(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + empty_circuit = Circuit([], line_labels=(0,1,2,3)) + error_propagator.propagate_errorgens(self.circuit_length_1) + error_propagator.propagate_errorgens(empty_circuit, include_spam=True) + error_propagator.propagate_errorgens(empty_circuit, include_spam=False) class LocalStimErrorgenLabelTester(BaseCase): def setUp(self): From 0e8d52971c0873e57b8e74d6feaceec2d1312951 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 12 Feb 2025 18:42:27 -0700 Subject: [PATCH 081/102] Cleanup unused code Clean up some now unused code now that the core functionality of the analytic propagation stuff has been ported into the main class. Also remove some unneeded methods and out of date documentation from the LocalStimErrorgen class. 
--- pygsti/errorgenpropagation/errorpropagator.py | 125 +++--------------- .../errorgenpropagation/localstimerrorgen.py | 54 +------- 2 files changed, 22 insertions(+), 157 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index f39e536bb..b3ab1bf04 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -688,111 +688,28 @@ def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_ return errorgen -def ErrorPropagatorAnalytic(circ,errorModel,ErrorLayerDef=False,startingErrors=None): - stim_layers=circ.convert_to_stim_tableau_layers() - - if startingErrors is None: - stim_layers.pop(0) - - propagation_layers=[] - while len(stim_layers)>0: - top_layer=stim_layers.pop(0) - for layer in stim_layers: - top_layer = layer*top_layer - propagation_layers.append(top_layer) - - if not ErrorLayerDef: - errorLayers=buildErrorlayers(circ,errorModel,len(circ.line_labels)) - else: - errorLayers=[[_copy.deepcopy(eg) for eg in errorModel] for i in range(circ.depth)] - - if not startingErrors is None: - errorLayers.insert(0,startingErrors) - - fully_propagated_layers=[] - for (idx,layer) in enumerate(errorLayers): - new_error_dict=dict() - if idx Date: Wed, 12 Feb 2025 21:49:23 -0700 Subject: [PATCH 082/102] LindbladErrorgenbugfix Patch a bug in LindbladErrorgen when using 'CPTPLND' as the parameterization with an H-only model when instantiating through the from_elementary_errorgens constructor. 
--- .../operations/lindbladerrorgen.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 7499a99c0..ffedf5e1b 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -465,11 +465,13 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' blocks = [] for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): relevant_eegs = eegs_by_typ[blk_type] # KeyError => unrecognized block type! - bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) - blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) - blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) - blocks.append(blk) - + #only add block type is relevant_eegs is not empty. + if relevant_eegs: + bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) + blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) + blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) + blocks.append(blk) + print(blocks) return cls(blocks, basis, mx_basis, evotype, state_space) def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto', mx_basis='pp', @@ -559,8 +561,15 @@ def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto' blk.create_lindblad_term_superoperators(self.matrix_basis, sparse_bases, include_1norms=True, flat=True) for blk in lindblad_coefficient_blocks] + #print(f'{lindblad_coefficient_blocks=}') + #print(f'{len(self.lindblad_term_superops_and_1norms)=}') + #print(f'{self.lindblad_term_superops_and_1norms=}') + + #for (Lterm_superops, _) in self.lindblad_term_superops_and_1norms: + # print(Lterm_superops.shape) #combine all of the linblad term 
superoperators across the blocks to a single concatenated tensor. - self.combined_lindblad_term_superops = _np.concatenate([Lterm_superops for (Lterm_superops, _) in self.lindblad_term_superops_and_1norms], axis=0) + self.combined_lindblad_term_superops = _np.concatenate([Lterm_superops for (Lterm_superops, _) in + self.lindblad_term_superops_and_1norms], axis=0) #Create a representation of the type chosen above: if self._rep_type == 'lindblad errorgen': @@ -1556,6 +1565,7 @@ def minimal_from_elementary_errorgens(cls, errs): parameterization = "CPTPLND" else: parameterization = '+'.join(paramtypes) + print(f'{parameterization=}') return cls.cast(parameterization) @classmethod From 1b51f772c161df874a1815144698ad3a3d03b6b8 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 12 Feb 2025 21:53:16 -0700 Subject: [PATCH 083/102] Add stim dependency Add stim as an installation dependency (essential for all of new errorgen propagation module's functionality). --- requirements.txt | 1 + setup.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index fee654528..5b3b1fe78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ scipy plotly pandas networkx +stim diff --git a/setup.py b/setup.py index 1528d9b99..45936402b 100644 --- a/setup.py +++ b/setup.py @@ -83,7 +83,8 @@ 'notebook', 'ipython', 'jupyter_server', - 'torch' + 'torch', + 'stim' ] } @@ -276,7 +277,8 @@ def setup_with_extensions(extensions=None): 'scipy', 'plotly', 'pandas', - 'networkx' + 'networkx', + 'stim' ], extras_require=extras, python_requires='>=3.8', From 39e8786e01a6b6974ae8dbe674863edf4d789852 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 12 Feb 2025 22:45:10 -0700 Subject: [PATCH 084/102] Errorgen Transform Map Unit Tests Add in unit tests for now input-output map related functionality. Also revert the partial implementation of models via dictionary (can revisit this in the future if needed). 
Clean up some print statements and commented out code. --- pygsti/errorgenpropagation/errorpropagator.py | 35 ++++--------------- pygsti/modelmembers/operations/embeddedop.py | 27 -------------- .../operations/lindbladerrorgen.py | 8 ----- test/unit/objects/test_errorgenpropagation.py | 22 ++++++++++++ 4 files changed, 28 insertions(+), 64 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index b3ab1bf04..d02a5b986 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -21,7 +21,7 @@ class ErrorGeneratorPropagator: - def __init__(self, model, state_space=None): + def __init__(self, model): """ Initialize an instance of `ErrorGeneratorPropagator`. This class is instantiated with a noise model and manages operations related to propagating error generators through circuits, and constructing @@ -29,34 +29,11 @@ def __init__(self, model, state_space=None): Parameters ---------- - model: `OpModel` or dict - If an `OpModel` this model is used to construct error generators for each layer of a circuit - through which error generators are to be propagated. If a dictionary is passed in then this - dictionary should be an error generator coefficient dictionary, with keys that are - `ElementaryErrorgenLabel`s and values that are rates. This dictionary is then used as the - fixed per-circuit error generator independent of the circuit layers. (Dictionary support in development). - - state_space: `StateSpace`, optional (default None) - Only used if specifying a dictionary for `model` whose keys are - `GlobalElementaryErrorgenLabel`s. - """ - if isinstance(model, dict): - #convert this to one where the keys are `LocalStimErrorgenLabel`s. 
- if isinstance(next(iter(model)), _GlobalElementaryErrorgenLabel): - if state_space is None: - msg = 'When specifying a fixed error generator dictionary as the noise model using keys which are'\ - + '`GlobalElementaryErrorgenLabel` a corresponding `StateSpace` much be specified too.' - raise ValueError(msg) - else: - sslbls = state_space.qubit_labels - lse_dict = {_LSE.cast(lbl, sslbls): rate for lbl, rate in model.items()} - elif isinstance(next(iter(model)), _LocalElementaryErrorgenLabel): - lse_dict = {_LSE.cast(lbl): rate for lbl, rate in model.items()} - else: - lse_dict = model - self.model = lse_dict - else: - self.model = model + model: `OpModel` + This model is used to construct error generators for each layer of a circuit + through which error generators are to be propagated. + """ + self.model = model def eoc_error_channel(self, circuit, include_spam=True, use_bch=False, bch_kwargs=None, mx_basis='pp'): diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 624b98ab9..90ec4f6d6 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -609,33 +609,10 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label """ #*** Note: this function is nearly identical to EmbeddedErrorgen.coefficients() *** coeffs_to_embed = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) - #print(f'{embedded_coeffs=}') if coeffs_to_embed: embedded_labels = self.errorgen_coefficient_labels(label_type=label_type, identity_label=identity_label) embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} - #first_coeff_lbl = next(iter(coeffs_to_embed)) - #if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): -# if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - #mapdict = {loc: tgt for loc, tgt in 
zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - # self.target_labels)} - #embedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in coeffs_to_embed.items()} - #elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): - # embedded_labels = self.errorgen_coefficient_labels() - # #use different embedding scheme for local labels - # base_label = [identity_label for _ in range(self.state_space.num_qudits)] - # embedded_labels = [] - # for lbl in coeff_lbls_to_embed: - # new_bels = [] - # for bel in lbl.basis_element_labels: - # base_label = [identity_label for _ in range(self.state_space.num_qudits)] - # for target, pauli in zip(self.target_labels, bel): - # base_label[target] = pauli - # new_bels.append(''.join(base_label)) - # embedded_labels.append(_LocalElementaryErrorgenLabel(lbl.errorgen_type, tuple(new_bels))) - # embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} - #else: - # raise ValueError(f'Invalid error generator label type {first_coeff_lbl}') else: embedded_coeffs = dict() @@ -672,11 +649,7 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): return self._cached_embedded_errorgen_labels_local labels_to_embed = self.embedded_op.errorgen_coefficient_labels(label_type) - #print(f'{embedded_labels=}') - #if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - #print(f'{self.target_labels=}') - #print(f'{self.embedded_op.state_space.sole_tensor_product_block_labels=}') if len(labels_to_embed)>0: if isinstance(labels_to_embed[0], _GlobalElementaryErrorgenLabel): mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index ffedf5e1b..98f6b644d 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ 
b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -471,7 +471,6 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) blocks.append(blk) - print(blocks) return cls(blocks, basis, mx_basis, evotype, state_space) def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto', mx_basis='pp', @@ -561,12 +560,6 @@ def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto' blk.create_lindblad_term_superoperators(self.matrix_basis, sparse_bases, include_1norms=True, flat=True) for blk in lindblad_coefficient_blocks] - #print(f'{lindblad_coefficient_blocks=}') - #print(f'{len(self.lindblad_term_superops_and_1norms)=}') - #print(f'{self.lindblad_term_superops_and_1norms=}') - - #for (Lterm_superops, _) in self.lindblad_term_superops_and_1norms: - # print(Lterm_superops.shape) #combine all of the linblad term superoperators across the blocks to a single concatenated tensor. 
self.combined_lindblad_term_superops = _np.concatenate([Lterm_superops for (Lterm_superops, _) in self.lindblad_term_superops_and_1norms], axis=0) @@ -1565,7 +1558,6 @@ def minimal_from_elementary_errorgens(cls, errs): parameterization = "CPTPLND" else: parameterization = '+'.join(paramtypes) - print(f'{parameterization=}') return cls.cast(parameterization) @classmethod diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 69005faef..87fba3a42 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -87,6 +87,28 @@ def test_propagation_length_zero_one(self): error_propagator.propagate_errorgens(empty_circuit, include_spam=True) error_propagator.propagate_errorgens(empty_circuit, include_spam=False) + def test_errorgen_transform_map(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + errorgen_input_output_map = error_propagator.errorgen_transform_map(self.circuit, include_spam=True) + + assert errorgen_input_output_map[(_LSE('H', (stim.PauliString("+___X"),)), 1)] == (_LSE('H', (stim.PauliString("+__ZY"),)), 1.0) + assert errorgen_input_output_map[(_LSE('S', (stim.PauliString("+X___"),)), 2)] == (_LSE('S', (stim.PauliString("+Z___"),)), 1.0) + assert errorgen_input_output_map[(_LSE('H', (stim.PauliString("+X___"),)), 3)] == (_LSE('H', (stim.PauliString("+Z___"),)), -1.0) + + def test_errorgen_gate_contributors(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + test_1 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['XIII']), self.circuit, 1, include_spam=True) + assert test_1 == [Label(('Gypi2', 0))] + + test_2 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IYII']), self.circuit, 2, include_spam=False) + assert test_2 == [Label(('Gypi2', 1))] + + test_3 = 
error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IIIX']), self.circuit, 3, include_spam=True) + assert test_3 == [Label(('Gxpi2', 3))] + + test_4 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IIYX']), self.circuit, 4, include_spam=True) + assert test_4 == [Label(('Gcphase', 2, 3))] + class LocalStimErrorgenLabelTester(BaseCase): def setUp(self): self.local_eel = LocalElementaryErrorgenLabel('C', ['XX', 'YY']) From c3d4f2a5ceaa3d54c5a6027cc18e76e12c5a58e9 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Wed, 12 Feb 2025 23:02:36 -0700 Subject: [PATCH 085/102] python 3.8 compatibility Dictionary merge operator was only added in 3.9, rework for compatibility. --- pygsti/tools/errgenproptools.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index b81892d46..253ae7e20 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -436,7 +436,9 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th #Finally accumulate all of the dictionaries in new_errorgen_layer into a single one, summing overlapping terms. 
errorgen_labels_by_order = [{key: None for key in order_dict} for order_dict in new_errorgen_layer] - complete_errorgen_labels = reduce(lambda a, b: a|b, errorgen_labels_by_order) + complete_errorgen_labels = errorgen_labels_by_order[0] + for order_dict in errorgen_labels_by_order[1:]: + complete_errorgen_labels.update(order_dict) #initialize a dictionary with requisite keys new_errorgen_layer_dict = {lbl: 0 for lbl in complete_errorgen_labels} From 0498adb616196574a971e7c692ffbd037f216ae6 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Thu, 13 Feb 2025 23:07:56 -0700 Subject: [PATCH 086/102] Approximate pauli expectation value corrections Implementation of approximate corrections to pauli expectation values using similar approach to the corrections to computational basis readout probabilities. Includes a new alpha function for the linear sensitivity of pauli expectations to error generators. In principle this should work for arbitrary order corrections. Also add in some infrastructure for building out unit tests related to this new functionality. --- pygsti/tools/errgenproptools.py | 296 +++++++++++++++++++++++++++++++- 1 file changed, 295 insertions(+), 1 deletion(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 253ae7e20..7065b5c86 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -6460,7 +6460,7 @@ def pairwise_bch_numerical(mat1, mat2, order=1): def _matrix_commutator(mat1, mat2): return mat1@mat2 - mat2@mat1 -#-----------First-Order Approximate Error Generator Probabilities---------------# +#-----------First-Order Approximate Error Generator Probabilities and Expectation Values---------------# def random_support(tableau, return_support=False): """ @@ -6912,6 +6912,155 @@ def alpha_numerical(errorgen, tableau, desired_bitstring): return alpha +def alpha_pauli(errorgen, tableau, pauli): + """ + First-order error generator sensitivity function for pauli expectations. 
+ + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + pauli : stim.PauliString + Pauli to calculate the sensitivity for. + """ + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + + errgen_type = errorgen.errorgen_type + basis_element_labels = errorgen.basis_element_labels + + if not isinstance(basis_element_labels[0], stim.PauliString): + basis_element_labels = tuple([stim.PauliString(lbl) for lbl in basis_element_labels]) + + identity_pauli = stim.PauliString('I'*len(basis_element_labels[0])) + + if errgen_type == 'H': + pauli_bel_0_comm = com(pauli, basis_element_labels[0]) + if pauli_bel_0_comm is not None: + sign = -1j*pauli_bel_0_comm[0] + expectation = sim.peek_observable_expectation(pauli_bel_0_comm[1]) + return sign*expectation + else: + return 0 + elif errgen_type == 'S': + if pauli.commutes(basis_element_labels[0]): + return 0 + else: + expectation = sim.peek_observable_expectation(pauli) + return -2*expectation + elif errgen_type == 'C': + A = basis_element_labels[0] + B = basis_element_labels[1] + com_AP = A.commutes(pauli) + com_BP = B.commutes(pauli) #TODO: can skip computing this in some cases for minor performance boost. 
+ if A.commutes(B): + if com_AP: + return 0 + else: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return -4*expectation + else: #{A,B} = 0 + if com_AP: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return -2*expectation + else: + if com_BP: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return 2*expectation + else: + return 0 + else: #A + A = basis_element_labels[0] + B = basis_element_labels[1] + com_AP = A.commutes(pauli) + com_BP = B.commutes(pauli) #TODO: can skip computing this in some cases for minor performance boost. + if A.commutes(B): + if com_AP: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return 1j*2*expectation + else: + if com_BP: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return -1j*2*expectation + else: + return 0 + else: #{A,B} = 0 + if com_AP: + return 0 + else: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return 1j*4*expectation + +def alpha_pauli_numerical(errorgen, tableau, pauli): + """ + First-order error generator sensitivity function for pauli expectations. This implementation calculates + this quantity numerically, and as such is primarily intended for use as part of testing + infrastructure. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + pauli : stim.PauliString + Pauli to calculate the sensitivity for. + """ + + #get the stabilizer state corresponding to the tableau. 
+ stabilizer_state = tableau.to_state_vector(endian='big') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + #also get the superoperator (in the standard basis) corresponding to the elementary error generator + if isinstance(errorgen, _LSE): + local_eel = errorgen.to_local_eel() + elif isinstance(errorgen, _GEEL): + local_eel = _LEEL.cast(errorgen) + else: + local_eel = errorgen + + errgen_type = local_eel.errorgen_type + basis_element_labels = local_eel.basis_element_labels + basis_1q = _BuiltinBasis('PP', 4) + errorgen_superop = create_elementary_errorgen_nqudit(errgen_type, basis_element_labels, basis_1q, normalize=False, sparse=False, + tensorprod_basis=False) + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = _np.ravel(pauli_unitary) + pauli_vec.reshape((len(pauli_vec),1)) + + #compute the needed trace inner product. + alpha = _np.real_if_close(pauli_vec.conj().T@errorgen_superop@stabilizer_state_dmvec).item() + + return alpha + def _bitstring_to_int(bitstring) -> int: if isinstance(bitstring, str): # If the input is a string, convert it directly @@ -6992,6 +7141,75 @@ def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, return correction +#TODO: The implementations for the pauli expectation value correction and probability correction +#are basically identical modulo some additional scale factors and the alpha function used. Should be able to combine +#the implementations into one function. +def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order = 1, truncation_threshold = 1e-14): + """ + Compute the kth-order correction to the expectation value of the specified pauli. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. 
+ + tableau : stim.Tableau + Stim tableau corresponding to a particular stabilizer state being measured. + + pauli : stim.PauliString + Pauli operator to compute expectation value correction for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + correction : float + float corresponding to the correction to the expectation value for the + selected pauli operator induced by the error generator (to specified order). + """ + + #do the first order correction separately since it doesn't require composition logic: + #now get the sum over the alphas and the error generator rate products needed. + alpha_errgen_prods = _np.zeros(len(errorgen_dict)) + + for i, (lbl, rate) in enumerate(errorgen_dict.items()): + if abs(rate) > truncation_threshold: + alpha_errgen_prods[i] = alpha_pauli(lbl, tableau, pauli)*rate + correction = _np.sum(alpha_errgen_prods) + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. (given by cartesian products of labels in errorgen_dict). 
+ labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + current_order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + alpha_errgen_prods = _np.zeros(len(composition_results_dict)) + for i, (lbl, rate) in enumerate(composition_results_dict.items()): + if current_order_scale*abs(rate) > truncation_threshold: + sensitivity = alpha_pauli(lbl, tableau, pauli) + alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) + correction += current_order_scale*_np.sum(alpha_errgen_prods) + + return correction + def iterative_error_generator_composition(errorgen_labels, rates): """ @@ -7068,6 +7286,35 @@ def stabilizer_probability(tableau, desired_bitstring): #compute what Gidney calls the tableau fidelity (which in this case gives the probability). return tableau_fidelity(tableau, bitstring_to_tableau(desired_bitstring)) +def stabilizer_pauli_expectation(tableau, pauli): + """ + Calculate the expectation value of the specified pauli operator. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau for the stabilizer state being measured. + + pauli : stim.PauliString + Pauli operator to compute expectation value for. 
+ + Returns + ------- + expected_value : float + Expectation value of specified pauli + """ + if pauli.sign != 1: + pauli_sign = pauli.sign + unsigned_pauli = pauli/pauli_sign + else: + pauli_sign = 1 + unsigned_pauli = pauli + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + expectation = pauli_sign*sim.peek_observable_expectation(unsigned_pauli) + return expectation + def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring, order=1, truncation_threshold=1e-14): """ Calculate the approximate probability of a desired bit string using a first-order approximation. @@ -7115,6 +7362,53 @@ def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring correction = stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order, truncation_threshold) return ideal_prob + correction +def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, order=1, truncation_threshold=1e-14): + """ + Calculate the approximate expectation value of the specified pauli operator using a first-order approximation. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the expectation value for. In either + case this should be a Clifford circuit and convertible to a stim.Tableau. + + pauli : stim.PauliString + Pauli operator to compute expectation value for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. 
(Used internally in computation of expectation value corrections) + + Returns + ------- + expectation : float + Approximate expectation value for the specified pauli operator. + """ + + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + #recast keys to local stim ones if needed. + first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_expectation = stabilizer_pauli_expectation(tableau, pauli) + correction = stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order, truncation_threshold) + return ideal_expectation + correction + def approximate_stabilizer_probabilities(errorgen_dict, circuit, order=1, truncation_threshold=1e-14): """ Calculate the approximate probability distribution over all bitstrings using a first-order approximation. From 7d83da03322d9ac1a07885f2840fe153908641aa Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 14 Feb 2025 12:31:03 -0700 Subject: [PATCH 087/102] Local label support for EmbeddedErrorgen Fix the implementation of local label support for EmbeddedErrorgen. This commit ports over missing logic from the parent EmbeddedOp class related to the embedding of labels properly into the higher-dimension space before returning error generator coefficient related information. This code path gets hit by cloud noise models so this should patch propagation support for those models. Additionally cleans up the documentation a bit, and refactors the embedding logic in the EmbeddedOp class to enable reuse by EmbeddedErrorgen with less code duplication. 
--- pygsti/errorgenpropagation/errorpropagator.py | 97 ++++++++------- .../operations/embeddederrorgen.py | 104 ++++++++++------ pygsti/modelmembers/operations/embeddedop.py | 114 +++++++++--------- 3 files changed, 172 insertions(+), 143 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index d02a5b986..bbf493619 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -239,55 +239,54 @@ def propagate_errorgens_bch(self, circuit, bch_order=1, include_spam=True, trunc return combined_err_layer - def propagate_errorgens_nonmarkovian(self, circuit, include_spam=True): - """ - Propagate all of the error generators for each circuit layer to the end without - any recombinations or averaging. This version also only track the overall modifier/weighting - factor picked up by each of the final error generators over the course of the optimization, - with the actual rates introduced in subsequent stages. - - Parameters - ---------- - circuit : `Circuit` - Circuit to construct a set of post gate error generators for. - - include_spam : bool, optional (default True) - If True then we include in the propagation the error generators associated - with state preparation and measurement. - - Returns - ------- - propagated_errorgen_layers : list of lists of dictionaries - A list of lists of dictionaries, each corresponding to the result of propagating - an error generator layer through to the end of the circuit. - - """ - #start by converting the input circuit into a list of stim Tableaus with the - #first element dropped. - stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) - - #We next want to construct a new set of Tableaus corresponding to the cumulative products - #of each of the circuit layers with those that follow. 
These Tableaus correspond to the - #clifford operations each error generator will be propagated through in order to reach the - #end of the circuit. - propagation_layers = self.construct_propagation_layers(stim_layers) - - #Next we take the input circuit and construct a list of dictionaries, each corresponding - #to the error generators for a particular gate layer. - #TODO: Add proper inferencing for number of qubits: - assert circuit.line_labels is not None and circuit.line_labels != ('*',) - errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, - include_circuit_time=True, fixed_rate=1) - - #propagate the errorgen_layers through the propagation_layers to get a list - #of end of circuit error generator dictionaries. - propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) - - #in the context of doing propagation for nonmarkovianity we won't be using BCH, so do a partial flattening - #of this data structure. - propagated_errorgen_layers = [errorgen_layers[0] for errorgen_layers in propagated_errorgen_layers] - - return propagated_errorgen_layers +# def propagate_errorgens_nonmarkovian(self, circuit, include_spam=True): +# """ +# Propagate all of the error generators for each circuit layer to the end without +# any recombinations or averaging. This version also only track the overall modifier/weighting +# factor picked up by each of the final error generators over the course of the optimization, +# with the actual rates introduced in subsequent stages. +# +# Parameters +# ---------- +# circuit : `Circuit` +# Circuit to construct a set of post gate error generators for. +# +# include_spam : bool, optional (default True) +# If True then we include in the propagation the error generators associated +# with state preparation and measurement. 
+# +# Returns +# ------- +# propagated_errorgen_layers : list of lists of dictionaries +# A list of lists of dictionaries, each corresponding to the result of propagating +# an error generator layer through to the end of the circuit. +# +# """ +# #start by converting the input circuit into a list of stim Tableaus with the +# #first element dropped. +# stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) +# +# #We next want to construct a new set of Tableaus corresponding to the cumulative products +# #of each of the circuit layers with those that follow. These Tableaus correspond to the +# #clifford operations each error generator will be propagated through in order to reach the +# #end of the circuit. +# propagation_layers = self.construct_propagation_layers(stim_layers) +# +# #Next we take the input circuit and construct a list of dictionaries, each corresponding +# #to the error generators for a particular gate layer. +# #TODO: Add proper inferencing for number of qubits: +# assert circuit.line_labels is not None and circuit.line_labels != ('*',) +# errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, +# include_circuit_time=True, fixed_rate=1) +# #propagate the errorgen_layers through the propagation_layers to get a list +# #of end of circuit error generator dictionaries. +# propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) +# +# #in the context of doing propagation for nonmarkovianity we won't be using BCH, so do a partial flattening +# #of this data structure. 
+# propagated_errorgen_layers = [errorgen_layers[0] for errorgen_layers in propagated_errorgen_layers] +# +# return propagated_errorgen_layers def errorgen_transform_map(self, circuit, include_spam=True): diff --git a/pygsti/modelmembers/operations/embeddederrorgen.py b/pygsti/modelmembers/operations/embeddederrorgen.py index 1334ecdf4..68d7e8dc6 100644 --- a/pygsti/modelmembers/operations/embeddederrorgen.py +++ b/pygsti/modelmembers/operations/embeddederrorgen.py @@ -10,7 +10,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** -import collections as _collections from pygsti.baseobjs.basis import Basis as _Basis import warnings as _warnings @@ -93,7 +92,10 @@ def from_vector(self, v, close=False, dirty_value=True): _EmbeddedOp.from_vector(self, v, close, dirty_value) self.dirty = dirty_value - def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): + #TODO: I don't think the return_basis flag actually works atm. Maybe remove? + #TODO: Refactor naming to match EmbeddedOp. Only reason we can't just directly use the + #method from the parent class is naming convention mismatches for methods on children. + def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global', identity_label='I'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -122,24 +124,33 @@ def coefficients(self, return_basis=False, logscale_nonham=False, label_type='gl 'global' for `GlobalElementaryErrorgenLabel` and 'local' for `LocalElementaryErrorgenLabel`. + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. 
Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- - Ltermdict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are integers starting at 0. Values are complex - coefficients. - basis : Basis - A Basis mapping the basis labels used in the - keys of `Ltermdict` to basis matrices. + embedded_coeffs : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ - return self.embedded_op.coefficients(return_basis, logscale_nonham, label_type) + coeffs_to_embed = self.embedded_op.coefficients(return_basis, logscale_nonham, label_type) - def coefficient_labels(self, label_type='global'): + if coeffs_to_embed: + embedded_labels = self.coefficient_labels(label_type=label_type, identity_label=identity_label) + embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} + else: + embedded_coeffs = dict() + + return embedded_coeffs + + def coefficient_labels(self, label_type='global', identity_label='I'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. @@ -151,13 +162,29 @@ def coefficient_labels(self, label_type='global'): 'global' for `GlobalElementaryErrorgenLabel` and 'local' for `LocalElementaryErrorgenLabel`. 
+ identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- tuple A tuple of (, [,)` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are real error rates except for the 2-basis-label - case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ - return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type) + return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type, identity_label=identity_label) def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham=False, truncate=True): """ @@ -245,14 +276,11 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= Parameters ---------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. 
Values are the coefficients of these error generators, - and should be real except for the 2-basis-label case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. Values are corresponding rates. action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -277,7 +305,9 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= ------- None """ - self.embedded_op.set_coefficients(lindblad_term_dict, action, logscale_nonham, truncate) + if lindblad_term_dict: + unembedded_coeffs = self._unembed_coeff_dict_labels(lindblad_term_dict) + self.embedded_op.set_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) def set_error_rates(self, lindblad_term_dict, action="update"): """ @@ -375,6 +405,6 @@ def __str__(self): """ Return string representation """ s = "Embedded error generator with full dimension %d and state space %s\n" % (self.dim, self.state_space) s += " that embeds the following %d-dimensional operation into acting on the %s space\n" \ - % (self.embedded_op.dim, str(self.targetLabels)) + % (self.embedded_op.dim, str(self.target_labels)) s += str(self.embedded_op) return s diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 90ec4f6d6..2e0bce671 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -10,7 +10,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
#*************************************************************************************************** -import collections as _collections import itertools as _itertools import numpy as _np @@ -556,6 +555,7 @@ def transform_inplace(self, s): # s and Sinv matrices... but haven't needed it yet. raise NotImplementedError("Cannot transform an EmbeddedOp yet...") + #TODO: I don't think the return_basis flag actually works atm. Maybe remove? def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global', identity_label='I'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -594,18 +594,12 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label Returns ------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are integers starting at 0. Values are complex - coefficients. - basis : Basis - A Basis mapping the basis labels used in the - keys of `lindblad_term_dict` to basis matrices. + embedded_coeffs : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. 
""" #*** Note: this function is nearly identical to EmbeddedErrorgen.coefficients() *** coeffs_to_embed = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) @@ -649,7 +643,12 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): return self._cached_embedded_errorgen_labels_local labels_to_embed = self.embedded_op.errorgen_coefficient_labels(label_type) + embedded_labels = self._embed_labels(labels_to_embed, label_type, identity_label) + + return embedded_labels + def _embed_labels(self, labels_to_embed, label_type='global', identity_label='I'): + """Helper function encapsulating error generator coefficient emedding logic""" if len(labels_to_embed)>0: if isinstance(labels_to_embed[0], _GlobalElementaryErrorgenLabel): mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, @@ -675,7 +674,6 @@ def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): raise ValueError(f'Invalid error generator label type {labels_to_embed[0]}') else: embedded_labels = tuple() - return embedded_labels def errorgen_coefficients_array(self): @@ -748,14 +746,11 @@ def error_rates(self, label_type='global', identity_label='I'): Returns ------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are real error rates except for the 2-basis-label - case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). 
Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ return self.errorgen_coefficients(return_basis=False, logscale_nonham=True, label_type=label_type, identity_label=identity_label) @@ -769,14 +764,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal Parameters ---------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are the coefficients of these error generators, - and should be real except for the 2-basis-label case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. Values are corresponding rates. action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -803,37 +795,45 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal """ #determine is we need to unembed the error generator labels in lindblad_term_dict. 
if lindblad_term_dict: - first_coeff_lbl = next(iter(lindblad_term_dict)) - if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} - else: - unembedded_coeffs = lindblad_term_dict - elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): - #if the length of the basis element labels are the same as the length of this - #embedded op's target labels then assume those are associated. - if len(first_coeff_lbl.basis_element_labels[0]) == len(self.target_labels): - unembedded_coeffs = lindblad_term_dict - #if the length is equal to the number of qudits then we need to unembed. - elif len(first_coeff_lbl.basis_element_labels[0]) == self.state_space.num_qudits: - unembedded_labels = list(lindblad_term_dict.keys()) - for lbl in unembedded_labels: - new_bels = [] - for bel in lbl.basis_element_labels: - new_bels.append("".join(bel[target] for target in self.target_labels)) - lbl.basis_element_labels = tuple(new_bels) - unembedded_coeffs = {lbl:val for lbl, val in zip(unembedded_labels, lindblad_term_dict.values())} - else: - msg = "Could not parse error generator labels. Expected either length equal to this embedded op's"\ - +" target_labels or equal to the number of qudits." - raise ValueError(msg) - + unembedded_coeffs = self._unembed_coeff_dict_labels(lindblad_term_dict) self.embedded_op.set_errorgen_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) if self._rep_type == 'dense': self._update_denserep() self.dirty = True + def _unembed_coeff_dict_labels(self, lindblad_term_dict): + """ + Helper function encapsulating unembedding logic for error generator labels. 
+ Returns a new dictionary of error generator coefficient rate with unembedded labels. + """ + first_coeff_lbl = next(iter(lindblad_term_dict)) + if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): + if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: + mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} + else: + unembedded_coeffs = lindblad_term_dict + elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): + #if the length of the basis element labels are the same as the length of this + #embedded op's target labels then assume those are associated. + if len(first_coeff_lbl.basis_element_labels[0]) == len(self.target_labels): + unembedded_coeffs = lindblad_term_dict + #if the length is equal to the number of qudits then we need to unembed. + elif len(first_coeff_lbl.basis_element_labels[0]) == self.state_space.num_qudits: + unembedded_labels = list(lindblad_term_dict.keys()) + for lbl in unembedded_labels: + new_bels = [] + for bel in lbl.basis_element_labels: + new_bels.append("".join(bel[target] for target in self.target_labels)) + lbl.basis_element_labels = tuple(new_bels) + unembedded_coeffs = {lbl:val for lbl, val in zip(unembedded_labels, lindblad_term_dict.values())} + else: + msg = "Could not parse error generator labels. Expected either length equal to this embedded op's"\ + +" target_labels or equal to the number of qudits." + raise ValueError(msg) + return unembedded_coeffs + + def set_error_rates(self, lindblad_term_dict, action="update"): """ Sets the coeffcients of terms in the error generator of this operation. From c2e400b1dfe0eec2897f2464341af397cb7c8dab Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 14 Feb 2025 14:19:32 -0700 Subject: [PATCH 088/102] Minor tweak to alpha_pauli Cast the output to real values. 
--- pygsti/tools/basistools.py | 2 +- pygsti/tools/errgenproptools.py | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index 3b7793717..b9878e5f3 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -125,7 +125,7 @@ def change_basis(mx, from_basis, to_basis, expect_real=True): Parameters ---------- mx : numpy array - The operation matrix (a 2D square array) in the `from_basis` basis. + The operation matrix (a 2D square array or 1D vector) in the `from_basis` basis. from_basis: {'std', 'gm', 'pp', 'qt'} or Basis object The source basis. Allowed values are Matrix-unit (std), Gell-Mann (gm), diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 7065b5c86..7b58d8798 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -6944,7 +6944,7 @@ def alpha_pauli(errorgen, tableau, pauli): if pauli_bel_0_comm is not None: sign = -1j*pauli_bel_0_comm[0] expectation = sim.peek_observable_expectation(pauli_bel_0_comm[1]) - return sign*expectation + return _np.real_if_close(sign*expectation) else: return 0 elif errgen_type == 'S': @@ -6952,7 +6952,7 @@ def alpha_pauli(errorgen, tableau, pauli): return 0 else: expectation = sim.peek_observable_expectation(pauli) - return -2*expectation + return _np.real_if_close(-2*expectation) elif errgen_type == 'C': A = basis_element_labels[0] B = basis_element_labels[1] @@ -6967,7 +6967,7 @@ def alpha_pauli(errorgen, tableau, pauli): else: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return -4*expectation + return _np.real_if_close(-4*expectation) else: #{A,B} = 0 if com_AP: if com_BP: @@ -6975,12 +6975,12 @@ def alpha_pauli(errorgen, tableau, pauli): else: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return -2*expectation + return _np.real_if_close(-2*expectation) else: 
if com_BP: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return 2*expectation + return _np.real_if_close(2*expectation) else: return 0 else: #A @@ -6995,12 +6995,12 @@ def alpha_pauli(errorgen, tableau, pauli): else: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return 1j*2*expectation + return _np.real_if_close(1j*2*expectation) else: if com_BP: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return -1j*2*expectation + return _np.real_if_close(-1j*2*expectation) else: return 0 else: #{A,B} = 0 @@ -7012,7 +7012,7 @@ def alpha_pauli(errorgen, tableau, pauli): else: ABP = pauli_product(A*B, pauli) expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) - return 1j*4*expectation + return _np.real_if_close(1j*4*expectation) def alpha_pauli_numerical(errorgen, tableau, pauli): """ @@ -7181,6 +7181,8 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order for i, (lbl, rate) in enumerate(errorgen_dict.items()): if abs(rate) > truncation_threshold: + #print(f'{alpha_pauli(lbl, tableau, pauli)=}') + #print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') alpha_errgen_prods[i] = alpha_pauli(lbl, tableau, pauli)*rate correction = _np.sum(alpha_errgen_prods) if order > 1: From 455103b455c648e43f642f7099732801ab6c9926 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Fri, 14 Feb 2025 20:26:32 -0700 Subject: [PATCH 089/102] A bunch more unit tests Add a bunch more unit tests covering the following: - Iterative error generator composition (more comprehensive testing against numerics). - approximate pauli expectation corrections - Explicit and cloud crosstalk models for propagation Also rework the iterative error generator composition a bit, including checks for short circuitablility (when a composition that gives zero is detected). 
--- pygsti/tools/errgenproptools.py | 95 ++++++++--- test/unit/objects/test_errorgenpropagation.py | 96 +++++++++++- test/unit/tools/test_errgenproptools.py | 147 +++++++++++++++++- 3 files changed, 312 insertions(+), 26 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 7b58d8798..432959725 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -7238,32 +7238,89 @@ def iterative_error_generator_composition(errorgen_labels, rates): if len(errorgen_labels) == 1: return [(errorgen_labels[0], rates[0])] else: - labels_to_process = [errorgen_labels] - rates_to_process = [rates] + label_tuples_to_process = [errorgen_labels] + rate_tuples_to_process = [rates] - fully_processed_labels = [] - - while labels_to_process: - new_labels_to_process = [] - new_rates_to_process = [] - #loop through the labels to process - for label_tup, rate_tup in zip(labels_to_process, rates_to_process): + fully_processed_label_rate_tuples = [] + while label_tuples_to_process: + new_label_tuples_to_process = [] + new_rate_tuples_to_process = [] + + for label_tup, rate_tup in zip(label_tuples_to_process, rate_tuples_to_process): #grab the last two elements of each of these and do the composition. new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) - for new_label_rate_tup in new_labels_and_rates: - new_label_tup = label_tup[:-2] + (new_label_rate_tup[0],) - new_rate_tup = rate_tup[:-2] + (new_label_rate_tup[1],) - if len(new_label_tup) == 1: - fully_processed_labels.append(new_label_rate_tup) + #if the new labels and rates sum to zero overall then we can kill this branch of the tree. 
+ aggregated_labels_and_rates_dict = dict() + for lbl, rate in new_labels_and_rates: + if aggregated_labels_and_rates_dict.get(lbl, None) is None: + aggregated_labels_and_rates_dict[lbl] = rate else: - new_labels_to_process.append(new_label_tup) - new_rates_to_process.append(new_rate_tup) + aggregated_labels_and_rates_dict[lbl] += rate + if all([abs(val)<1e-15 for val in aggregated_labels_and_rates_dict.values()]): + continue + + label_tup_remainder = label_tup[:-2] + rate_tup_remainder = rate_tup[:-2] + if label_tup_remainder: + for new_label, new_rate in aggregated_labels_and_rates_dict.items(): + new_label_tup = label_tup_remainder + (new_label,) + new_rate_tup = rate_tup_remainder + (new_rate,) + new_label_tuples_to_process.append(new_label_tup) + new_rate_tuples_to_process.append(new_rate_tup) + else: + for new_label_rate_tup in aggregated_labels_and_rates_dict.items(): + fully_processed_label_rate_tuples.append(new_label_rate_tup) + label_tuples_to_process = new_label_tuples_to_process + rate_tuples_to_process = new_rate_tuples_to_process + + return fully_processed_label_rate_tuples + +def iterative_error_generator_composition_numeric(errorgen_labels, rates, errorgen_matrix_dict=None, num_qubits=None): + """ + Iteratively compute error generator compositions. The function computes a dense representation of this composition + numerically and is primarily intended as part of testing infrastructure. + + Parameters + ---------- + errorgen_labels : tuple of `LocalStimErrorgenLabel` + A tuple of the elementary error generator labels to be composed. + + rates : tuple of float + A tuple of corresponding error generator rates of the same length as the tuple + of error generator labels. - labels_to_process = new_labels_to_process - rates_to_process = new_rates_to_process + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. 
+ If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + num_qubits : int, optional (default None) + Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + + Returns + ------- + numpy.ndarray + Dense numpy array representation of the super operator corresponding to the iterated composition written in + the standard basis. + """ - return fully_processed_labels + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + composition = errorgen_matrix_dict[errorgen_labels[0]] + for lbl in errorgen_labels[1:]: + composition = composition@errorgen_matrix_dict[lbl] + composition *= _np.prod(rates) + return composition + + def stabilizer_probability(tableau, desired_bitstring): """ diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py index 87fba3a42..73a889719 100644 --- a/test/unit/objects/test_errorgenpropagation.py +++ b/test/unit/objects/test_errorgenpropagation.py @@ -1,15 +1,17 @@ from ..util import BaseCase from pygsti.circuits import Circuit -from pygsti.algorithms.randomcircuit import create_random_circuit +from pygsti.algorithms.randomcircuit import create_random_circuit, find_all_sets_of_compatible_two_q_gates from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator from pygsti.processors import QubitProcessorSpec -from pygsti.models.modelconstruction import create_crosstalk_free_model -from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis +from 
pygsti.models.modelconstruction import create_crosstalk_free_model, create_cloud_crosstalk_model +from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis, QubitGraph from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel from pygsti.tools import errgenproptools as _eprop from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.tools.matrixtools import print_mx from itertools import product +from math import floor +from pygsti.modelpacks import smq2Q_XYCPHASE import numpy as np import stim @@ -109,6 +111,37 @@ def test_errorgen_gate_contributors(self): test_4 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IIYX']), self.circuit, 4, include_spam=True) assert test_4 == [Label(('Gcphase', 2, 3))] + def test_explicit_model(self): + + target_model = smq2Q_XYCPHASE.target_model('full TP') + noisy_model = target_model.copy() + noisy_model = noisy_model.rotate(max_rotate = .01) + noisy_model.set_all_parameterizations('GLND') + errorgen_propagator = ErrorGeneratorPropagator(noisy_model) + circuit_2Q = list(smq2Q_XYCPHASE.create_gst_experiment_design(4).all_circuits_needing_data)[-1] + + #make sure that the various methods don't die. 
+ propagated_errorgens = errorgen_propagator.propagate_errorgens(circuit_2Q) + gate_contributors = errorgen_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['XI']), circuit_2Q, 1, include_spam=True) + + def test_cloud_crosstalk_model(self): + oq=['Gxpi2','Gypi2','Gzpi2'] + qbts=4 + gate_names=oq+['Gcphase'] + max_strengths = {1: {'S': 10**(-3), 'H': 10**(-2)}, + 2: {'S': (1/6)*10**(-2), 'H': 2*10**(-3)} + } + + #Build circuit models + qubit_labels =range(qbts) + gate_names = ['Gxpi2','Gzpi2','Gcphase','Gypi2'] + ps = QubitProcessorSpec(qbts, gate_names,availability= {'Gcphase':[(i,(i+1)%qbts) for i in range(qbts)]} , qubit_labels=qubit_labels) + lindblad_error_coeffs=sample_error_rates_cloud_crosstalk(max_strengths,4,gate_names) + mdl_cloudnoise = create_cloud_crosstalk_model(ps, lindblad_error_coeffs=lindblad_error_coeffs, errcomp_type="errorgens") + errorgen_prop=ErrorGeneratorPropagator(mdl_cloudnoise) + propagated_errorgens = errorgen_prop.propagate_errorgens(self.circuit) + gate_contributors = errorgen_prop.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IZZI']), self.circuit, 1, include_spam=True) + class LocalStimErrorgenLabelTester(BaseCase): def setUp(self): self.local_eel = LocalElementaryErrorgenLabel('C', ['XX', 'YY']) @@ -257,4 +290,61 @@ def error_generator_commutator_numerical(errorgen_1, errorgen_2, errorgen_matrix return errorgen_matrix_dict[errorgen_1]@errorgen_matrix_dict[errorgen_2] - errorgen_matrix_dict[errorgen_2]@errorgen_matrix_dict[errorgen_1] +#--------- Cloud crosstalk helper functions---------------------# +def sample_error_rates_cloud_crosstalk(strengths,qbts, gates): + error_rates_dict = {} + for gate in gates: + if not gate =='Gcphase': + for el in range(qbts): + stochastic_strength = strengths[1]['S']*np.random.random() + hamiltonian_strength = 2*strengths[1]['H']*np.random.random()-strengths[1]['H'] + paulis=['X','Y','Z'] + error_rates_dict[(gate,el)]=dict() + for pauli_label in paulis: + if 
(gate=='Gxpi2' and pauli_label=='X') or (gate=='Gypi2' and pauli_label=='Y') or (gate=='Gzpi2' and pauli_label=='Z'): + error_rates_dict[(gate,el)].update({('H', pauli_label+':'+str(el)):hamiltonian_strength}) + error_rates_dict[(gate,el)].update({('S', pauli_label+':'+str(el)): stochastic_strength}) + else: + error_rates_dict[(gate,el)].update({('H', pauli_label+':'+str(el)):0.0}) + error_rates_dict[(gate,el)].update({('S', pauli_label+':'+str(el)): 0.0}) + else: + for qbt in range(qbts): + + gate_lbl=('Gcphase',qbt,(qbt+1)%4) + error_rates_dict[gate_lbl]=dict() + for qbt1 in range(qbts): + for qbt2 in range(qbts): + if qbt1 < qbt2: + hamiltonian_strength = 2*strengths[2]['H']*np.random.random()-strengths[2]['H'] + for pauli in two_qbt_pauli_str(): + if pauli =='ZZ': + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)+','+str(qbt2)):hamiltonian_strength}) + else: + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)+','+str(qbt2)):0.0}) + + for qbt1 in range(qbts): + hamiltonian_strength = 2*strengths[2]['H']*np.random.random()-strengths[2]['H'] + for pauli in ['X','Y','Z']: + if pauli=='Z': + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)):hamiltonian_strength}) + else: + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)):0.0}) + + + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'ZZ:'+str(gate_lbl[1])+','+str(gate_lbl[2])): stochastic_strength}) + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'Z:'+str(gate_lbl[1])): stochastic_strength}) + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'Z:'+str(gate_lbl[2])): stochastic_strength}) + + return error_rates_dict +def two_qbt_pauli_str(): + paulis=['I','X','Y','Z'] + pauli_strs=[] + for p1 in paulis: + for p2 in paulis: + pauli_strs.append(p1+p2) + pauli_strs.remove('II') + return pauli_strs diff --git 
a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index 6de3ad418..fb61c4e4f 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -8,6 +8,7 @@ from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE from pygsti.tools import errgenproptools as _eprop from pygsti.tools.matrixtools import print_mx +from pygsti.tools.basistools import change_basis from ..util import BaseCase from itertools import product import random @@ -162,9 +163,8 @@ def test_iterative_error_generator_composition(self): (_LSE('S', [stim.PauliString('YY')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')]))] rates = [(1,1,1), (1,1,1), (1,1,1)] - correct_iterative_compositions = [[(_LSE('H', (stim.PauliString("+X"),)), (-2-0j)), (_LSE('H', (stim.PauliString("+X"),)), -2)], - [(_LSE('H', (stim.PauliString("+X_"),)), (-1+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), - (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (1+0j)), (_LSE('H', (stim.PauliString("+X_"),)), (-1+0j))], + correct_iterative_compositions = [[(_LSE('H', (stim.PauliString("+X"),)), (-4-0j))], + [(_LSE('H', (stim.PauliString("+X_"),)), (-2+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (2+0j))], [(_LSE('C', (stim.PauliString("+YZ"), stim.PauliString("+ZY"))), (1+0j)), (_LSE('C', (stim.PauliString("+YY"), stim.PauliString("+ZZ"))), (1+0j)), (_LSE('C', (stim.PauliString("+_X"), stim.PauliString("+X_"))), -1)] ] @@ -173,6 +173,9 @@ def test_iterative_error_generator_composition(self): iterated_composition = _eprop.iterative_error_generator_composition(lbls, rates) self.assertEqual(iterated_composition, correct_lbls) + _compare_analytic_numeric_iterative_composition(2) + + def test_bch_approximation(self): first_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=1) 
propagated_errorgen_layers_bch_order_1 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) @@ -324,6 +327,30 @@ def test_alpha(self): alpha_num = _eprop.alpha_numerical(lbl, self.circuit_tableau_3Q, bit_string) assert abs(alpha_num - _eprop.alpha(lbl, self.circuit_tableau_3Q, bit_string)) <1e-4 + def test_alpha_pauli(self): + from pygsti.modelpacks import smq2Q_XYCPHASE + pspec_2Q = smq2Q_XYCPHASE.processor_spec() + random_circuits_2Q = [create_random_circuit(pspec_2Q, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345+i) for i in range(5)] + random_circuit_tableaus_2Q = [ckt.convert_to_stim_tableau() for ckt in random_circuits_2Q] + def _compare_alpha_pauli_analytic_numeric(num_qubits, tableau): + #loop through all error generators and all paulis + errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(num_qubits), default_label_type='local') + errorgen_labels = [_LSE.cast(lbl) for lbl in errorgen_basis.labels] + pauli_list = list(stim.PauliString.iter_all(num_qubits)) + for lbl in errorgen_labels: + for pauli in pauli_list: + alpha_analytic = _eprop.alpha_pauli(lbl, tableau, pauli) + alpha_numerical = _eprop.alpha_pauli_numerical(lbl, tableau, pauli) + + if abs(alpha_analytic - alpha_numerical)>1e-5: + print(f'{alpha_analytic=}') + print(f'{alpha_numerical=}') + print(f'error generator label: {lbl}') + print(f'pauli: {pauli}') + raise ValueError('Analytic and numerically computed alpha pauli values differ by more than 1e-5') + for ckt_tableau in random_circuit_tableaus_2Q: + _compare_alpha_pauli_analytic_numeric(2, ckt_tableau) + def test_stabilizer_probability_correction(self): #The corrections testing here will just be integration testing, we'll #check for correctness with the probability functions instead. 
@@ -333,6 +360,15 @@ def test_stabilizer_probability_correction(self): for order in orders: _eprop.stabilizer_probability_correction(self.propagated_errorgen_layer, self.circuit_tableau, bitstring, order) + def test_stabilizer_pauli_expectation_correction(self): + #The corrections testing here will just be integration testing, we'll + #check for correctness with the full expecation functions instead. + paulis = [stim.PauliString('XXXX'), stim.PauliString('ZIII')] + orders = [1,2,3] + for pauli in paulis: + for order in orders: + _eprop.stabilizer_pauli_expectation_correction(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order) + def test_approximate_stabilizer_probability(self): exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=1) @@ -371,6 +407,30 @@ def test_approximate_stabilizer_probabilities(self): assert tvd_order_1 > tvd_order_2 + def test_approximate_stabilizer_pauli_expectation(self): + rng = np.random.default_rng(seed=12345) + paulis_4Q = list(stim.PauliString.iter_all(4)) + #random_4Q_pauli_indices = rng.choice(len(paulis_4Q), 10, replace=False) + #random_4Q_paulis = [paulis_4Q[idx] for idx in random_4Q_pauli_indices] + + for pauli in paulis_4Q:#random_4Q_paulis: + exact_pauli_expectation = pauli_expectation_errorgen_prop(self.error_propagator, self.target_model, + self.circuit, pauli, use_bch=True, bch_order=1) + first_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=1) + second_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=2) + third_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=3) + + if abs(first_order_diff) < abs(second_order_diff): + 
print(f'{first_order_diff=}') + print(f'{second_order_diff=}') + print(f'{pauli=}') + raise ValueError('Going to higher order made the expectation value worse!') + if abs(second_order_diff) < abs(third_order_diff): + print(f'{second_order_diff=}') + print(f'{third_order_diff=}') + print(f'{pauli=}') + raise ValueError('Going to higher order made the expectation value worse!') + def test_error_generator_taylor_expansion(self): #this is just an integration test atm. _eprop.error_generator_taylor_expansion(self.propagated_errorgen_layer, order=2) @@ -486,4 +546,83 @@ def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch dense_effect = effect.to_dense().copy() dense_prep = ideal_prep.to_dense().copy() prob_vec[i] = np.linalg.multi_dot([dense_effect.reshape((1,len(dense_effect))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]) - return prob_vec \ No newline at end of file + return prob_vec + +def pauli_expectation_errorgen_prop(error_propagator, target_model, circuit, pauli, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = np.ravel(pauli_unitary) + pauli_vec.reshape((len(pauli_vec),1)) + #put this in pp basis (since these are paulis themselves I could just read this off directly). 
+ pauli_vec = change_basis(pauli_vec, 'std', 'pp') + #print(pauli_vec) + dense_prep = ideal_prep.to_dense().copy() + expectation = np.linalg.multi_dot([pauli_vec.reshape((1,len(pauli_vec))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]).item() + return expectation + +#helper function for iterative composition testing +def _compare_analytic_numeric_iterative_composition(num_qubits): + #create an error generator basis. + complete_errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(num_qubits), default_label_type='local') + complete_errorgen_lbls = complete_errorgen_basis.labels + complete_errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(complete_errorgen_lbls, complete_errorgen_basis.elemgen_matrices)} + + #loop through all triples. + errorgen_label_triples = list(product(complete_errorgen_lbls,repeat=3)) + + #select a random subset of these + rng = np.random.default_rng(seed=1234) + random_indices = rng.choice(len(errorgen_label_triples), 10000) + random_triples = [errorgen_label_triples[idx] for idx in random_indices] + + #create local stim error gen label versions: + random_triples_stim = [(_LSE.cast(a), _LSE.cast(b), _LSE.cast(c)) for a,b,c in random_triples] + + #for each triple compute the composition directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. 
+ for i, (triple_1, triple_2) in enumerate(zip(random_triples, random_triples_stim)): + numeric_composition = _eprop.iterative_error_generator_composition_numeric(triple_1, (1,1,1), complete_errorgen_lbl_matrix_dict) + analytic_composition = _eprop.iterative_error_generator_composition(triple_2, (1,1,1)) + analytic_composition_dict = dict() + for lbl, rate in analytic_composition: + local_lbl = lbl.to_local_eel() + if analytic_composition_dict.get(local_lbl, None) is None: + analytic_composition_dict[local_lbl] = rate + else: + analytic_composition_dict[local_lbl] += rate + analytic_composition = analytic_composition_dict + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, num_qubits, errorgen_matrix_dict = complete_errorgen_lbl_matrix_dict) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for triple {triple_1} is greater than 1e-10.') + print(f'{triple_2=}') + print(f'Error encountered on iteration {i}') + print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis.labels, complete_errorgen_basis.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') From 946a374be982d376961c3aa4a2cd3949d5c60e6e Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 15 Feb 2025 17:11:10 -0700 Subject: [PATCH 090/102] Still more testing additions Add comparisons to numerics for the pauli expectation values. 
--- pygsti/errorgenpropagation/errorpropagator.py | 6 +- pygsti/tools/errgenproptools.py | 332 ++++++++++++------ test/unit/tools/test_errgenproptools.py | 39 +- 3 files changed, 255 insertions(+), 122 deletions(-) diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index bbf493619..54fc431cf 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -73,7 +73,7 @@ def eoc_error_channel(self, circuit, include_spam=True, use_bch=False, #should return a single dictionary of error generator rates propagated_error_generator = self.propagate_errorgens_bch(circuit, **bch_kwargs) #convert this to a process matrix - return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp', return_dense=True)) + return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp')) else: propagated_error_generators = self.propagate_errorgens(circuit, include_spam) @@ -84,7 +84,7 @@ def eoc_error_channel(self, circuit, include_spam=True, use_bch=False, if err_gen_layer: #if not empty. #Keep the error generator in the standard basis until after the end-of-circuit #channel is constructed so we can reduce the overhead of changing basis. - exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp', return_dense=True))) + exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp'))) #Next take the product of these exponentiated error generators. #These are in circuit ordering, so reverse for matmul. 
exp_error_generators.reverse() @@ -610,7 +610,7 @@ def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, includ return fully_propagated_layers - def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp', return_dense=False): + def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): """ Helper method for converting from an error generator dictionary in the format utilized in the `errorgenpropagation` module into a numpy array. diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 432959725..6edaac05f 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -6257,6 +6257,68 @@ def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=No return mat +def iterative_error_generator_composition(errorgen_labels, rates): + """ + Iteratively compute error generator compositions. Each error generator + composition in general returns a list of multiple new error generators, + so this function manages the distribution and recursive application + of the compositions for two-or-more error generator labels. + + Parameters + ---------- + errorgen_labels : tuple of `LocalStimErrorgenLabel` + A tuple of the elementary error generator labels to be composed. + + rates : tuple of float + A tuple of corresponding error generator rates of the same length as the tuple + of error generator labels. + + Returns + ------- + List of tuples, the first element of each tuple is a `LocalStimErrorgenLabel`. + The second element of each tuple is the final rate for that term. 
+ """ + + if len(errorgen_labels) == 1: + return [(errorgen_labels[0], rates[0])] + else: + label_tuples_to_process = [errorgen_labels] + rate_tuples_to_process = [rates] + + fully_processed_label_rate_tuples = [] + while label_tuples_to_process: + new_label_tuples_to_process = [] + new_rate_tuples_to_process = [] + + for label_tup, rate_tup in zip(label_tuples_to_process, rate_tuples_to_process): + #grab the last two elements of each of these and do the composition. + new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) + + #if the new labels and rates sum to zero overall then we can kill this branch of the tree. + aggregated_labels_and_rates_dict = dict() + for lbl, rate in new_labels_and_rates: + if aggregated_labels_and_rates_dict.get(lbl, None) is None: + aggregated_labels_and_rates_dict[lbl] = rate + else: + aggregated_labels_and_rates_dict[lbl] += rate + if all([abs(val)<1e-15 for val in aggregated_labels_and_rates_dict.values()]): + continue + + label_tup_remainder = label_tup[:-2] + rate_tup_remainder = rate_tup[:-2] + if label_tup_remainder: + for new_label, new_rate in aggregated_labels_and_rates_dict.items(): + new_label_tup = label_tup_remainder + (new_label,) + new_rate_tup = rate_tup_remainder + (new_rate,) + new_label_tuples_to_process.append(new_label_tup) + new_rate_tuples_to_process.append(new_rate_tup) + else: + for new_label_rate_tup in aggregated_labels_and_rates_dict.items(): + fully_processed_label_rate_tuples.append(new_label_rate_tup) + label_tuples_to_process = new_label_tuples_to_process + rate_tuples_to_process = new_rate_tuples_to_process + + return fully_processed_label_rate_tuples #Helper functions for doing numeric commutators, compositions and BCH. 
@@ -6460,6 +6522,50 @@ def pairwise_bch_numerical(mat1, mat2, order=1): def _matrix_commutator(mat1, mat2): return mat1@mat2 - mat2@mat1 +def iterative_error_generator_composition_numerical(errorgen_labels, rates, errorgen_matrix_dict=None, num_qubits=None): + """ + Iteratively compute error generator compositions. The function computes a dense representation of this composition + numerically and is primarily intended as part of testing infrastructure. + + Parameters + ---------- + errorgen_labels : tuple of `LocalStimErrorgenLabel` + A tuple of the elementary error generator labels to be composed. + + rates : tuple of float + A tuple of corresponding error generator rates of the same length as the tuple + of error generator labels. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + num_qubits : int, optional (default None) + Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + + Returns + ------- + numpy.ndarray + Dense numpy array representation of the super operator corresponding to the iterated composition written in + the standard basis. + """ + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. 
+ errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + composition = errorgen_matrix_dict[errorgen_labels[0]] + for lbl in errorgen_labels[1:]: + composition = composition@errorgen_matrix_dict[lbl] + composition *= _np.prod(rates) + return composition + #-----------First-Order Approximate Error Generator Probabilities and Expectation Values---------------# def random_support(tableau, return_support=False): @@ -7183,6 +7289,11 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order if abs(rate) > truncation_threshold: #print(f'{alpha_pauli(lbl, tableau, pauli)=}') #print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') + alpha_numerical = alpha_pauli_numerical(lbl, tableau, pauli) + alpha_analytical = alpha_pauli(lbl, tableau, pauli) + if abs(alpha_numerical-alpha_analytical)>1e-6: + print(f'{alpha_pauli(lbl, tableau, pauli)=}') + print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') alpha_errgen_prods[i] = alpha_pauli(lbl, tableau, pauli)*rate correction = _np.sum(alpha_errgen_prods) if order > 1: @@ -7206,6 +7317,11 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order alpha_errgen_prods = _np.zeros(len(composition_results_dict)) for i, (lbl, rate) in enumerate(composition_results_dict.items()): if current_order_scale*abs(rate) > truncation_threshold: + alpha_numerical = alpha_pauli_numerical(lbl, tableau, pauli) + alpha_analytical = alpha_pauli(lbl, tableau, pauli) + if abs(alpha_numerical-alpha_analytical)>1e-6: + print(f'{alpha_pauli(lbl, tableau, pauli)=}') + print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') sensitivity = alpha_pauli(lbl, tableau, pauli) alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) correction += current_order_scale*_np.sum(alpha_errgen_prods) @@ -7213,114 +7329,53 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order return correction -def 
iterative_error_generator_composition(errorgen_labels, rates): +def stabilizer_pauli_expectation_correction_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order = 1): """ - Iteratively compute error generator compositions. Each error generator - composition in general returns a list of multiple new error generators, - so this function manages the distribution and recursive application - of the compositions for two-or-more error generator labels. + Compute the kth-order correction to the expectation value of the specified pauli. Parameters ---------- - errorgen_labels : tuple of `LocalStimErrorgenLabel` - A tuple of the elementary error generator labels to be composed. - - rates : tuple of float - A tuple of corresponding error generator rates of the same length as the tuple - of error generator labels. - - Returns - ------- - List of tuples, the first element of each tuple is a `LocalStimErrorgenLabel`. - The second element of each tuple is the final rate for that term. - """ - - if len(errorgen_labels) == 1: - return [(errorgen_labels[0], rates[0])] - else: - label_tuples_to_process = [errorgen_labels] - rate_tuples_to_process = [rates] - - fully_processed_label_rate_tuples = [] - while label_tuples_to_process: - new_label_tuples_to_process = [] - new_rate_tuples_to_process = [] - - for label_tup, rate_tup in zip(label_tuples_to_process, rate_tuples_to_process): - #grab the last two elements of each of these and do the composition. - new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) - - #if the new labels and rates sum to zero overall then we can kill this branch of the tree. 
- aggregated_labels_and_rates_dict = dict() - for lbl, rate in new_labels_and_rates: - if aggregated_labels_and_rates_dict.get(lbl, None) is None: - aggregated_labels_and_rates_dict[lbl] = rate - else: - aggregated_labels_and_rates_dict[lbl] += rate - if all([abs(val)<1e-15 for val in aggregated_labels_and_rates_dict.values()]): - continue - - label_tup_remainder = label_tup[:-2] - rate_tup_remainder = rate_tup[:-2] - if label_tup_remainder: - for new_label, new_rate in aggregated_labels_and_rates_dict.items(): - new_label_tup = label_tup_remainder + (new_label,) - new_rate_tup = rate_tup_remainder + (new_rate,) - new_label_tuples_to_process.append(new_label_tup) - new_rate_tuples_to_process.append(new_rate_tup) - else: - for new_label_rate_tup in aggregated_labels_and_rates_dict.items(): - fully_processed_label_rate_tuples.append(new_label_rate_tup) - label_tuples_to_process = new_label_tuples_to_process - rate_tuples_to_process = new_rate_tuples_to_process - - return fully_processed_label_rate_tuples - -def iterative_error_generator_composition_numeric(errorgen_labels, rates, errorgen_matrix_dict=None, num_qubits=None): - """ - Iteratively compute error generator compositions. The function computes a dense representation of this composition - numerically and is primarily intended as part of testing infrastructure. + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. - Parameters - ---------- - errorgen_labels : tuple of `LocalStimErrorgenLabel` - A tuple of the elementary error generator labels to be composed. + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. - rates : tuple of float - A tuple of corresponding error generator rates of the same length as the tuple - of error generator labels. + circuit : `Circuit` + Circuit the expectation value is being measured against. 
- errorgen_matrix_dict : dict, optional (default None) - An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. - If not specified this will be constructed from scratch each call, so specifying this can provide a performance - benefit. + pauli : stim.PauliString + Pauli operator to compute expectation value correction for. - num_qubits : int, optional (default None) - Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. Returns ------- - numpy.ndarray - Dense numpy array representation of the super operator corresponding to the iterated composition written in - the standard basis. + correction : float + float corresponding to the correction to the expectation value for the + selected pauli operator induced by the error generator (to specified order). """ + tableau = circuit.convert_to_stim_tableau() - if errorgen_matrix_dict is None: - #create an error generator basis. - errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') - - #use this basis to construct a dictionary from error generator labels to their - #matrices. 
- errorgen_lbls = errorgen_basis.labels - errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} - - composition = errorgen_matrix_dict[errorgen_labels[0]] - for lbl in errorgen_labels[1:]: - composition = composition@errorgen_matrix_dict[lbl] - composition *= _np.prod(rates) - return composition - - + stabilizer_state = tableau.to_state_vector(endian='big') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + + #also get the superoperator (in the standard basis) corresponding to the taylor series + #expansion of the specified error generator dictionary. + taylor_expanded_errorgen = error_generator_taylor_expansion_numerical(errorgen_dict, errorgen_propagator, order=order, mx_basis='std') + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = _np.ravel(pauli_unitary) + pauli_vec.reshape((len(pauli_vec),1)) + + expectation_correction = _np.linalg.multi_dot([pauli_vec.conj().T, taylor_expanded_errorgen,stabilizer_state_dmvec]).item() + return expectation_correction def stabilizer_probability(tableau, desired_bitstring): """ @@ -7448,8 +7503,8 @@ def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, orde Returns ------- - p : float - Approximate output probability for desired bitstring. + expectation_value : float + Approximate expectation value for desired pauli. 
""" if isinstance(circuit, _Circuit): @@ -7468,6 +7523,52 @@ def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, orde correction = stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order, truncation_threshold) return ideal_expectation + correction +def approximate_stabilizer_pauli_expectation_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order=1): + """ + Calculate the approximate probability of a desired bit string using a first-order approximation. + This function performs the corrections numerically and so it primarily intended for testing + infrastructure. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. + + circuit : `Circuit` + A pygsti `Circuit` or a stim.Tableau to compute the output pauli expectation value for. + + pauli : stim.PauliString + Pauli operator to compute expectation value for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. (Used internally in computation of probability corrections) + + Returns + ------- + expectation_value : float + Approximate expectation value for desired pauli. + """ + + tableau = circuit.convert_to_stim_tableau() + + #recast keys to local stim ones if needed. 
+ first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_expectation = stabilizer_pauli_expectation(tableau, pauli) + correction = stabilizer_pauli_expectation_correction_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order) + return ideal_expectation + correction + def approximate_stabilizer_probabilities(errorgen_dict, circuit, order=1, truncation_threshold=1e-14): """ Calculate the approximate probability distribution over all bitstrings using a first-order approximation. @@ -7572,4 +7673,39 @@ def error_generator_taylor_expansion(errorgen_dict, order = 1, truncation_thresh if order_scale*abs(rate) > truncation_threshold: taylor_order_terms[current_order-1][lbl] = order_scale*rate - return taylor_order_terms \ No newline at end of file + return taylor_order_terms + +def error_generator_taylor_expansion_numerical(errorgen_dict, errorgen_propagator, order = 1, mx_basis = 'pp'): + """ + Compute the nth-order taylor expansion for the exponentiation of the error generator described by the input + error generator dictionary. (Excluding the zeroth-order identity). This function computes a dense representation + of this taylor expansion as a numpy array and is primarily intended for testing infrastructure. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + mx_basis : `Basis` or str, optional (default 'pp') + Basis in which to return the matrix. 
+ + Returns + ------- + numpy.ndarray + A dense numpy array corresponding to the nth order taylor expansion of the specified error generator. + """ + + errorgen_mat = errorgen_propagator.errorgen_layer_dict_to_errorgen(errorgen_dict, mx_basis) + taylor_expansion = _np.zeros(errorgen_mat.shape, dtype=_np.complex128) + for i in range(1, order+1): + taylor_expansion += 1/factorial(i)*_np.linalg.matrix_power(errorgen_mat, i) + + return taylor_expansion \ No newline at end of file diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index fb61c4e4f..ad5396d40 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -410,26 +410,23 @@ def test_approximate_stabilizer_probabilities(self): def test_approximate_stabilizer_pauli_expectation(self): rng = np.random.default_rng(seed=12345) paulis_4Q = list(stim.PauliString.iter_all(4)) - #random_4Q_pauli_indices = rng.choice(len(paulis_4Q), 10, replace=False) - #random_4Q_paulis = [paulis_4Q[idx] for idx in random_4Q_pauli_indices] - - for pauli in paulis_4Q:#random_4Q_paulis: - exact_pauli_expectation = pauli_expectation_errorgen_prop(self.error_propagator, self.target_model, - self.circuit, pauli, use_bch=True, bch_order=1) - first_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=1) - second_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=2) - third_order_diff = exact_pauli_expectation - _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=3) - - if abs(first_order_diff) < abs(second_order_diff): - print(f'{first_order_diff=}') - print(f'{second_order_diff=}') - print(f'{pauli=}') - raise ValueError('Going to higher order made the expectation value worse!') - if 
abs(second_order_diff) < abs(third_order_diff): - print(f'{second_order_diff=}') - print(f'{third_order_diff=}') - print(f'{pauli=}') - raise ValueError('Going to higher order made the expectation value worse!') + random_4Q_pauli_indices = rng.choice(len(paulis_4Q), 5, replace=False) + random_4Q_paulis = [paulis_4Q[idx] for idx in random_4Q_pauli_indices] + + for pauli in random_4Q_paulis: + + + first_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, self.circuit, pauli, order=1) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=1) + second_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, self.circuit, pauli, order=2) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=2) + third_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, self.circuit, pauli, order=3) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=3) + + assert abs(first_order_diff) < 1e-6, f'{pauli=}' + assert abs(second_order_diff) < 1e-8, f'{pauli=}' + assert abs(third_order_diff) < 5e-8, f'{pauli=}' + def test_error_generator_taylor_expansion(self): #this is just an integration test atm. @@ -593,7 +590,7 @@ def _compare_analytic_numeric_iterative_composition(num_qubits): #for each triple compute the composition directly and compute it analytically (then converting it to #a numeric array) and see how they compare. 
for i, (triple_1, triple_2) in enumerate(zip(random_triples, random_triples_stim)): - numeric_composition = _eprop.iterative_error_generator_composition_numeric(triple_1, (1,1,1), complete_errorgen_lbl_matrix_dict) + numeric_composition = _eprop.iterative_error_generator_composition_numerical(triple_1, (1,1,1), complete_errorgen_lbl_matrix_dict) analytic_composition = _eprop.iterative_error_generator_composition(triple_2, (1,1,1)) analytic_composition_dict = dict() for lbl, rate in analytic_composition: From bc8aa0b4821203dd2a9716cbafebdbbb0c4bc678 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 15 Feb 2025 17:16:16 -0700 Subject: [PATCH 091/102] Clean up errant debugging Forgot to remove some debugging statements. --- pygsti/tools/errgenproptools.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 6edaac05f..d9f826f73 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -7287,13 +7287,6 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order for i, (lbl, rate) in enumerate(errorgen_dict.items()): if abs(rate) > truncation_threshold: - #print(f'{alpha_pauli(lbl, tableau, pauli)=}') - #print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') - alpha_numerical = alpha_pauli_numerical(lbl, tableau, pauli) - alpha_analytical = alpha_pauli(lbl, tableau, pauli) - if abs(alpha_numerical-alpha_analytical)>1e-6: - print(f'{alpha_pauli(lbl, tableau, pauli)=}') - print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') alpha_errgen_prods[i] = alpha_pauli(lbl, tableau, pauli)*rate correction = _np.sum(alpha_errgen_prods) if order > 1: @@ -7317,11 +7310,6 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order alpha_errgen_prods = _np.zeros(len(composition_results_dict)) for i, (lbl, rate) in enumerate(composition_results_dict.items()): if current_order_scale*abs(rate) > truncation_threshold: - 
alpha_numerical = alpha_pauli_numerical(lbl, tableau, pauli) - alpha_analytical = alpha_pauli(lbl, tableau, pauli) - if abs(alpha_numerical-alpha_analytical)>1e-6: - print(f'{alpha_pauli(lbl, tableau, pauli)=}') - print(f'{alpha_pauli_numerical(lbl, tableau, pauli)=}') sensitivity = alpha_pauli(lbl, tableau, pauli) alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) correction += current_order_scale*_np.sum(alpha_errgen_prods) From 15443577fb7a2cc0dc8f960bcd22b650c24386c4 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 15 Feb 2025 23:19:32 -0700 Subject: [PATCH 092/102] Misc updates and small fixes Fix a few minor issues with unit tests. Fix a small bug that was found when using higher-order taylor with higher-order BCH (just needed to cast some values to real numbers). Minor new addition to random error generator rate code. --- pygsti/tools/errgenproptools.py | 10 ++++++---- pygsti/tools/lindbladtools.py | 10 ++++++++-- test/unit/tools/test_errgenproptools.py | 10 +++++----- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index d9f826f73..763cb2b0d 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -445,7 +445,7 @@ def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_th for order_dict in new_errorgen_layer: for lbl, rate in order_dict.items(): - new_errorgen_layer_dict[lbl] += rate + new_errorgen_layer_dict[lbl] += rate.real #Future: Possibly do one last truncation pass in case any of the different order cancel out when aggregated? @@ -7316,7 +7316,6 @@ def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order return correction - def stabilizer_pauli_expectation_correction_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order = 1): """ Compute the kth-order correction to the expectation value of the specified pauli. 
@@ -7419,7 +7418,7 @@ def stabilizer_pauli_expectation(tableau, pauli): def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring, order=1, truncation_threshold=1e-14): """ - Calculate the approximate probability of a desired bit string using a first-order approximation. + Calculate the approximate probability of a desired bit string using an nth-order taylor series approximation. Parameters ---------- @@ -7478,7 +7477,7 @@ def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, orde A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either case this should be a Clifford circuit and convertable to a stim.Tableau. - pauli : stim.PauliString + pauli : str or stim.PauliString Pauli operator to compute expectation value for. order : int, optional (default 1) @@ -7502,6 +7501,9 @@ def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, orde else: raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + if isinstance(pauli, str): + pauli = stim.PauliString(pauli) + #recast keys to local stim ones if needed. first_lbl = next(iter(errorgen_dict)) if isinstance(first_lbl, (_GEEL, _LEEL)): diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 5ff9799b9..34e688100 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -509,7 +509,7 @@ def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N8 def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, H_params=(0.,.01), SCA_params=(0.,.01), error_metric=None, error_metric_value=None, relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, - label_type='global', seed = None): + label_type='global', seed=None, qubit_labels=None): """ Function for generating a random set of CPTP error generator rates. 
@@ -588,6 +588,10 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') seed : int, optional (default None) An optional integer used in seeding the RNG. + + qubit_labels : list or int or str, optional (default None) + An optional list of qubit labels upon which the error generator should act. + Only utilized when returning global labels. Returns ------- @@ -808,7 +812,9 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') if label_type == 'global': errorgen_rates_dict = {_GlobalElementaryErrorgenLabel.cast(lbl, sslbls=state_space.state_space_labels): val for lbl, val in errorgen_rates_dict.items()} - + if qubit_labels is not None: + mapper= {i:lbl for i,lbl in enumerate(qubit_labels)} + errorgen_rates_dict = {lbl.map_state_space_labels(mapper):val for lbl,val in errorgen_rates_dict.items()} return errorgen_rates_dict def _sort_errorgen_labels(errgen_labels): diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index ad5396d40..32e660868 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -179,27 +179,27 @@ def test_iterative_error_generator_composition(self): def test_bch_approximation(self): first_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=1) propagated_errorgen_layers_bch_order_1 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) - first_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_1,mx_basis='pp', return_dense=True) + first_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_1,mx_basis='pp') assert np.linalg.norm(first_order_bch_analytical-first_order_bch_numerical) < 1e-14 propagated_errorgen_layers_bch_order_2 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, 
bch_order=2) second_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=2) - second_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_2, mx_basis='pp', return_dense=True) + second_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_2, mx_basis='pp') assert np.linalg.norm(second_order_bch_analytical-second_order_bch_numerical) < 1e-14 third_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=3) propagated_errorgen_layers_bch_order_3 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=3) - third_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_3, mx_basis='pp', return_dense=True) + third_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_3, mx_basis='pp') assert np.linalg.norm(third_order_bch_analytical-third_order_bch_numerical) < 1e-14 fourth_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=4) propagated_errorgen_layers_bch_order_4 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=4) - fourth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_4, mx_basis='pp', return_dense=True) + fourth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_4, mx_basis='pp') assert np.linalg.norm(fourth_order_bch_analytical-fourth_order_bch_numerical) < 1e-14 fifth_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=5) propagated_errorgen_layers_bch_order_5 = 
self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=5, truncation_threshold=0) - fifth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_5, mx_basis='pp', return_dense=True) + fifth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_5, mx_basis='pp') assert np.linalg.norm(fifth_order_bch_analytical-fifth_order_bch_numerical) < 1e-14 exact_errorgen = logm(self.errorgen_propagator.eoc_error_channel(self.circuit)) From 2fc7d0848e4e6e4e6e7e238195e77e2df3682569 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sat, 15 Feb 2025 23:20:09 -0700 Subject: [PATCH 093/102] Checkpoint Tutorial Drafting Checkpoint progress on the new error generator propagation tutorial. --- .../Propagatable error gens tutorial.ipynb | 275 ------- .../ErrorGeneratorPropagation.ipynb | 764 ++++++++++++++++++ 2 files changed, 764 insertions(+), 275 deletions(-) delete mode 100644 jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb create mode 100644 jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb diff --git a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb b/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb deleted file mode 100644 index 47f95861c..000000000 --- a/jupyter_notebooks/Examples/Propagatable error gens tutorial.ipynb +++ /dev/null @@ -1,275 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. 
To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "from pygsti.extras.errorgenpropagation.propagatableerrorgen import *\n", - "from pygsti.extras.errorgenpropagation.errorpropagator import *\n", - "from pygsti.circuits import Circuit\n", - "import numpy as np\n", - "import pygsti.processors\n", - "import pygsti\n", - "import pygsti.tools.lindbladtools as _lt\n", - "import scipy\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Introduction to the Propagatable Error Generators Code" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining a circuit and error generators" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Currently Error Propgagation works for any model that meets three criteria\n", - "\n", - " 1. The circuit is clifford\n", - " 2. The errors on each gate can be defined at a time t of interest in the small markovian errors basis\n", - " 3. The error error model is defined such that a gate G has some linear combination of error generators following it\n", - "\n", - "We can therefore, start a code by defining a circuit and an error model by simply following the common pyGSTi notation" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "errorModel={\n", - " 'Gxpi2' : {('H','Y'):.01}\n", - "\n", - "}\n", - "c=Circuit(10*[('Gxpi2',0)])" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can take the above definitions and plug them into the errorpropagator function, to get out a list of post-circuit error generators out." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n", - "[[[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]], [[('H', ('Y',), 0.01)]]]\n" - ] - } - ], - "source": [ - "errors=ErrorPropagator(c,errorModel,BCHOrder=1,BCHLayerwise=False,NonMarkovian=False)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here BCH order determines the to what order the BCH order will be taken to (if applicable). 
BCHLayerwise will if false, propagatate all errors to the end before taking the BCH expansion, otherwise it will push the errorgens through a layer and combine with the the error generators for that layer by the rules given by the BCHOrder. Non-markovian prevents any simplification or BCH expansions being taken, instead allowing the output to be a list a lists, where the each sublist denotes the errorgenerators that were occuring at time t in the circuit." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Additionally, if you want to describe a gate with multiple associated error definitions you can define it as follows." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[('H', ('X',), (0.09999999999999999+0j))]" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "MultiGateDict={'Gxpi22' : 'Gxpi2'}\n", - "errorModel={\n", - " 'Gxpi2' : {('H','Y'):.01},\n", - " 'Gxpi22' : {('H','X'):.01}\n", - "\n", - "}\n", - "c=Circuit(10*[('Gxpi2',0),('Gxpi22',0)])\n", - "\n", - "ErrorPropagator(c,errorModel,MultiGateDict=MultiGateDict, MultiGate=True)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once the errors are propagated to the process matrix given by the end of circuit error generators is given by" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "expMat=np.zeros([4**len(c.line_labels),4**len(c.line_labels)],dtype=np.complex128)\n", - "for error in errors:\n", - " expMat +=error.toWeightedErrorBasisMatrix()\n", - "processMatrix = scipy.linalg.expm(expMat)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Non-Markovianity" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to 
use the non markovianity function you need to define an n x n correlation where n is the number of layers. Currently, we are capable of describing each layer to be governed by some stochastic process, that is correlated to the other layers. To using the code is relatively simple, see the below example" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Text(0.5, 1.0, 'White noise dephasing')" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjwAAAHHCAYAAAC7soLdAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABWQUlEQVR4nO3dd3gU5d7G8e8mIQklCT20QCjSewuEKgQjIAcQERAhYEUBKTZQEUWKNOUoCIKKqCDtUFSKxtCLgPQOUgSBEBBDgECAZN4/5mVhTYAsKZNs7s917eXuzOzObwdlb595is0wDAMRERERF+ZmdQEiIiIiaU2BR0RERFyeAo+IiIi4PAUeERERcXkKPCIiIuLyFHhERETE5SnwiIiIiMtT4BERERGXp8AjIiIiLk+BRySDs9ls9OnT577Hff3119hsNo4fP572RaWy48ePY7PZ+Prrr60uJV2u463vO27cuDQ7R1J69OhBYGBgup5TJKNQ4BFJI3PnzsVms7Fw4cJE+6pVq4bNZmPlypWJ9hUvXpzg4OBUqeGzzz7LECFCRMRqCjwiaaRhw4YArFu3zmF7TEwMe/bswcPDg/Xr1zvsO3nyJCdPnrS/1xndunXj6tWrlChRwr4tswSeEiVKcPXqVbp162Z1KS5t2rRpHDx40OoyRCzhYXUBIq6qSJEilCxZMlHg2bhxI4Zh0LFjx0T7br1+kMDj7u6Ou7v7gxdsIZvNhre3t9VluLxs2bJZXYKIZdTCI5KGGjZsyPbt27l69ap92/r166lUqRItW7bkt99+IyEhwWGfzWajQYMGiT5r0aJFVK5cGS8vLypVqsTy5csd9v+770lgYCB79+5l9erV2Gw2bDYbTZs2tR8fHR1N//79CQgIwMvLizJlyjB69GiHeu4mMDCQxx57jHXr1lG3bl28vb0pVaoU33zzTaJjjx49SseOHcmbNy85cuSgXr16LFmyxOGYpPrwREZG0rNnT4oVK4aXlxeFCxembdu2ifrWLFu2jEaNGpEzZ058fHxo3bo1e/fuve93ANi7dy/NmjUje/bsFCtWjOHDh9/1+yfnPD169CBXrlwcPXqU0NBQcubMSZEiRRg2bBiGYST5uVOnTqV06dJ4eXlRp04dtmzZ4rB/165d9OjRg1KlSuHt7U2hQoV45pln+Pvvvx2Ou3TpEv379ycwMBAvLy8KFixIixYt2LZtm0N9d/bhubMv0f3qAJg3bx4VK1bE29ubypUrs3DhQvULkkxDLTwiaahhw4Z8++23bNq0yR421q9fT3BwMMHBwVy8eJE9e/ZQtWpV+77y5cuTL18+h89Zt24dCxYs4OWXX8bHx4dPPvmEDh06cOLEiUTH3jJhwgT69u1Lrly5ePvttwHw9/cHIDY2liZNmnD
q1ClefPFFihcvzoYNGxg8eDBnzpxhwoQJ9/1uf/zxB0888QTPPvssYWFhfPXVV/To0YNatWpRqVIlAM6ePUtwcDCxsbG88sor5MuXjxkzZvCf//yH+fPn0759+7t+focOHdi7dy99+/YlMDCQqKgowsPDOXHihP0H9ttvvyUsLIzQ0FBGjx5NbGwskydPtgfNe/0QR0ZG8vDDD3Pz5k0GDRpEzpw5mTp1KtmzZ090rDPniY+P59FHH6VevXqMGTOG5cuXM3ToUG7evMmwYcMcPnfWrFlcunSJF198EZvNxpgxY3j88cc5evSovTUmPDyco0eP0rNnTwoVKsTevXuZOnUqe/fu5bfffsNmswHQq1cv5s+fT58+fahYsSJ///0369atY//+/dSsWfOef5bJqWPJkiV06tSJKlWqMGrUKP755x+effZZihYtes/PFskwDBFJM3v37jUA44MPPjAMwzBu3Lhh5MyZ05gxY4ZhGIbh7+9vTJo0yTAMw4iJiTHc3d2N559/3uEzAMPT09P4448/7Nt27txpAMann35q3zZ9+nQDMI4dO2bfVqlSJaNJkyaJ6vrggw+MnDlzGocOHXLYPmjQIMPd3d04ceLEPb9XiRIlDMBYs2aNfVtUVJTh5eVlvPrqq/Zt/fv3NwBj7dq19m2XLl0ySpYsaQQGBhrx8fGGYRjGsWPHDMCYPn26YRiG8c8//xiAMXbs2LvWcOnSJSN37tyJrldkZKTh5+eXaPu/3apt06ZNDt/Bz8/P4To6c56wsDADMPr27WvflpCQYLRu3drw9PQ0zp075/B98+XLZ1y4cMF+7OLFiw3A+PHHH+3bYmNjE9X+/fffJ7r+fn5+Ru/eve/5ncPCwowSJUrYXztTR5UqVYxixYoZly5dsm9btWqVATh8pkhGpVtaImmoQoUK5MuXz943Z+fOnVy5csU+Cis4ONjecXnjxo3Ex8cn2X8nJCSE0qVL219XrVoVX19fjh49+kB1zZs3j0aNGpEnTx7Onz9vf4SEhBAfH8+aNWvu+xkVK1akUaNG9tcFChSgXLlyDjUtXbqUunXrOnynXLly8cILL3D8+HH27duX5Gdnz54dT09PVq1axT///JPkMeHh4URHR9OlSxeH7+Du7k5QUFCSI+DutHTpUurVq0fdunUdvkPXrl1TfJ47pxG4Na3A9evX+fXXXx2O69SpE3ny5LG/vnU977yGd7Y4Xbt2jfPnz1OvXj0Ah9tVuXPnZtOmTZw+ffqe3zsp96vj9OnT7N69m+7du5MrVy77cU2aNKFKlSpOn0/ECrqlJZKGbDYbwcHBrFmzhoSEBNavX0/BggUpU6YMYAaeiRMnAtiDT1KBp3jx4om25cmT565h4H4OHz7Mrl27KFCgQJL7o6Ki7vsZyanpzz//JCgoKNFxFSpUsO+vXLlyov1eXl6MHj2aV199FX9/f+rVq8djjz1G9+7dKVSokP07ADRr1izJ+nx9fe9Z/91qK1eunMNrZ8/j5uZGqVKlHLaVLVsWIFH/o39fw1uh485reOHCBd5//31mz56d6M/l4sWL9udjxowhLCyMgIAAatWqRatWrejevXuiWpJyvzr+/PNPAPu/t3cqU6aMQ/ASyagUeETSWMOGDfnxxx/ZvXu3vf/OLcHBwbz++uucOnWKdevWUaRIkSR/oO42+sq4S0fY+0lISKBFixa88cYbSe6/9QN9L6ld07/179+fNm3asGjRIn7++WeGDBnCqFGjWLFiBTVq1LB3Lv7222/tIehOHh6p89dbWp4nOdfwySefZMOGDbz++utUr16dXLlykZCQwKOPPurQwfrJJ5+kUaNGLFy4kF9++YWxY8cyevRoFixYQMuWLVNch0hmp8AjksbunI9n/fr19O/f376vVq1aeHl5sWrVKjZt2kSrVq1S9dy3OrT+W+nSpbl8+TIhISGper5/K1GiRJLzvhw4cMC+/15Kly7Nq6++yquvvsrhw4epXr0648e
P57vvvrPf4itYsOADfY8SJUrYW2/u9O96nT1PQkICR48edQiNhw4dAnB6NNM///xDREQE77//Pu+++659e1J1AxQuXJiXX36Zl19+maioKGrWrMmIESPuG3ju59af0x9//JFoX1LbRDIi9eERSWO1a9fG29ubmTNncurUKYcWHi8vL2rWrMmkSZO4cuXKA82/cy85c+YkOjo60fYnn3ySjRs38vPPPyfaFx0dzc2bN1Pl/K1atWLz5s1s3LjRvu3KlStMnTqVwMBAKlasmOT7YmNjuXbtmsO20qVL4+PjQ1xcHAChoaH4+voycuRIbty4kegzzp07d9/afvvtNzZv3uzwnpkzZzoc9yDnuXWbEsxWkokTJ5ItWzaaN29+z5r+7VbLy79bWv49ii4+Pt7h9haYAa1IkSL265USRYoUoXLlynzzzTdcvnzZvn316tXs3r07xZ8vkh7UwiOSxjw9PalTpw5r167Fy8uLWrVqOewPDg5m/PjxwINNOHgvtWrVYvLkyQwfPpwyZcpQsGBBmjVrxuuvv84PP/zAY489Zh9KfuXKFXbv3s38+fM5fvw4+fPnT/H5Bw0axPfff0/Lli155ZVXyJs3LzNmzODYsWP873//w80t6f/nOnToEM2bN+fJJ5+kYsWKeHh4sHDhQs6ePUvnzp0Bs+/M5MmT6datGzVr1qRz584UKFCAEydOsGTJEho0aOAQPP7tjTfe4Ntvv+XRRx+lX79+9mHpJUqUYNeuXfbjnD2Pt7c3y5cvJywsjKCgIJYtW8aSJUt466237tpn6m58fX1p3LgxY8aM4caNGxQtWpRffvmFY8eOORx36dIlihUrxhNPPEG1atXIlSsXv/76K1u2bLH/u5VSI0eOpG3btjRo0ICePXvyzz//MHHiRCpXruwQgkQyLCuHiIlkFYMHDzYAIzg4ONG+BQsWGIDh4+Nj3Lx5M9F+IMnhxiVKlDDCwsLsr5Malh4ZGWm0bt3a8PHxMQCHIeqXLl0yBg8ebJQpU8bw9PQ08ufPbwQHBxvjxo0zrl+/fs/vU6JECaN169aJtjdp0iTRMPgjR44YTzzxhJE7d27D29vbqFu3rvHTTz85HPPvYennz583evfubZQvX97ImTOn4efnZwQFBRlz585NdM6VK1caoaGhhp+fn+Ht7W2ULl3a6NGjh/H777/f8zsYhmHs2rXLaNKkieHt7W0ULVrU+OCDD4wvv/wy0XVM7nnCwsKMnDlzGkeOHDEeeeQRI0eOHIa/v78xdOhQ+xD8O79vUsPuAWPo0KH213/99ZfRvn17I3fu3Iafn5/RsWNH4/Tp0w7HxcXFGa+//rpRrVo1w8fHx8iZM6dRrVo147PPPnP47LsNS09OHYZhGLNnzzbKly9veHl5GZUrVzZ++OEHo0OHDkb58uXvc6VFrGczDPVKExFJDT169GD+/PlZqsWjevXqFChQgPDwcKtLEbkn9eEREZH7unHjRqK+XatWrWLnzp0OS5aIZFTqwyMiIvd16tQpQkJCePrppylSpAgHDhxgypQpFCpUiF69elldnsh9KfCIiMh95cmTh1q1avHFF19w7tw5cubMSevWrfnwww/vup6bSEaiPjwiIiLi8tSHR0RERFyeAo+IiIi4vCzXhychIYHTp0/j4+Nz12n3RUREJGMxDINLly5RpEiRu05aei9ZLvCcPn2agIAAq8sQERGRB3Dy5EmKFSvm9PuyXODx8fEBzAvm6+trcTUiIiKSHDExMQQEBNh/x52V5QLPrdtYvr6+CjwiIiKZzIN2R1GnZREREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyI
iIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8iwNPGvWrKFNmzYUKVIEm83GokWL7vueVatWUbNmTby8vChTpgxff/11mtcpIiIimZulgefKlStUq1aNSZMmJev4Y8eO0bp1ax5++GF27NhB//79ee655/j555/TuFIRERHJzDysPHnLli1p2bJlso+fMmUKJUuWZPz48QBUqFCBdevW8fHHHxMaGppWZYqIiEgml6n68GzcuJGQkBCHbaGhoWzcuNGiikRERCQzsLSFx1mRkZH4+/s7bPP39ycmJoarV6+SPXv2RO+Ji4sjLi7O/jomJibN6xQREZGMJVO18DyIUaNG4efnZ38EBARYXZKIiIiks0wVeAoVKsTZs2cdtp09exZfX98kW3cABg8ezMWLF+2PkydPpkepIiIikoFkqlta9evXZ+nSpQ7bwsPDqV+//l3f4+XlhZeXV1qXJiIiIhmYpS08ly9fZseOHezYsQMwh53v2LGDEydOAGbrTPfu3e3H9+rVi6NHj/LGG29w4MABPvvsM+bOncuAAQOsKF9EREQyCUsDz++//06NGjWoUaMGAAMHDqRGjRq8++67AJw5c8YefgBKlizJkiVLCA8Pp1q1aowfP54vvvhCQ9JFRETknmyGYRhWF5GeYmJi8PPz4+LFi/j6+lpdjoiIiCRDSn+/M1WnZREREZEHocAjIiIiLk+BR0RERFyeAo+IiIi4PAUeERERcXkKPKno99/h2DGrqxAREZF/U+BJJYsWQYMG8MQTcO2a1dWIiIjInRR4UkmtWuDrC9u2wSuvWF2NiIiI3EmBJ5UEBMCsWWCzwbRpMH261RWJiIjILQo8qahFCxg2zHz+8svw/0uEiYiIiMUUeFLZW29Bq1ZmP54OHSA62uqKRERERIEnlbm5wbffQmAgHD0KYWGQkGB1VSIiIlmbAk8ayJsX5s8HT0/44QcYO9bqikRERLI2BZ40UqsWTJxoPn/rLVi50tp6REREsjIFnjT03HPQo4d5S6tzZzh1yuqKREREsiYFnjRks8GkSVCtGkRFQadOcOOG1VWJiIhkPQo8aSxHDrM/j68vrF8Pb75pdUUiIiJZjwJPOihTBr75xnz+8ccwb5619YiIiGQ1CjzppG3b2607zzwDBw5YW4+IiEhWosCTjoYPh6ZN4fJlc1LCy5etrkhERCRrUOBJRx4eMHs2FC4M+/bBCy+AYVhdlYiIiOtT4Eln/v4wdy64u8P338Nnn1ldkYiIiOtT4LFAw4a3Z18eMAB++83aekRERFydAo9F+veHJ54w5+Xp2BHOnbO6IhEREdelwGMRmw2+/BLKloW//oKuXSE+3uqqREREXJMCj4V8feF//zMnJwwPh/fft7oiERER16TAY7HKlWHqVPP5Bx/AkiXW1iMiIuKKFHgygK5doXdv83m3bnD8uKXliIiIuBwFngxi/HgICoJ//jE7M1+7ZnVFIiIirkOBJ4Pw8jLn58mXD7ZuhX79rK5IRETEdSjwZCDFi8OsWeYIrqlT4euvra5IRETENSjwZDCPPHJ7tNZLL8HOndbWIyIi4goUeDKgt9+Gli3NfjwdOkB0tNUViYiIZG4KPBmQmxt89x2UKAFHjkCPHlpkVEREJCUUeDKovHlh/nzw9ITFi2+vvSUiIiLOU+DJwGrXhk8/NZ8PHgyrVllajoiISKalwJPBPf88hIVBQgJ07gynT1tdkYiISOajwJPB2Wzw2WdQtSqcPQvt2kFsrNVViYiIZC4KPJlAjhywYIHZr2fLFuje3WzxERERkeRR4MkkSpeGRYvMTsz/+x+89ZbVFYmIiGQeCjyZSKNG8OWX5vPRo28/FxERkXtT4Mlknn4a3n3XfN6rF0REWFuPiIhIZqDAkwm99x489RTcvGnOxLx/v9UViYiIZGwKPJmQzWbezmrQAC5ehNat4dw5q6sSERHJuBR4Milvb1i4EEqVgmPHzOHq165ZXZWIiEjGpMC
TiRUoAEuWQO7csGED9OypNbdERESSosCTyZUvbw5T9/CA2bNh6FCrKxIREcl4FHhcQLNmMHWq+fyDD+Cbb6ytR0REJKNR4HERPXvCoEHm8+eegzVrrK1HREQkI1HgcSEjRkDHjnDjBrRvD4cPW12RiIhIxqDA40Lc3GDGDAgKggsXzOHqf/9tdVUiIiLWU+BxMdmzw+LFUKKE2cLz+OMQF2d1VSIiItZS4HFB/v7mcHVfX7MvzwsvaLi6iIhkbQo8LqpSJZg3D9zdzVFbI0ZYXZGIiIh1FHhc2COPwKRJ5vMhQ8x5ekRERLIiBR4X9+KLMHCg+bxHD3NGZhERkaxGgScLGDMG2rY1Oy+3awdHj1pdkYiISPqyPPBMmjSJwMBAvL29CQoKYvPmzfc8fsKECZQrV47s2bMTEBDAgAEDuKZVM+/J3R1mzoSaNc1V1Vu3huhoq6sSERFJP5YGnjlz5jBw4ECGDh3Ktm3bqFatGqGhoURFRSV5/KxZsxg0aBBDhw5l//79fPnll8yZM4e33nornSvPfHLmhB9/hGLF4MABeOIJc4JCERGRrMDSwPPRRx/x/PPP07NnTypWrMiUKVPIkSMHX331VZLHb9iwgQYNGvDUU08RGBjII488QpcuXe7bKiSmIkXgp58gVy6IiICXXtJwdRERyRosCzzXr19n69athISE3C7GzY2QkBA2btyY5HuCg4PZunWrPeAcPXqUpUuX0qpVq7ueJy4ujpiYGIdHVlatmjlay80NvvwSxo61uiIREZG0Z1ngOX/+PPHx8fj7+zts9/f3JzIyMsn3PPXUUwwbNoyGDRuSLVs2SpcuTdOmTe95S2vUqFH4+fnZHwEBAan6PTKj1q1hwgTz+ZtvwoIFlpYjIiKS5izvtOyMVatWMXLkSD777DO2bdvGggULWLJkCR988MFd3zN48GAuXrxof5w8eTIdK864+vaFPn3M508/DVu2WFuPiIhIWvKw6sT58+fH3d2ds2fPOmw/e/YshQoVSvI9Q4YMoVu3bjz33HMAVKlShStXrvDCCy/w9ttv4+aWOL95eXnh5eWV+l/ABXz8sTlEfelSaNMGNm+G4sWtrkpERCT1WdbC4+npSa1atYiIiLBvS0hIICIigvr16yf5ntjY2EShxt3dHQBDvW+d5uFh9uepWhXOnjVvdV28aHVVIiIiqc/SW1oDBw5k2rRpzJgxg/379/PSSy9x5coVevbsCUD37t0ZPHiw/fg2bdowefJkZs+ezbFjxwgPD2fIkCG0adPGHnzEOT4+5sitwoVhzx547DGIjbW6KhERkdRl2S0tgE6dOnHu3DneffddIiMjqV69OsuXL7d3ZD5x4oRDi84777yDzWbjnXfe4dSpUxQoUIA2bdowQitjpkhAgHlbq2lTWLcO2reHH34A3QkUERFXYTOy2L2gmJgY/Pz8uHjxIr6+vlaXk6Fs2AAtWpgtPI8/DnPmmLe9RERErJbS3+9MNUpL0lZwMCxeDJ6e5lD1556DhASrqxIREUk5BR5xEBICc+ea62/NmAH9+mk2ZhERyfwUeCSRtm3h66/BZoOJE2HIEKsrEhERSRkFHknS00/DZ5+Zz0eMgDFjrK1HREQkJRR45K569YLRo83nb74JU6ZYW4+IiMiDUuCRe3rjDbi1VNnLL8N331lbj4iIyINQ4JH7Gj7cXHfLMKBHD3Mkl4iISGaiwCP3ZbPBf/8LYWEQHw9PPgm//mp1VSIiIsmnwCPJ4uYGX3wBHTrA9evmSK4NG6yuSkREJHkUeCTZPDxg5kwIDTVnY27VCnbssLoqERGR+3M68DRp0oRvvvmGq1evpkU9ksF5eZmzMDdsaK6s/sgjcOCA1VWJiIjcm9OBp0aNGrz22msUKlSI559/nt9++y0t6pIMLEcOc4X1mjXh3Dlz/a3jx62uSkRE5O6cDjwTJkzg9OnTTJ8+naioKBo3bkzFihUZN24cZ8+eTYsaJQPy84Off4YKFeC
vv8wlKc6csboqERGRpD1QHx4PDw8ef/xxFi9ezF9//cVTTz3FkCFDCAgIoF27dqxYsSK165QMKH9+CA+HkiXhyBHz9tbff1tdlYiISGIp6rS8efNmhg4dyvjx4ylYsCCDBw8mf/78PPbYY7z22mupVaNkYEWLmkPUixSBPXugZUu4dMnqqkRERBzZDMO5tbCjoqL49ttvmT59OocPH6ZNmzY899xzhIaGYrPZAFi3bh2PPvooly9fTpOiUyImJgY/Pz8uXryIr6+v1eW4jH37oHFjs4WnSRNYtgyyZ7e6KhERcRUp/f32cPYNxYoVo3Tp0jzzzDP06NGDAgUKJDqmatWq1KlTx+liJPOqWNHs09OsGaxeDU88AQsXgqen1ZWJiIg8QAvP2rVradSoUVrVk+bUwpO21q0z+/JcvWrOyDxrFri7W12ViIhkdin9/Xa6D09mDjuS9ho2NFt2smWDuXPhxRfNNbhERESs5PQtLYD58+czd+5cTpw4wfXr1x32bdu2LVUKk8wrNBS+/95s4fnyS/DxgY8+MtfkEhERsYLTLTyffPIJPXv2xN/fn+3bt1O3bl3y5cvH0aNHadmyZVrUKJlQhw7w1Vfm8wkT4P33LS1HRESyOKcDz2effcbUqVP59NNP8fT05I033iA8PJxXXnmFixcvpkWNkkmFhcGnn5rP338fxo+3th4REcm6nA48J06cIDg4GIDs2bNz6f8nXenWrRvff/996lYnmV6fPjBihPn8tddg7Fhr6xERkazJ6cBTqFAhLly4AEDx4sXta2kdO3YMJwd8SRYxeDAMGWI+f+MNs7VH/6qIiEh6cjrwNGvWjB9++AGAnj17MmDAAFq0aEGnTp1o3759qhcomZ/NBsOGwciR5uv33oM331ToERGR9OP0PDwJCQkkJCTg4WEO8Jo9ezYbNmzgoYce4sUXX8Qzg880p3l4rPXf/0L//ubz3r3hk0/ALUULnIiISFaQ0t9vpwNPZqfAY71p027Pz9Ozp/lakxOKiMi9pPvEg9OnT2fevHmJts+bN48ZM2Y4XYBkPc8/D998Y4ac6dPh6afhxg2rqxIREVfmdOAZNWoU+fPnT7S9YMGCjLzVSUPkPp5+GubMMWdknj0bOnaEuDirqxIREVf1QMPSS5YsmWh7iRIlOHHiRKoUJVlDhw6waBF4ecHixdC2LcTGWl2ViIi4IqcDT8GCBdm1a1ei7Tt37iRfvnypUpRkHa1awZIlkCOHudp6q1bw/1M7iYiIpBqnA0+XLl145ZVXWLlyJfHx8cTHx7NixQr69etH586d06JGcXHNm8Mvv4CvL6xeba62Hh1tdVUiIuJKnB6ldf36dbp168a8efPsQ9MTEhLo3r07U6ZM0bB0eWC//26GnX/+gRo1zBCURHcxERHJgiwbln7o0CF27txJ9uzZqVKlCiVKlHiQj0l3CjwZ265d0KIFREVBxYrw669QuLDVVYmIiNVS+vvt8aAnDgwMxDAMSpcubW/pEUmpqlXN21rNm8O+fdC4MUREQPHiVlcmIiKZmdN9eGJjY3n22WfJkSMHlSpVso/M6tu3Lx9++GGqFyhZT/nysHYtBAbCH39Ao0Zw5IjVVYmISGbmdOAZPHgwO3fuZNWqVXh7e9u3h4SEMGfOnFQtTrKuUqVgzRooWxZOnDBDz/79VlclIiKZldOBZ9GiRUycOJGGDRtis9ns2ytVqsQR/W+4pKKAAPP2VuXKcOYMNGkCO3daXZWIiGRGTgeec+fOUbBgwUTbr1y54hCARFJDoUKwahXUrAnnzkHTprB5s9VViYhIZuN04KlduzZLliyxv74Vcr744gvq16+fepWJ/L98+cyOy/Xrm/PzhISYfXxERESSy+nhVSNHjqRly5bs27ePmzdv8t///pd9+/axYcMGVq9enRY1ipA7tzkvz3/+AytXQmiouRxFixZWVyYiIpmB0y08DRs2ZMeOHdy8eZMqVarwyy+/ULBgQTZu3Ei
tWrXSokYRAHLlMpehaNkSrl6Fxx6DH3+0uioREckMHnjiwcxKEw9mfnFx0KULLFwIHh4wa5a52rqIiLiudJ948OLFi4SHh3P8+HFsNhulSpWiefPmCg+Sbry8YO5cCAszw07nzmaLT/fuVlcmIiIZlVOB57vvvqNPnz7ExMQ4bPfz82PKlCl06tQpVYsTuRsPD/jmG3OV9S++MMPP5cvw8stWVyYiIhlRsvvwbNu2jZ49e9KuXTu2b9/O1atXiY2N5ffff6dNmzZ069aNnZokRdKRuzt8/jn07Wu+7t0b3ngDEhKsrUtERDKeZPfh6dmzJ5cvX2bevHlJ7n/iiSfw9fXlq6++StUCU5v68Lgew4Dhw+Hdd83XHTvCjBmQPbu1dYmISOpJ6e93slt41q9fz4svvnjX/b169WLdunVOFyCSUjYbDBkC334L2bLBvHnmXD3nz1tdmYiIZBTJDjynT5+mbNmyd91ftmxZTp06lSpFiTyIp5825+rJnRs2bDAnKjx82OqqREQkI0h24ImNjXVYLPTfvLy8uHbtWqoUJfKgmjY1w86tldbr14f1662uSkRErObUKK2ff/4ZPz+/JPdFR0enRj0iKVahAvz2G7RpA1u2QPPmZp8eDSIUEcm6kt1p2c3t/o1BNpuN+Pj4FBeVltRpOeuIjYWnnjKXoAD48ENzFJfWuBURyXzSrdNyQkLCfR8ZPexI1pIjB/zvf9Cvn/l60CB46SW4edPaukREJP05vZaWSGbi7g4TJpgPm82ct6dNG7h0yerKREQkPSnwSJbQr5+59lb27LB8OTRqBBpUKCKSdSjwSJbRti2sXg0FC8LOnRAUBLt2WV2ViIikB8sDz6RJkwgMDMTb25ugoCA2b958z+Ojo6Pp3bs3hQsXxsvLi7Jly7J06dJ0qlYyuzp1zBFcFSqYLTwNG8LPP1tdlYiIpDVLA8+cOXMYOHAgQ4cOZdu2bVSrVo3Q0FCioqKSPP769eu0aNGC48ePM3/+fA4ePMi0adMoWrRoOlcumVnJkubcPE2bmn15Wrc2FyAVERHXlexh6XeKjo5m/vz5HDlyhNdff528efOybds2/P39nQofQUFB1KlTh4kTJwLmSLCAgAD69u3LoEGDEh0/ZcoUxo4dy4EDB8iWLZuzZQMali63xcXBc8/Bd9+Zr996Cz74AJIxA4OIiKSzdBuWfsuuXbsoW7Yso0ePZty4cfYJBxcsWMDgwYOT/TnXr19n69athISE3C7GzY2QkBA2btyY5Ht++OEH6tevT+/evfH396dy5cqMHDnynsPh4+LiiImJcXiIAHh5wTff3F50dORIc3mKuDhr6xIRkdTndOAZOHAgPXr04PDhww5LTbRq1Yo1a9Yk+3POnz9PfHw8/v7+Dtv9/f2JjIxM8j1Hjx5l/vz5xMfHs3TpUoYMGcL48eMZPnz4Xc8zatQo/Pz87I+AgIBk1yiuz2aD99+H6dPBwwO+/x5atIC//7a6MhERSU1OB54tW7YkuWp60aJF7xpUUktCQgIFCxZk6tSp1KpVi06dOvH2228zZcqUu75n8ODBXLx40f44efJkmtYomVOPHuZwdV9fWLsWgoPhyBGrqxIRkdTidODx8vJK8rbQoUOHKFCgQLI/J3/+/Li7u3P27FmH7WfPnqVQoUJJvqdw4cKULVsWd3d3+7YKFSoQGRnJ9evX71qvr6+vw0MkKc2bm52ZixeHQ4egXj1zRJeIiGR+Tgee//znPwwbNowbN24A5vpZJ06c4M0336RDhw7J/hxPT09q1apFRESEfVtCQgIRERHUr18/yfc0aNCAP/74g4SEBPu2Q4cOUbhwYTw9PZ39KiKJVK5shpyaNeH8eXj4YXN5ChERydycDjzjx4/n8uXLFCxYkKtXr9KkSRPKlCmDj48PI0aMcOqzBg4cyLRp05gxYwb79+/npZde4sqVK/Ts2ROA7t27O3SEfumll7hw4QL9+vXj0KFDLFmyhJEjR9K
7d29nv4bIXRUubE5Q2Lo1XLsGHTvCRx+B8+MZRUQko/Bw9g1+fn6Eh4ezfv16du7cyeXLl6lZs6bDaKvk6tSpE+fOnePdd98lMjKS6tWrs3z5cntH5hMnTjis0h4QEMDPP//MgAEDqFq1KkWLFqVfv368+eabTp9b5F5y5YJFi8wlKT77DF59Ffbtg4kT4Y6++iIikkk80Dw8mZnm4RFnGAZ8/DG8/jokJEDt2uYtruLFra5MRCRrSfd5eF555RU++eSTRNsnTpxI//79nS5AJCOz2WDgQHMEV7588PvvZv+eX3+1ujIREXGG04Hnf//7Hw0aNEi0PTg4mPnz56dKUSIZTYsWsHUr1KplztETGgoffqh+PSIimYXTgefvv//Gz88v0XZfX1/Onz+fKkWJZEQlSsC6dfDMM+btrcGDoUMH0OTdIiIZn9OBp0yZMixfvjzR9mXLllGqVKlUKUoko/L2Nhca/fxz8PSEhQuhbl2zQ7OIiGRcTo/SGjhwIH369OHcuXM0a9YMgIiICMaPH8+ECRNSuz6RDMdmgxdegOrVzRaegwfN0DN9ujmEXUREMp4HGqU1efJkRowYwenTpwEIDAzkvffeo3v37qleYGrTKC1JTVFR0LkzrFxpvn7tNRg1ylyXS0REUk9Kf79TNCz93LlzZM+enVy5cj3oR6Q7BR5JbTdvwttvw5gx5uuHH4bZs6FgQWvrEhFxJek+LP1OBQoUyFRhRyQteHjA6NEwf745YeHKleZork2brK5MRERucTrwnD17lm7dulGkSBE8PDxwd3d3eIhkVR06wObNUK4c/PUXNG5sdm7W0HUREes53dOgR48enDhxgiFDhlC4cGFsNlta1CWSKVWoYIaenj1hwQLo1cts6Zk0CbJnt7o6EZGsy+k+PD4+Pqxdu5bq1aunUUlpS314JD0Yhtmn5623zDl7atY0l6QIDLS6MhGRzCnd+/AEBASQxZbfEnGazQZvvgm//AL588O2bWa/nl9+sboyEZGsyenAM2HCBAYNGsTx48fToBwR19K8ubkkRe3acOECPPoojBxptvqIiEj6cfqWVp48eYiNjeXmzZvkyJGDbNmyOey/cOFCqhaY2nRLS6xw7Rr07WvO0gzQti3MmAFJrNIiIiJJSOnvt9OdljWbsojzvL1h2jQICoLevWHxYnN25gULoFIlq6sTEXF9KZp4MDNSC49YbcsWcwj7yZOQMyd89RU8+aTVVYmIZGyWTDx45MgR3nnnHbp06UJUVBRgLh66d+/eB/k4kSylTh2zX0/z5nDlCnTqBK++CtevW12ZiIjrcjrwrF69mipVqrBp0yYWLFjA5cuXAdi5cydDhw5N9QJFXFGBArB8uTmSC+Cjj6BBAzh0yNq6RERcldOBZ9CgQQwfPpzw8HA8PT3t25s1a8Zvv/2WqsWJuDIPD/jwQ1i4EPLkgd9/hxo1zL4+WetGs4hI2nM68OzevZv27dsn2l6wYEHOnz+fKkWJZCXt2sGuXdCsGcTGwgsvwOOPg/5zEhFJPU4Hnty5c3PmzJlE27dv307RokVTpSiRrKZYMQgPh3HjIFs2WLQIqlY1t4mISMo5HXg6d+7Mm2++SWRkJDabjYSEBNavX89rr71G9+7d06JGkSzBzc3svLxpk7km15kz8Mgj5ra4OKurExHJ3JwOPCNHjqR8+fIEBARw+fJlKlasSOPGjQkODuadd95JixpFspQaNcz+PC+/bL7+6CNzzh4NghQReXBOzcNjGAYnT56kQIECnD9/nt27d3P58mVq1KjBQw89lJZ1phrNwyOZyU8/wTPPwLlz5uSFY8eaExfabFZXJiKSvlL6++1U4ElISMDb25u9e/dmmoDzbwo8ktlERkLPnuYwdoBWrczJCv39ra1LRCQ9pevEg25ubjz00EP8/fffTp9IRB5MoUKwdCl8+il4eZnPq1SBJUusrkxEJPNwug/Phx9+yOuvv86ePXvSoh4RSYLNBn36mH1
7qlQxb3E99pi57epVq6sTEcn4UrRauqenJ9mzZ3fYr9XSRdLWtWvw1lvw8cfm6woVYNYsqF7d0rJERNKUVksXyWK8vc2RW48+CmFhsH+/uQr7yJEwYIA5vF1ERBxptXSRTOz8eXjuOVi82HzdvDnMmAGaA1REXI1WSxfJwvLnN9fi+vxzyJEDIiLMGZoXLLC6MhGRjEWrpYtkcjabuf7Wtm1QqxZcuAAdOpgtP///n6eISJan1dJFXES5crBhAwwaZIagL7+EmjVhyxarKxMRsZ5WSxdxIZ6eMGoUrFhhLkh6+DAEB8Pw4XDjhtXViYhYR6uli7igpk1h1y7o2BFu3oQhQ6BOHXMeHxGRrEirpYu4qDx5YM4c+PZbyJcPdu40h6+/9hrExlpdnYhI+tJq6SIuzGaDp5+GffugSxdISIDx483Zmn/91erqRETSzwPPw3PixAn27Nmj1dJFMpElS+Cll+DkSfN1z54wbhzkzWttXSIi95Ouq6W7AgUeyeouXTKXppg0CQzDXHX900/hiSfMFiERkYwo3QNPfHw8X3/9NREREURFRZGQkOCwf8WKFU4XkZ4UeERMGzaYc/Xs32++btvWDEEaeyAiGVG6z7Tcr18/+vXrR3x8PJUrV6ZatWoODxHJHIKDYft2ePddyJbNXJ6iYkVz1uZ//X+MiEim53QLT/78+fnmm29o1apVWtWUptTCI5LYnj1ma8+mTebrxo1h6lRzMkMRkYwg3Vt4PD09KVOmjNMnEpGMq3JlWL8eJkyAnDlhzRqoVs1cgV0TFoqIK3A68Lz66qv897//JYv1dRZxee7u0K+f2doTGgpxcfD225qwUERcQ7JuaT3++OMOr1esWEHevHmpVKkS2bJlc9i3IIMv06xbWiL3Zxgwcyb07w9//w1ubjBgAAwbZq7KLiKS3lL6++2RnIP8/PwcXie1lpaIuI5bExY+8ogZdGbNMicsXLjQ7NQcEmJ1hSIiztE8PCJyX5qwUESslu6dlm85d+4c69atY926dZw7d+5BP0ZEMoHWrWHvXujTx2z9mT7dHMI+b555+0tEJKNzOvBcuXKFZ555hsKFC9O4cWMaN25MkSJFePbZZ4nVioQiLsvHx5yRed06qFABzp6FJ5+E9u3hr7+srk5E5N6cDjwDBw5k9erV/Pjjj0RHRxMdHc3ixYtZvXo1r776alrUKCIZSFITFpYrB8OHw7VrVlcnIpK0B5p4cP78+TRt2tRh+8qVK3nyyScz/O0t9eERST179kCvXuYcPgCBgWbn5vbttS6XiKSudO/DExsbi7+/f6LtBQsW1C0tkSymcmVYu9YcxVW0KBw/Dh06mKO49uyxujoRkducDjz169dn6NChXLuj7frq1au8//771K9fP1WLE5GMz2aDLl3g4EF45x3w8oIVK6B6dejbFy5csLpCEZEHuKW1Z88eQkNDiYuLsy8WunPnTry9vfn555+pVKlSmhSaWnRLSyRtHTsGr70Gt+YgzZcPPvgAXnjBnM1ZRORBpPT3+4Hm4YmNjWXmzJkcOHAAgAoVKtC1a1eyZ8/udAHpTYFHJH2sWHF7qQqAqlXhk0+gSRNr6xKRzMmSwJOZKfCIpJ+bN2HKFHNE1z//mNs6doSxY6FECWtrE5HMJd06LW/dupWHH36YmJiYRPsuXrzIww8/zM6dO50uAGDSpEkEBgbi7e1NUFAQmzdvTtb7Zs+ejc1mo127dg90XhFJWx4e5mSFhw+bMzW7uZmTFZYvD0OHgsY5iEh6SXbgGT9+PM2aNUsyVfn5+dGiRQvGjh3rdAFz5sxh4MCBDB06lG3btlGtWjVCQ0OJioq65/uOHz/Oa6+9RqNGjZw+p4ikr3z54LPPzPl7mjY15+sZNswMPnPmaLZmEUl7yQ48mzZtom3btnfd36ZNGzZs2OB0AR999BHPP/88PXv2pGLFikyZMoUcOXLw1Vdf3fU98fHxdO3alffff59SpUo5fU4RsUbVqmb
fnnnzoHhxc22uzp3NELRjh9XViYgrS3bgOXXqFD4+PnfdnytXLs6cOePUya9fv87WrVsJuWPpZTc3N0JCQti4ceNd3zds2DAKFizIs88+e99zxMXFERMT4/AQEevYbPDEE3DgALz/PmTPDmvWQK1a5iSG589bXaGIuKJkB54CBQpw8ODBu+4/cOAA+fPnd+rk58+fJz4+PtFEhv7+/kRGRib5nnXr1vHll18ybdq0ZJ1j1KhR+Pn52R8BAQFO1SgiaSN7drMz84ED0KkTJCTA55/DQw+Zo7lu3LC6QhFxJckOPCEhIYwYMSLJfYZhMGLECIeWmrRw6dIlunXrxrRp05IdrgYPHszFixftj5MnT6ZpjSLinOLFYfZsWL0aqlWD6GhzOHv16hAebnV1IuIqPJJ74DvvvEOtWrUICgri1VdfpVy5coDZsjN+/HgOHTrE119/7dTJ8+fPj7u7O2fPnnXYfvbsWQoVKpTo+CNHjnD8+HHatGlj35aQkGB+EQ8PDh48SOnSpR3e4+XlhZeXl1N1iUj6a9wYtm6FL76At9+GffvgkUegbVsYPdpcoFRE5EElu4WndOnS/Prrr1y5coXOnTtTs2ZNatasSZcuXYiNjSU8PJwyZco4dXJPT09q1apFRESEfVtCQgIRERFJLlNRvnx5du/ezY4dO+yP//znPzz88MPs2LFDt6tEMjl3d3jxRXMY+yuvmK8XL4ZKleC558xOziIiD+KBJh7csWMHhw8fxjAMypYtS/Xq1R+4gDlz5hAWFsbnn39O3bp1mTBhAnPnzuXAgQP4+/vTvXt3ihYtyqhRo5J8f48ePYiOjmbRokXJOp8mHhTJPPbuhcGD4ccfzdeenvDyy/DWW1CggLW1iUj6Sunvd7Jvad2pevXqKQo5d+rUqRPnzp3j3XffJTIykurVq7N8+XJ7R+YTJ07g5ub0Gqci4gIqVYIffoANG8yQs3o1TJhg3vYaONB8+PlZXaWIZAZaWkJEMgXDgF9+MYPPtm3mtrx5zRag3r3NUV8i4rrSbWkJEREr2WwQGgq//25OXFiuHFy4AK+/bg5lnzpVQ9lF5O4UeEQkU7k1ceGePfDVV+aw9lOnzM7OFSvC99+bc/qIiNxJgUdEMiUPD+jZEw4dMvv1FCgAf/wBTz0FNWvCkiVao0tEbkt24BkyZAg3b9686/4TJ07QokWLVClKRCS5vLzMiQqPHIEPPgBfX9i5Ex57DBo1grVrra5QRDKCZAeeGTNmUKdOHfbs2ZNo3+eff07lypXx8HigQV8iIinm4wPvvANHj5r9ery9Yf16c0LDli3NldpFJOtKduDZs2cPVapUoXbt2owaNYqEhAROnDhBSEgIb7zxBuPGjWPZsmVpWauIyH3lywdjxpi3t1580bz1tXy5eZurUye4x5KAIuLCnB6WvnjxYl588UUKFSrEsWPHqFu3Ll988QUlSpRIqxpTlYali2Qtf/wBQ4eanZkNw5y9uUcPc5smZxfJPNJ9WHq9evWoUqUKu3btIiEhgXfeeSfThB0RyXrKlIGZM2HHDmjTBuLj4csvzaHsAwfCuXNWVygi6cGpwPP9999TsWJFEhIS2L9/Py+99BKPPPIIAwYM4Nq1a2lVo4hIilWtas7avH49NGkCcXHw8cdQsiS89hqcPm11hSKSlpIdeDp06MDzzz/Pe++9R0REBOXKlWPMmDGsXLmSpUuXUq1aNTZu3JiWtYqIpFhwMKxcebtfz5UrMH68GXx69TI7PYuI60l24ImMjGT79u307dvXYXtwcDA7duzg0UcfpUmTJqleoIhIartz1ualS6FBA7h+HT7/3LzV9fTT5sSGIuI6kt1pOSEh4b6LeK5Zs4bGjRunSmFpRZ2WRSQpa9fCyJFmy88t//mPuXZXUJB1dYmIKd06LSdnxfKMHnZERO6mUSNYtgy2bjWXrrDZzD4/9epB8+YQEaGZm0UyMy0tISJyh5o1zcVJ9+0zh697eMCKFRA
SYoafxYu1VpdIZqTAIyKShPLlYfp0c8mKvn3NmZs3b4Z27cwRXzNnwj1W2xGRDEaBR0TkHooXh08+gT//hMGDzbW69u41OzaXLWt2dNasHCIZnwKPiEgyFCxodmr+808YMQLy54djx8yh7CVLwrhxcOmS1VWKyN0o8IiIOCF3bnPk1p9/wn//ay5PERlpLlhaogS89x78/bfVVYrIvynwiIg8gBw54JVXzLW6vvrKvL31zz/w/vtm8NHszSIZiwKPiEgKeHpCz57mqK65c6F6dcfZm597DnbvtrpKEVHgERFJBe7u0LEjbNtmzt7csKE5e/OXX5qjupo1g0WLzMVLRST9KfCIiKQimw1atjRnbl63zgxB7u7m+l3t25urt48fb97+EpH0o8AjIpJGGjQwb3MdPQqDBkHevHD8uNm/p1gxeOkl2L/f6ipFsgYFHhGRNFa8OIwaBX/9BdOmQZUqEBsLU6ZAxYrwyCPw00+awVkkLSnwiIikk+zZzU7MO3eat7jatQM3NwgPhzZtzJFe//0vXLxodaUirkeBR0Qkndls0LQpLFxoDmt/7TVzfp8jR6B/f/N2V9++cOiQxYWKuBAFHhERC5UsCWPHmre7Jk+GChXg8mWYOBHKlYNWrWD5ct3uEkkpBR4RkQwgZ05zmYq9e+GXX+Cxx8yWoGXLzFFfFSvCpElavkLkQSnwiIhkIDYbtGgBP/5o3tLq1w98fODgQejTx7zdNXCgeftLRJJPgUdEJIMqUwYmTIBTp+DTT+GhhyAmBj7+2Hz+n/+YLUCazFDk/hR4REQyOB8fs3XnwAFzFudHHwXDMFuBWrWCwEAYOtSc40dEkqbAIyKSSbi5mf15li0zw88rr5iTGf71FwwbBqVKmXP6zJ0LcXFWVyuSsSjwiIhkQuXKmXP2nDoF338PzZubrT7h4dCpExQtavb12bvX6kpFMgabYRiG1UWkp5iYGPz8/Lh48SK+vr5WlyMikmqOHoWvvoLp0+H06dvb69c3Jzx88knIlcu6+kRSIqW/32rhERFxEaVKwfDh8Oef5lIV7dqZC5du3AjPPguFC8Pzz8OmTWZrkEhWohYeEREXFhkJM2bAF1+YszrfUrmy2erz9NOQL5919Ykkl1p4RETkrgoVgjffNOf0Wb0aunUDb2/Ys8dcxqJIEejcGX79VbM5i2tTC4+ISBYTHQ2zZpmtPtu3394eGGje+urRw5zgUCQjSenvtwKPiEgWtm0bfPklzJx5e5V2Nzdzrp9nn4XWrcHLy9oaRUCBx2kKPCIiicXGwv/+Z7b6rFlze3uePPDEE/DUU9C4sRmGRKygwOMkBR4RkXs7dMhs9fnuO8fh7UWLQpcuZvipXt1c90skvSjwOEmBR0QkeeLjzdaemTNh/vzbt7wAypeHrl3NAFS6tHU1StahwOMkBR4REefFxZlLWsycaa7hdefSFUFBZqtPp07g729djeLaFHicpMAjIpIyMTGwcKE50uvO4exubhASYoaf9u1Bf8VKalLgcZICj4hI6omMNBcrnTXLnMH5Fm9vaNPGDD8tW2qkl6ScAo+TFHhERNLGH3+YC5nOnAkHD97enju340gvd3fLSpRMTIHHSQo8IiJpyzDMCQ1nzTID0L9HenXubIafGjU00kuST4HHSQo8IiLp59ZIr1mzzJFe0dG395UtC48/bj5q11b4kXtT4HGSAo+IiDXi4mD58tsjva5du70vIMDs6Ny+PTRsCB4e1tUpGZMCj5MUeERErBcTA0uXmqO9liyBK1du78ufH9q2NVt+mjdXh2cxKfA4SYFHRCRjuXrVHN6+YAH88ANcuHB7n4+PuZ7X44+bo71y5bKuTrGWAo+TFHhERDKumzfNPj8LFpitP3d2ePbygkceMcNPmzaQL591dUr6U+BxkgKPiEjmkJAAmzebwWfBAnPY+y3u7tC0qdnnp107c/SXuDYFHicp8IiIZD6GAXv23A4/O3c67q9Xz2z5ad8eypS
xpkZJWwo8TlLgERHJ/I4cMcPPwoWwYYPjvipVzPDTtq1WdXclCjxOUuAREXEtp0/D4sVmy8/KlebcP7cULmx2dm7Vylzny8/PujolZVL6++2WBjU5bdKkSQQGBuLt7U1QUBCbN2++67HTpk2jUaNG5MmThzx58hASEnLP40VExLUVKQIvvQTh4RAVBTNmmP16cuaEM2fgq6/MpS3y5zf7/YwZY94ey1r/uy+WB545c+YwcOBAhg4dyrZt26hWrRqhoaFERUUlefyqVavo0qULK1euZOPGjQQEBPDII49w6tSpdK5cREQymrx5oXt381bX33+bIWjAAChf3hwBtno1vPmmedurRAno1cscCn/5stWVS1qz/JZWUFAQderUYeLEiQAkJCQQEBBA3759GTRo0H3fHx8fT548eZg4cSLdu3e/7/G6pSUikjUdPQrLlpkTHq5Y4TjTs6cnNGly+/ZX2bLq+5PRZOpbWtevX2fr1q2EhITYt7m5uRESEsLGjRuT9RmxsbHcuHGDvHnzplWZIiLiAkqVgt69zZmdL1wwg0+fPlCyJFy/brYGDRxotgaVKQN9+5oB6epVqyuX1GBp4Dl//jzx8fH4+/s7bPf39ycyMjJZn/Hmm29SpEgRh9B0p7i4OGJiYhweIiKStWXPbrbmfPqpOeLrwAH46COzY3O2bGZr0MSJZmtP3rzmbM+TJsGxY1ZXLg8qUy/P9uGHHzJ79mxWrVqFt7d3kseMGjWK999/P50rExGRzMJmg3LlzMeAAWZ/nogIswVo6VL466/bz8FsAWrZElq0gEaNtNxFZmFpH57r16+TI0cO5s+fT7t27ezbw8LCiI6OZvHixXd977hx4xg+fDi//vortWvXvutxcXFxxMXF2V/HxMQQEBCgPjwiInJftyY8vBV41q93HPbu4WFOeti8ufkICjL7A0nqy/Tz8AQFBVG3bl0+/fRTwOy0XLx4cfr06XPXTstjxoxhxIgR/Pzzz9SrV8+p86nTsoiIPKjoaHOh02XLzFagP/903J8zp9nqcysAVasGbpaPh3YNmT7wzJkzh7CwMD7//HPq1q3LhAkTmDt3LgcOHMDf35/u3btTtGhRRo0aBcDo0aN59913mTVrFg0aNLB/Tq5cuciVjHZFBR4REUkNhmH29YmIMB8rVsD5847H5MsHDz8MzZqZAeihhzT660Fl+sADMHHiRMaOHUtkZCTVq1fnk08+ISgoCICmTZsSGBjI119/DUBgYCB//jtSA0OHDuW9996777kUeEREJC0kJJi3v24FoNWrE8/vExBwO/w0b25OmijJ4xKBJz0p8IiISHq4cQO2bLkdgDZuNIe/36l8+dvhp2lTyJPHklIzBQUeJynwiIiIFWJjYd0689ZXRARs3eq4vIWbG9SsaYafZs0gOFgjwO6kwOMkBR4REckI/vkHVq263QJ04IDjfnd3c7X3Ro2gYUPz8a9p67IUBR4nKfCIiEhGdOqU2fqzYoW56nsS3VV56KHbAahRIyhdOut0glbgcZICj4iIZAZ//WXeAlu71vzn7t2JV3gvVOh260+jRlC1qjk3kCtS4HGSAo+IiGRG//xjdny+FYA2b07cCdrHB+rXvx2AgoLMZTRcgQKPkxR4RETEFVy7Zo4Cu9UKtH49/Hu5yGzZoFat27fBGjQw5wbKjBR4nKTAIyIirig+3pwH6FYAWrsWTp9OfFzFimbwCQqCunXN1+7u6V+vsxR4nKTAIyIiWYFhwPHjjv2A9u9PfFyuXFC7thmAboWgokXTvdz7UuBxkgKPiIhkVefOmbe+fvsNNm2C339PPBs0mIHnzgBUu7b1cwIp8DhJgUdERMQUH2+2+mzadPuxZ4+5TMad3NygUiXHEFSpUvreClPgcZICj4iIyN1dvgzbtjmGoL/+Snxczpy3b4XVrWv+s1ixtKtLgcdJCjwiIiLOOX3aHAZ/KwBt2ZL0rbAiRczg06gRDBiQujUo8DhJgUdERCR
l7rwVdisI7d59+1ZY3brmttSU0t9vF52PUURERNKKuztUrmw+nn3W3Hblirkg6ubNGXOuHwUeERERSbGcOaFxY/OREblZXYCIiIhIWlPgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy1PgEREREZenwCMiIiIuT4FHREREXJ4Cj4iIiLg8BR4RERFxeQo8IiIi4vIUeERERMTlKfCIiIiIy8sQgWfSpEkEBgbi7e1NUFAQmzdvvufx8+bNo3z58nh7e1OlShWWLl2aTpWKiIhIZmR54JkzZw4DBw5k6NChbNu2jWrVqhEaGkpUVFSSx2/YsIEuXbrw7LPPsn37dtq1a0e7du3Ys2dPOlcuIiIimYXNMAzDygKCgoKoU6cOEydOBCAhIYGAgAD69u3LoEGDEh3fqVMnrly5wk8//WTfVq9ePapXr86UKVPue76YmBj8/Py4ePEivr6+qfdFREREJM2k9Pfb0hae69evs3XrVkJCQuzb3NzcCAkJYePGjUm+Z+PGjQ7HA4SGht71eBEREREPK09+/vx54uPj8ff3d9ju7+/PgQMHknxPZGRkksdHRkYmeXxcXBxxcXH21xcvXgTMpCgiIiKZw63f7Qe9MWVp4EkPo0aN4v3330+0PSAgwIJqREREJCUuXbqEn5+f0++zNPDkz58fd3d3zp4967D97NmzFCpUKMn3FCpUyKnjBw8ezMCBA+2vExISuHDhAvny5cNmsyWrzpiYGAICAjh58qT6/aQzXXvr6NpbR9feOrr21rnftTcMg0uXLlGkSJEH+nxLA4+npye1atUiIiKCdu3aAWYgiYiIoE+fPkm+p379+kRERNC/f3/7tvDwcOrXr5/k8V5eXnh5eTlsy5079wPV6+vrq/8ALKJrbx1de+vo2ltH194697r2D9Kyc4vlt7QGDhxIWFgYtWvXpm7dukyYMIErV67Qs2dPALp3707RokUZNWoUAP369aNJkyaMHz+e1q1bM3v2bH7//XemTp1q5dcQERGRDMzywNOpUyfOnTvHu+++S2RkJNWrV2f58uX2jsknTpzAze32YLLg4GBmzZrFO++8w1tvvcVDDz3EokWLqFy5slVfQURERDI4ywMPQJ8+fe56C2vVqlWJtnXs2JGOHTumcVW3eXl5MXTo0ES3xiTt6dpbR9feOrr21tG1t05aX3vLJx4UERERSWuWLy0hIiIiktYUeERERMTlKfCIiIiIy1PgEREREZenwJMMkyZNIjAwEG9vb4KCgti8ebPVJbmcUaNGUadOHXx8fChYsCDt2rXj4MGDDsdcu3aN3r17ky9fPnLlykWHDh0SzbotKfPhhx9is9kcJvbUdU87p06d4umnnyZfvnxkz56dKlWq8Pvvv9v3G4bBu+++S+HChcmePTshISEcPnzYwopdQ3x8PEOGDKFkyZJkz56d0qVL88EHHzis0aRrnzrWrFlDmzZtKFKkCDabjUWLFjnsT851vnDhAl27dsXX15fcuXPz7LPPcvnyZadrUeC5jzlz5jBw4ECGDh3Ktm3bqFatGqGhoURFRVldmktZvXo1vXv35rfffiM8PJwbN27wyCOPcOXKFfsxAwYM4Mcff2TevHmsXr2a06dP8/jjj1tYtWvZsmULn3/+OVWrVnXYruueNv755x8aNGhAtmzZWLZsGfv27WP8+PHkyZPHfsyYMWP45JNPmDJlCps2bSJnzpyEhoZy7do1CyvP/Ea
PHs3kyZOZOHEi+/fvZ/To0YwZM4ZPP/3Ufoyufeq4cuUK1apVY9KkSUnuT8517tq1K3v37iU8PJyffvqJNWvW8MILLzhfjCH3VLduXaN379721/Hx8UaRIkWMUaNGWViV64uKijIAY/Xq1YZhGEZ0dLSRLVs2Y968efZj9u/fbwDGxo0brSrTZVy6dMl46KGHjPDwcKNJkyZGv379DMPQdU9Lb775ptGwYcO77k9ISDAKFSpkjB071r4tOjra8PLyMr7//vv0KNFltW7d2njmmWcctj3++ONG165dDcPQtU8rgLFw4UL76+Rc53379hmAsWXLFvsxy5YtM2w2m3Hq1Cmnzq8Wnnu4fv06W7duJSQkxL7Nzc2NkJAQNm7caGFlru/ixYsA5M2bF4CtW7dy48YNhz+L8uXLU7x4cf1ZpILevXvTunVrh+sLuu5p6YcffqB27dp07NiRggULUqNGDaZNm2bff+zYMSIjIx2uvZ+fH0FBQbr2KRQcHExERASHDh0CYOfOnaxbt46WLVsCuvbpJTnXeePGjeTOnZvatWvbjwkJCcHNzY1NmzY5db4MMdNyRnX+/Hni4+Pty1zc4u/vz4EDByyqyvUlJCTQv39/GjRoYF8yJDIyEk9Pz0QLv/r7+xMZGWlBla5j9uzZbNu2jS1btiTap+uedo4ePcrkyZMZOHAgb731Flu2bOGVV17B09OTsLAw+/VN6u8fXfuUGTRoEDExMZQvXx53d3fi4+MZMWIEXbt2BdC1TyfJuc6RkZEULFjQYb+Hhwd58+Z1+s9CgUcynN69e7Nnzx7WrVtndSku7+TJk/Tr14/w8HC8vb2tLidLSUhIoHbt2owcORKAGjVqsGfPHqZMmUJYWJjF1bm2uXPnMnPmTGbNmkWlSpXYsWMH/fv3p0iRIrr2Lky3tO4hf/78uLu7JxqRcvbsWQoVKmRRVa6tT58+/PTTT6xcuZJixYrZtxcqVIjr168THR3tcLz+LFJm69atREVFUbNmTTw8PPDw8GD16tV88skneHh44O/vr+ueRgoXLkzFihUdtlWoUIETJ04A2K+v/v5Jfa+//jqDBg2ic+fOVKlShW7dujFgwABGjRoF6Nqnl+Rc50KFCiUaJHTz5k0uXLjg9J+FAs89eHp6UqtWLSIiIuzbEhISiIiIoH79+hZW5noMw6BPnz4sXLiQFStWULJkSYf9tWrVIlu2bA5/FgcPHuTEiRP6s0iB5s2bs3v3bnbs2GF/1K5dm65du9qf67qnjQYNGiSaeuHQoUOUKFECgJIlS1KoUCGHax8TE8OmTZt07VMoNjYWNzfHnz93d3cSEhIAXfv0kpzrXL9+faKjo9m6dav9mBUrVpCQkEBQUJBzJ0xRl+ssYPbs2YaXl5fx9ddfG/v27TNeeOEFI3fu3EZkZKTVpbmUl156yfDz8zNWrVplnDlzxv6IjY21H9OrVy+jePHixooVK4zff//dqF+/vlG/fn0Lq3ZNd47SMgxd97SyefNmw8PDwxgxYoRx+PBhY+bMmUaOHDmM7777zn7Mhx9+aOTOndtYvHixsWvXLqNt27ZGyZIljatXr1pYeeYXFhZmFC1a1Pjpp5+MY8eOGQsWLDDy589vvPHGG/ZjdO1Tx6VLl4zt27cb27dvNwDjo48+MrZv3278+eefhmEk7zo/+uijRo0aNYxNmzYZ69atMx566CGjS5cuTteiwJMMn376qVG8eHHD09PTqFu3rvHbb79ZXZLLAZJ8TJ8+3X7M1atXjZdfftnIkyePkSNHDqN9+/bGmTNnrCvaRf078Oi6p50ff/zRqFy5suHl5WWUL1/emDp1qsP+hIQEY8iQIYa/v7/h5eVlNG/e3Dh48KBF1bqOmJgYo1+/fkbx4sUNb29vo1SpUsbbb79txMXF2Y/RtU8dK1euTPLv9rCwMMMwkned//77b6NLly5Grly5DF9fX6Nnz57GpUuXnK7FZhh3TC0pIiIi4oLUh0dERER
cngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjLU+ARERERl6fAIyIPzGazsWjRojQ9x6pVq7DZbInW88rMvv7660Qr0ItI2lLgEZEkRUZG0rdvX0qVKoWXlxcBAQG0adPGYd2bM2fO0LJlyzStIzg4mDNnzuDn5wckPyxklFARGBjIhAkTrC5DJMvzsLoAEcl4jh8/ToMGDcidOzdjx46lSpUq3Lhxg59//pnevXtz4MABgPuuVnzjxg2yZcuWolo8PT21QrWIpJhaeEQkkZdffhmbzcbmzZvp0KEDZcuWpVKlSgwcOJDffvvNftydt7SOHz+OzWZjzpw5NGnSBG9vb2bOnAnAV199RaVKlfDy8qJw4cL06dPH4T07duywf2Z0dDQ2m41Vq1YBjre0Vq1aRc+ePbl48SI2mw2bzcZ77733QN8xOjqa5557jgIFCuDr60uzZs3YuXOnff97771H9erV+fbbbwkMDMTPz4/OnTtz6dIl+zGXLl2ia9eu5MyZk8KFC/Pxxx/TtGlT+vfvD0DTpk35888/GTBggL3eO/38889UqFCBXLly8eijj3LmzJkH+i4icn8KPCLi4MKFCyxfvpzevXuTM2fORPvvd5to0KBB9OvXj/379xMaGsrkyZPp3bs3L7zwArt37+aHH36gTJkyD1RbcHAwEyZMwNfXlzNnznDmzBlee+21B/qsjh07EhUVxbJly9i6dSs1a9akefPmXLhwwX7MkSNHWLRoET/99BM//fQTq1ev5sMPP7TvHzhwIOvXr+eHH34gPDyctWvXsm3bNvv+BQsWUKxYMYYNG2av95bY2FjGjRvHt99+y5o1azhx4sQDfxcRuT/d0hIRB3/88QeGYVC+fPkHen///v15/PHH7a+HDx/Oq6++Sr9+/ezb6tSp80Cf7enpiZ+fHzabLUW3udatW8fmzZuJiorCy8sLgHHjxrFo0SLmz5/PCy+8AEBCQgJff/01Pj4+AHTr1o2IiAhGjBjBpUuXmDFjBrNmzaJ58+YATJ8+nSJFitjPkzdvXtzd3fHx8UlU740bN5gyZQqlS5cGoE+fPgwbNuyBv5OI3JsCj4g4MAwjRe+vXbu2/XlUVBSnT5+2B4KMYufOnVy+fJl8+fI5bL969SpHjhyxvw4MDLSHHYDChQsTFRUFwNGjR7lx4wZ169a17/fz86NcuXLJqiFHjhz2sPPvzxaR1KfAIyIOHnroIWw2m71jsrPuvA2WPXv2ex7r5mbeVb8zZN24ceOBzuuMy5cvU7hwYXs/oTvdecvu3x2ubTYbCQkJqVJDUp+d0rApInenPjwi4iBv3ryEhoYyadIkrly5kmi/M/Ph+Pj4EBgY6DCU/U4FChQAcOjbcmcH5qR4enoSHx+f7BqSUrNmTSIjI/Hw8KBMmTIOj/z58yfrM0qVKkW2bNnYsmWLfdvFixc5dOhQqtcrIimnFh4RSWTSpEk0aNCAunXrMmzYMKpWrcrNmzcJDw9n8uTJ7N+/P9mf9d5779GrVy8KFixIy5YtuXTpEuvXr6dv375kz56devXq8eGHH1KyZEmioqJ455137vl5gYGBXL58mYiICKpVq0aOHDnIkSNHksfGx8cnClBeXl6EhIRQv3592rVrx5gxYyhbtiynT59myZIltG/f3uG23N34+PgQFhbG66+/Tt68eSlYsCBDhw7Fzc3NYTRWYGAga9asoXPnznh5eSU7UIlI6lILj4gkUqpUKbZt28bDDz/Mq6++SuXKlWnRogURERFMnjzZqc8KCwtjwoQJfPbZZ1SqVInHHnuMw4cP2/d/9dVX3Lx5k1q1atG/f3+GDx9+z88LDg6mV69edOrUiQIFCjBmzJi7Hnv58mVq1Kjh8GjTpg02m42lS5fSuHFjevbsSdmyZencuTN//vkn/v7+yf5uH330EfXr1+exxx4jJCSEBg0aUKFCBby9ve3HDBs2jOPHj1O6dGl7i5aIpD+boZvGIiK
p4sqVKxQtWpTx48fz7LPPWl2OiNxBt7RERB7Q9u3bOXDgAHXr1uXixYv2YeVt27a1uDIR+TcFHhGRFBg3bhwHDx7E09OTWrVqsXbtWvXTEcmAdEtLREREXJ46LYuIiIjLU+ARERERl6fAIyIiIi5PgUdERERcngKPiIiIuDwFHhEREXF5CjwiIiLi8hR4RERExOUp8IiIiIjL+z8ANdILegdk5gAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "x_coherence = []\n", - "Ls = range(2,100,5)\n", - "for L in Ls:\n", - " c=Circuit((L)*[('Gi' ,0)])\n", - " ErrorDict={'Gi' : {('H','Z'): 1}}\n", - " EndErrors = ErrorPropagator(c,ErrorDict,NonMarkovian=True)\n", - "\n", - " corr=np.eye(len(c))*.01\n", - " error = averaged_evolution(corr,EndErrors,1)\n", - "\n", - " x_coherence += [np.real(error[1,1])]\n", - "plt.plot(Ls,x_coherence, color='blue')\n", - "plt.ylim(0,1.1)\n", - "plt.xlabel('Circuit Length')\n", - "plt.ylabel('X Coherence Decay')\n", - "plt.title('White noise dephasing')" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]], [[('H', ('X',), (1+0j))]]]\n" - ] - } - ], - "source": [ - "list=[propagatableerrorgen('H',['X'],1)]\n", - "errors=ErrorPropagator(c,list,NonMarkovian=True,ErrorLayerDef=True)\n", - "print(errors)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "PyGSTi_EOC", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb new file mode 100644 index 000000000..4c021980d --- /dev/null +++ b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb @@ -0,0 +1,764 @@ +{ + 
"cells": [ + { + "cell_type": "markdown", + "id": "a2bf624d-6098-46e5-820e-e775e4fb41f0", + "metadata": {}, + "source": [ + "# Error Generator Propagation\n", + "In this tutorial we will provide an overview of the core functionality available through pyGSTi's error generator propagation module.\n", + "\n", + "Error generator propagation is a technique which leverages the analytical properties of the error generator formalism to enable efficient forward simulation by propagating general markovian error channels through Clifford circuits. Efficiency of this technique relies on two conditions:\n", + "\n", + "- Sparsity: At most a polynomial number of error generator rates (in the number of qubits) can be nonzero for any given circuit layer.\n", + "- Clifford-only: The propagation of error generators relies on the analytic properties of the elementary error generators when conjugated by cliffords.\n", + "\n", + "That is pretty much it though. Coherent errors, non-unital errors (e.g. amplitude damping), dephasing, all fair game. Practically there is a third requirement as well and that is that the error generator rates are relatively small. The larger the error generator rates, the higher-order the approximation you'll require (BCH and/or taylor series) to achieve a given precision target when using the functionality described herein for efficiently performing strong simulation in the error generator propagation framework. \n", + "\n", + "Please note: The implementation of the error generator propagation framework in pyGSTi requires the `stim` python package, so please ensure this is installed in your environment before proceeding." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "472bba68-9e69-4379-aa3c-7e062128ef7c", + "metadata": {}, + "outputs": [], + "source": [ + "import pygsti\n", + "import stim\n", + "from pygsti.tools import errgenproptools as eprop\n", + "from pygsti.tools.lindbladtools import random_error_generator_rates\n", + "from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator\n", + "from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE" + ] + }, + { + "cell_type": "markdown", + "id": "1a4682fa-f5e3-4a44-a596-e095f2cc4890", + "metadata": {}, + "source": [ + "To begin we need an error model, and particularly one parameterized using error generators (or otherwise capable of outputing error generators for a circuit layer). For this tutorial we'll work with a 4-qubit crosstalk-free model for a gate set consisting of $\\pi/2$ rotations about X and Y on each qubit, and a two-qubit CPHASE gate. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "ae464763-b211-4bf4-a85c-47465aa7187b", + "metadata": {}, + "outputs": [], + "source": [ + "num_qubits = 4\n", + "gate_names = ['Gcphase', 'Gxpi2', 'Gypi2']\n", + "availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]}\n", + "pspec = pygsti.processors.QubitProcessorSpec(num_qubits, gate_names, availability=availability)\n", + "target_model = pygsti.models.create_crosstalk_free_model(processor_spec = pspec)" + ] + }, + { + "cell_type": "markdown", + "id": "f24def6b-b268-4a62-bbe5-37d0c188c15b", + "metadata": {}, + "source": [ + "Now that we have a target model we'll also want a noisy model to simulate as well. For this example we'll randomly sample a weight-2 H+S (coherent + pauli stochastic) error model, but the error generator propagation framework can also handle C and A error generators as well (i.e. general lindbladian errors). 
\n", + "The specific specification we'll need for the model construction routine we're about to use is a dictionary whose keys are gate labels. Each value of this dictionary is itself a dictionary whose keys are elementary error generator labels, and whose values are error generator rates." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "e205a715-c231-40a1-82bf-27cf303e8820", + "metadata": {}, + "outputs": [], + "source": [ + "qubit_labels = pspec.qubit_labels\n", + "error_rates_dict = {}\n", + "for gate, availability in pspec.availability.items():\n", + " n = pspec.gate_num_qubits(gate)\n", + " if availability == 'all-edges':\n", + " assert(n == 1), \"Currently require all 2-qubit gates have a specified availability!\"\n", + " qubits_for_gate = qubit_labels\n", + " else:\n", + " qubits_for_gate = availability \n", + " for qs in qubits_for_gate:\n", + " label = pygsti.baseobjs.Label(gate, qs)\n", + " # Sample error rates.\n", + " error_rates_dict[label] = random_error_generator_rates(num_qubits=n, errorgen_types=('H', 'S'), label_type='local', seed=1234)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5ebacf47-54ba-4cd1-b14c-26f1d0516d29", + "metadata": {}, + "outputs": [], + "source": [ + "error_model = pygsti.models.create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict)" + ] + }, + { + "cell_type": "markdown", + "id": "ab4dc617-6d7c-410c-a976-282a169c8bdf", + "metadata": {}, + "source": [ + "We'll also need an example circuit for the rest of our examples, so will construct one at random." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "4e88638b-cc35-49be-8976-ee658d3201a6", + "metadata": {}, + "outputs": [], + "source": [ + "c = pygsti.algorithms.randomcircuit.create_random_circuit(pspec, 3, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "145d7875-599a-447f-b2ac-b2fc702d6dda", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Qubit 0 ---|Gxpi2|-|Gxpi2|-|Gypi2|---\n", + "Qubit 1 ---|Gypi2|-|Gypi2|-|Gxpi2|---\n", + "Qubit 2 ---|Gypi2|-|Gypi2|-| C3 |---\n", + "Qubit 3 ---|Gypi2|-|Gxpi2|-| C2 |---\n", + "\n" + ] + } + ], + "source": [ + "print(c)" + ] + }, + { + "cell_type": "markdown", + "id": "192d74b6-3f36-499f-9db6-335a01b87c3f", + "metadata": {}, + "source": [ + "## Basic Propagation\n", + "In this section we'll introduce the basic syntax of the `ErrorGeneratorPropagator` class and usage of the basic error generator propagation functionality.\n", + "Our first step will be to create an instance of the `ErrorGeneratorPropagator` class. This is as simple as passing in our error model into the constructor." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c66532cd-876e-4002-89d2-b47eaafb5bf0", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_propagator = ErrorGeneratorPropagator(error_model)" + ] + }, + { + "cell_type": "markdown", + "id": "7fc2f197-6351-4c16-b351-7e539f1db839", + "metadata": {}, + "source": [ + "The very first thing we can do is propagate the error generators for each circuit layer to the end of the circuit. This is done using the `propagate_errorgens` method." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "90dc2734-2d17-4555-b32c-ac307967c21e", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layers = errorgen_propagator.propagate_errorgens(c)" + ] + }, + { + "cell_type": "markdown", + "id": "044babe6-675d-4145-bb68-bc3e5e80efff", + "metadata": {}, + "source": [ + "The output of this method is a list of dictionaries, one for each original error generator layer in the circuit, containing an updated set of elementary error generator coefficients and rates corresponding to the result of propagating each error generator through the circuit. Note this list is returned in circuit ordering, so there is a one-to-one correspondence between the position an error generator appears in the original circuit and where it appears in this final list.\n", + "\n", + "So, to see the result of propagating the error generator corresponding to the noise induced after the first layer of gates to the very end we could query this list as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "b43a4a84-2795-440a-9db4-9d80cfceea6c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{(H, (stim.PauliString(\"+Z___\"),)): 0.016038368053963015, (H, (stim.PauliString(\"+X___\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+Y___\"),)): -0.007408912958767258, (S, (stim.PauliString(\"+Z___\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+X___\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+Y___\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+_Y__\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+_Z__\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+_X__\"),)): 0.007408912958767258, (S, (stim.PauliString(\"+_Y__\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+_Z__\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+_X__\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+__Z_\"),)): 
0.016038368053963015, (H, (stim.PauliString(\"+__YZ\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+__XZ\"),)): 0.007408912958767258, (S, (stim.PauliString(\"+__Z_\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+__YZ\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+__XZ\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+__ZX\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+___Z\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+__ZY\"),)): -0.007408912958767258, (S, (stim.PauliString(\"+__ZX\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+___Z\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+__ZY\"),)): 0.00034799818155795706}\n" + ] + } + ], + "source": [ + "print(propagated_errorgen_layers[1])" + ] + }, + { + "cell_type": "markdown", + "id": "981a3cb7-a629-44cd-bb17-bc52dd74e9a2", + "metadata": {}, + "source": [ + "There are a few things worth noting at this point. First, we stated we'd be looking at the output of propagating the *first* circuit layer to the end but we indexed into the *second* position of the final list, what gives? This is because by default the `propagate_errorgens` method prepends and appends the error generator layers corresponding to state preparation and measurement respectively *before* beginning the propagation. As such the first layer in the final output corresponds to the error generator associated with state prep, and the final one with measurement. We never actually specified error generator rates for the SPAM, so you'll notice the corresponding dictionaries in the final output are both empty in this case." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a7710d39-9574-41aa-a922-c95f7bb8225f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{}\n", + "{}\n" + ] + } + ], + "source": [ + "print(propagated_errorgen_layers[0])\n", + "print(propagated_errorgen_layers[-1])" + ] + }, + { + "cell_type": "markdown", + "id": "85b38f4f-9797-416b-b04d-395caed0438b", + "metadata": {}, + "source": [ + "To change this behavior so that the SPAM layers are not included you can set the optional kwarg `include_spam` to `FALSE` in `propgate_errorgens` and other related methods." + ] + }, + { + "cell_type": "markdown", + "id": "1076dfa4-da39-4c3d-9466-0de50c415521", + "metadata": {}, + "source": [ + "The next things worth noting are the keys of the final dictionary. Notice that the basis element labels for each of the elementary error generator coefficient labels are instances of `stim.PauliString`, very much unlike the other elementary error generator labels used in pyGSTi. These labels are instances of the class `LocalStimErrorgenLabel`, a specialized label class with additional metadata and methods used throughout the error generator propagation framework. For applications where you need to take the output of this module and utilize it elsewhere in pyGSTi you can utilize the `to_local_eel` and `to_global_eel` methods of the `LocalStimErrorgenLabel` class to convert these into instances of `LocalElementaryErrorgenLabel` and `GlobalElementaryErrorgenLabel`, respectively, for use within other parts of pyGSTi." + ] + }, + { + "cell_type": "markdown", + "id": "af5abf19-7c96-4c06-aa41-4a2b47890205", + "metadata": {}, + "source": [ + "While the output of `propgate_errorgens` is in and of itself incredibly useful, often we want to know more about how specific errors have been transformed by propagation through the circuit. 
Fortunately the analytic structure of error generator propagation through a clifford operation is such that it acts as a generalized permutation of each elementary error generator within it's sector (i.e. propagation can't in and of itself map H errors to anything other than H errors, for example). To view the input-output corresponding to the transformation of each error generator we can use the `errorgen_transform_map` method." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "b4b3ab03-399f-43dd-99d0-957bbf219ad5", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_transform_map = errorgen_propagator.errorgen_transform_map(c)" + ] + }, + { + "cell_type": "markdown", + "id": "eeddc38f-78cc-46e9-ae36-c506a352c7a5", + "metadata": {}, + "source": [ + "This method returns a dictionary with the following structure: Keys are tuples of the form (, ), and values are of the form (, ), where overall_phase corresponds to the overall sign accumulated on the final error generator rate as a result of propagation. So, for example, we can see that as a result of propagation through the circuit the H(XIII) error generator at circuit layer 1 is mapped to an H(ZIII) error generator accruing and overall phase of -1.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d5600efa-93a4-4100-93f7-43173c83f948", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "((H, (stim.PauliString(\"+Z___\"),)), -1.0)\n" + ] + } + ], + "source": [ + "print(errorgen_transform_map[(_LSE('H', [stim.PauliString('XIII')]), 1)])" + ] + }, + { + "cell_type": "markdown", + "id": "16b35551-56e1-4cb9-ab69-9e0bb71205d6", + "metadata": {}, + "source": [ + "For some purposes it can be useful to go another step further and identity which gate a particular error might be associated with in the original error model. 
For this purpose `ErrorGeneratorPropagator` has a helper method available called `errorgen_gate_contributors`." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "432a7f5d-d27b-4f12-a262-810609318cf3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Label(('Gxpi2', 0))]\n" + ] + } + ], + "source": [ + "print(errorgen_propagator.errorgen_gate_contributors(_LSE('H', [stim.PauliString('XIII')]), c, layer_idx=1))" + ] + }, + { + "cell_type": "markdown", + "id": "bd1e42b3-2309-4c67-9d78-67ac5510bbb9", + "metadata": {}, + "source": [ + "Here this method returns the fact that in our particular error model the only gate at layer index 1 which could have contributed this particular error generator was the 'Gxpi2' gate acting on qubit 0. In some error models it may be possible for multiple gates to contribute to a particular rate, in which case this method should return all such gates." + ] + }, + { + "cell_type": "markdown", + "id": "90c2be36-4333-40b7-b3a4-742bc60623b1", + "metadata": {}, + "source": [ + "## BCH Approximation\n", + "In the previous section we showed how to use the `ErrorGeneratorPropagator` class to transform a circuit with a series of post-gate error generators into an equivalent representation of this noisy circuit with instead a series of post-circuit error generator layers. What if we want a single effective end-of-circuit error generator which approximates the overall action of the composition of each of the propagated error generators? To do so the `ErrorGeneratorPropagator` class supports the option to iteratively apply the BCH approximation at various orders to perform this recombination.\n", + "\n", + "The main method for performing propagation together with the iterative application of the BCH approximation is called `propagate_errorgens_bch`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "6ee0db5d-972c-4367-96da-f390fe1ac56e", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layer_first_order = errorgen_propagator.propagate_errorgens_bch(c)" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f5c55-fbc1-4371-8f7e-d274c6c361bc", + "metadata": {}, + "source": [ + "As before this method propagated all of a circuits error generator layers to the very end, but follows this up with an iterative application of the BCH approximation resulting in a single final error generator. Without any additional optional arguments specified this uses the first-order BCH approximation." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "b0d58c38-f3ce-494d-8daa-cbb5ef8d66b0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{(H, (stim.PauliString(\"+___X\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+___Y\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+___Z\"),)): 0.015458825057572158, (H, (stim.PauliString(\"+__X_\"),)): 0.0015261919356565306, (H, (stim.PauliString(\"+__XX\"),)): 0.008637438913233318, (H, (stim.PauliString(\"+__XY\"),)): 0.02913099222503971, (H, (stim.PauliString(\"+__XZ\"),)): -0.023417688701839773, (H, (stim.PauliString(\"+__Y_\"),)): 0.009454729746458598, (H, (stim.PauliString(\"+__YX\"),)): -0.016661354573179642, (H, (stim.PauliString(\"+__YY\"),)): 0.0034374458145267966, (H, (stim.PauliString(\"+__YZ\"),)): -0.003842438812773296, (H, (stim.PauliString(\"+__Z_\"),)): 0.036684870579615995, (H, (stim.PauliString(\"+__ZX\"),)): -0.04067953804377626, (H, (stim.PauliString(\"+__ZY\"),)): -0.0015729818285460167, (H, (stim.PauliString(\"+__ZZ\"),)): -0.012651437175495219, (S, (stim.PauliString(\"+___X\"),)): 0.0024143330497938234, (S, (stim.PauliString(\"+___Y\"),)): 0.003683612869525075, (S, (stim.PauliString(\"+___Z\"),)): 0.005101920188444729, (S, (stim.PauliString(\"+__X_\"),)): 
0.002661734791363736, (S, (stim.PauliString(\"+__XX\"),)): 0.003737168273719153, (S, (stim.PauliString(\"+__XY\"),)): 0.003159101454087135, (S, (stim.PauliString(\"+__XZ\"),)): 0.0056859632815692025, (S, (stim.PauliString(\"+__Y_\"),)): 0.00254938052108778, (S, (stim.PauliString(\"+__YX\"),)): 0.0030131230078306983, (S, (stim.PauliString(\"+__YY\"),)): 0.002275601598622893, (S, (stim.PauliString(\"+__YZ\"),)): 0.005999188119143293, (S, (stim.PauliString(\"+__Z_\"),)): 0.005452070821520289, (S, (stim.PauliString(\"+__ZX\"),)): 0.004522281466412299, (S, (stim.PauliString(\"+__ZY\"),)): 0.004436402092067647, (S, (stim.PauliString(\"+__ZZ\"),)): 0.00272331772024521, (H, (stim.PauliString(\"+X___\"),)): -0.007988455955158116, (H, (stim.PauliString(\"+Y___\"),)): -0.0061269146786919765, (H, (stim.PauliString(\"+Z___\"),)): 0.03948564906669329, (S, (stim.PauliString(\"+X___\"),)): 0.002905803062911212, (S, (stim.PauliString(\"+Y___\"),)): 0.003090400538354011, (S, (stim.PauliString(\"+Z___\"),)): 0.0027212055874684133, (H, (stim.PauliString(\"+_X__\"),)): -0.024667823149158774, (H, (stim.PauliString(\"+_Y__\"),)): -0.02280628187269263, (H, (stim.PauliString(\"+_Z__\"),)): 0.00869091123884254, (S, (stim.PauliString(\"+_X__\"),)): 0.0027212055874684133, (S, (stim.PauliString(\"+_Y__\"),)): 0.002905803062911212, (S, (stim.PauliString(\"+_Z__\"),)): 0.003090400538354011}\n" + ] + } + ], + "source": [ + "print(propagated_errorgen_layer_first_order)" + ] + }, + { + "cell_type": "markdown", + "id": "e7b85f44-fe5e-4d2e-9555-8cae50e8f03b", + "metadata": {}, + "source": [ + "This method supports a number of additional arguments beyond those already for `propagate_errorgens`:\n", + "- `bch_order`: An integer from 1 to 5 specifying the order of the BCH approximation to apply (5 is the current maximum). 
Note that the computational cost of higher order BCH can scale rapidly, so keep this in mind when balancing the need for accuracy and speed of computation.\n", + "- `truncation_threshold`: This argument allows you to specify a minimum threshold (in terms of error generator rate) below which rates are truncated to zero. This can improve performance by allowing one to skip the computation of terms corresponding to very small corrections.\n", + "Some interesting emergent behavior starts to occur when we begin to look at higher-order BCH corrections." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "3b88d003-76d2-4464-9a77-3c4ce6e63745", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layer_second_order = errorgen_propagator.propagate_errorgens_bch(c, bch_order=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "2a60a527-051b-47e3-bbdc-41492bdeb2f1", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{(H, (stim.PauliString(\"+___X\"),)): -0.016366628760355446, (H, (stim.PauliString(\"+___Y\"),)): 0.0012701883102106228, (H, (stim.PauliString(\"+___Z\"),)): 0.015811660858690683, (H, (stim.PauliString(\"+__X_\"),)): 0.0022810144249310817, (H, (stim.PauliString(\"+__XX\"),)): 0.00817058193708496, (H, (stim.PauliString(\"+__XY\"),)): 0.028939475463994727, (H, (stim.PauliString(\"+__XZ\"),)): -0.023550875758712682, (H, (stim.PauliString(\"+__Y_\"),)): 0.009994656773212925, (H, (stim.PauliString(\"+__YX\"),)): -0.016779562020536862, (H, (stim.PauliString(\"+__YY\"),)): 0.002926947172357588, (H, (stim.PauliString(\"+__YZ\"),)): -0.003297737298653086, (H, (stim.PauliString(\"+__Z_\"),)): 0.03660740203498932, (H, (stim.PauliString(\"+__ZX\"),)): -0.040531712484628495, (H, (stim.PauliString(\"+__ZY\"),)): -0.001851200491716163, (H, (stim.PauliString(\"+__ZZ\"),)): -0.012430018460989222, (S, (stim.PauliString(\"+___X\"),)): 0.0024141650598773568, (S, 
(stim.PauliString(\"+___Y\"),)): 0.0036830935030083747, (S, (stim.PauliString(\"+___Z\"),)): 0.005101371725612574, (S, (stim.PauliString(\"+__X_\"),)): 0.0026618925827871353, (S, (stim.PauliString(\"+__XX\"),)): 0.0037363923711846714, (S, (stim.PauliString(\"+__XY\"),)): 0.00315907976589997, (S, (stim.PauliString(\"+__XZ\"),)): 0.005685666597345913, (S, (stim.PauliString(\"+__Y_\"),)): 0.0025499711619194954, (S, (stim.PauliString(\"+__YX\"),)): 0.0030131078510913186, (S, (stim.PauliString(\"+__YY\"),)): 0.00227581501723727, (S, (stim.PauliString(\"+__YZ\"),)): 0.005999097874164904, (S, (stim.PauliString(\"+__Z_\"),)): 0.005452457750721968, (S, (stim.PauliString(\"+__ZX\"),)): 0.004522208837994344, (S, (stim.PauliString(\"+__ZY\"),)): 0.004437023183317757, (S, (stim.PauliString(\"+__ZZ\"),)): 0.0027238559732699116, (H, (stim.PauliString(\"+X___\"),)): -0.007783741055630419, (H, (stim.PauliString(\"+Y___\"),)): -0.005661459922019788, (H, (stim.PauliString(\"+Z___\"),)): 0.03953348672258176, (S, (stim.PauliString(\"+X___\"),)): 0.002905670793135387, (S, (stim.PauliString(\"+Y___\"),)): 0.0030905352244997224, (S, (stim.PauliString(\"+Z___\"),)): 0.002721203171098527, (H, (stim.PauliString(\"+_X__\"),)): -0.024485756015831597, (H, (stim.PauliString(\"+_Y__\"),)): -0.022833711046606444, (H, (stim.PauliString(\"+_Z__\"),)): 0.00938453200856714, (S, (stim.PauliString(\"+_X__\"),)): 0.002721018202212471, (S, (stim.PauliString(\"+_Y__\"),)): 0.002906281282011878, (S, (stim.PauliString(\"+_Z__\"),)): 0.003090109704509288, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__XZ\"))): 2.4623724334019127e-05, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__YZ\"))): 4.465348357724853e-05, (C, (stim.PauliString(\"+__ZY\"), stim.PauliString(\"+__ZZ\"))): -2.755957352422829e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+___Z\"))): 2.8228349310421213e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__XZ\"))): 1.328318580782737e-06, (C, 
(stim.PauliString(\"+__YX\"), stim.PauliString(\"+__YZ\"))): 2.688338229602585e-06, (C, (stim.PauliString(\"+__ZX\"), stim.PauliString(\"+__ZZ\"))): -2.2716555974974645e-06, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+___Z\"))): 2.01101914818271e-06, (C, (stim.PauliString(\"+__ZX\"), stim.PauliString(\"+__ZY\"))): 1.5753859413168797e-05, (C, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZZ\"))): 5.184585649852202e-06, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__Z_\"))): -2.901062901596097e-06, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZX\"))): -4.532193667972443e-06, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZY\"))): -3.0219041584913162e-06, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XZ\"))): -1.271227712663107e-05, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__Z_\"))): -1.3039770276036584e-05, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZX\"))): -1.9950145993005412e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XY\"))): -1.4461367957463231e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XZ\"))): 4.4730567874280694e-05, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__Z_\"))): -4.4516539621792385e-05, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZY\"))): -5.0117079718570586e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XX\"))): 4.966024254781249e-05, (C, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__Z_\"))): -3.418975273080511e-05, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZZ\"))): -1.4893329106399474e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__Z_\"))): 1.4865094771659637e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZX\"))): 2.2906831593528624e-05, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZY\"))): 1.6545014507346572e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YZ\"))): 4.583807045912111e-05, (C, (stim.PauliString(\"+__XX\"), 
stim.PauliString(\"+__Z_\"))): -2.5623164611721798e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZX\"))): -3.96888593302071e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YY\"))): 2.8676625088533037e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YZ\"))): 8.802782842580865e-06, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__Z_\"))): 5.033783897252847e-06, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZY\"))): 5.2823720213310345e-06, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YX\"))): 5.653851705277768e-06, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__Z_\"))): 9.433585902582486e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YZ\"))): -4.144067801371556e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZY\"))): 1.1421088209354261e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZX\"))): -1.1202206170765282e-06, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__Y_\"))): -1.929138988780776e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YZ\"))): 3.4332066193646426e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZX\"))): -2.9871798204252534e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZY\"))): 2.162542939305882e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XY\"))): -6.432567576172542e-06, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YY\"))): -3.477433017513346e-08, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZZ\"))): 2.251878563983956e-06, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+___Y\"))): 1.0334876388949038e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XX\"))): 4.7248554866870034e-07, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YX\"))): 8.855351194403839e-07, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZZ\"))): 3.073358774802754e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZZ\"))): 
-1.1752359454690021e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__Y_\"))): -2.6329064367355074e-06, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YX\"))): -1.123106172668559e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YY\"))): 2.7045660084993054e-06, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YX\"))): -1.6973158006614298e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__Y_\"))): 3.8068372532514665e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__XY\"))): -4.7882599046936274e-06, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YY\"))): -2.0707353884034883e-05, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__Y_\"))): 4.045637708335117e-06, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZZ\"))): -1.500883723711078e-06, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__YY\"))): -5.982706551279516e-06, (C, (stim.PauliString(\"+Y___\"), stim.PauliString(\"+Z___\"))): -4.066605487703408e-06, (C, (stim.PauliString(\"+X___\"), stim.PauliString(\"+Z___\"))): 6.147140455456534e-07, (C, (stim.PauliString(\"+X___\"), stim.PauliString(\"+Y___\"))): 3.8649506941318813e-05, (C, (stim.PauliString(\"+_Y__\"), stim.PauliString(\"+_Z__\"))): 3.8634448389020015e-05, (C, (stim.PauliString(\"+_X__\"), stim.PauliString(\"+_Z__\"))): -2.364593599604513e-05, (C, (stim.PauliString(\"+_X__\"), stim.PauliString(\"+_Y__\"))): -4.6546798822731776e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YY\"))): 3.182463873575446e-08, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YZ\"))): 2.1425205471734596e-08, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZX\"))): -1.6511901898579762e-09, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XX\"))): 9.770462482071315e-09, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YX\"))): -2.3532403638316404e-08, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZZ\"))): -2.592151120917717e-07, (C, 
(stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZY\"))): 2.617007349379505e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YX\"))): -2.7452465733429826e-09, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YZ\"))): 1.2820453148221837e-07, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZY\"))): 4.117751923769017e-11, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YY\"))): 3.4383444473268805e-10, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XY\"))): -9.653252578904504e-10, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZX\"))): -2.485622846178766e-09, (C, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZZ\"))): -1.6652135968731107e-08, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YZ\"))): -2.5886590148089146e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XZ\"))): -9.804193775862733e-11, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XY\"))): -2.2983987525965355e-09, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XX\"))): -8.186553210058193e-10, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZX\"))): -1.0370017316073812e-07, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZY\"))): 5.62296366051593e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Z_\"))): -1.3094977805556137e-07, (A, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZY\"))): -4.412670990729238e-09, (A, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZZ\"))): -8.040184466997569e-09, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__X_\"))): 6.287118243550577e-10, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZX\"))): 5.478449762312655e-08, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Y_\"))): -4.2438858069880333e-07, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Z_\"))): -1.0399433264019869e-08, (A, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZX\"))): 1.3754615244774694e-08, (C, (stim.PauliString(\"+__XX\"), 
stim.PauliString(\"+__ZY\"))): 4.791730783219299e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XZ\"))): 4.253872611682026e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YZ\"))): 4.83020627948673e-09, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YY\"))): -6.882017282429429e-10, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YX\"))): 8.800961953047258e-09, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZY\"))): 1.0703185162195312e-09, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZZ\"))): -3.938799776847555e-08, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Y_\"))): 8.937215731408386e-09, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__X_\"))): 1.5992980976589743e-07, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZX\"))): 3.5389366224904316e-10, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Y_\"))): -2.094068127878404e-09, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZZ\"))): -5.04361554243656e-09, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Z_\"))): 8.127230836031873e-10, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YY\"))): 6.7862803623284764e-09, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YX\"))): -2.9908419333357734e-08, (A, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZX\"))): -2.204992806438256e-09, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XY\"))): -3.083598730536217e-08, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XX\"))): 3.083598730536217e-08, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZZ\"))): 1.1352994504609885e-07, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Y_\"))): -1.1352994504609885e-07, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZX\"))): -7.457445591587116e-11, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YY\"))): -2.743866343309294e-09, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YX\"))): 
2.743866343309294e-09, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZZ\"))): 3.346877975963636e-10, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__X_\"))): 3.346877975963636e-10, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Z_\"))): 3.656117441390804e-11, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZZ\"))): 7.886232657898631e-09, (A, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZY\"))): 1.5855839253262436e-10, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZZ\"))): 3.1485517035998804e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Y_\"))): 3.1485517035998804e-08, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZY\"))): 3.604187530004226e-10, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZZ\"))): 1.2519833029231625e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__X_\"))): -1.2519833029231625e-08, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Z_\"))): 1.575892955605348e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Y_\"))): -3.310362125011788e-11, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZZ\"))): 8.517377174682687e-10, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YX\"))): -3.1453316663726613e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XX\"))): 3.1453316663726613e-07, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YY\"))): -4.4426457799417374e-08, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XY\"))): 4.4426457799417374e-08, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YY\"))): -5.766458525051134e-09, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YX\"))): 4.5689913956486385e-11, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZY\"))): 5.4888582391826885e-09, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YX\"))): -1.970475713209035e-08, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YY\"))): -1.970475713209035e-08, (A, 
(stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YZ\"))): -1.9492198346828996e-08, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YZ\"))): -3.5916189326911846e-09, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZX\"))): 6.697655090709081e-09, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YZ\"))): -7.214634092569796e-11, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__X_\"))): -5.149658211911818e-10, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XZ\"))): -4.582335063379096e-09}\n" + ] + } + ], + "source": [ + "print(propagated_errorgen_layer_second_order)" + ] + }, + { + "cell_type": "markdown", + "id": "90409574-98b3-4fec-9b05-9c5adc9764af", + "metadata": {}, + "source": [ + "Aside from the fact that there are now significantly more terms than was found for the first-order BCH approximation, notice that there are also now emergent second (and higher) order contributions due to C and A error generators which arise from the composition of purely H and S error generators. These additional terms arise from the non-commutativity of the elementary error generators, particularly the non-commutativity of H and S elementary error generators. For more on this phenomenon see [insert paper reference here]." + ] + }, + { + "cell_type": "markdown", + "id": "1e818c20-a1a7-4b1c-a068-e0ba548614f8", + "metadata": {}, + "source": [ + "## Approximate Probabilities and Expectation Values\n", + "Now that you have an efficient representation for an approximation to the effective end-of-circuit error generator for your circuit, what can you do with it? In this section we show how to use this sparse representation to efficiently compute corrections to the outcome probability distributions and pauli observable expectation values of noisy clifford circuits."
+ ] + }, + { + "cell_type": "markdown", + "id": "405b95e6-15fc-4a2e-84d5-766a0573ee00", + "metadata": {}, + "source": [ + "We'll start off by demonstrating how to perform strong simulation using the results of error generator propagation to estimate the output probabilities for a desired computational basis state. \n", + "\n", + "To do so we'll be making use of the function `approximate_stabilizer_probability` from the `errgenproptools` module. This function takes as input the following arguments:\n", + "\n", + "- errorgen_dict : A dictionary of elementary error generator coefficients and their corresponding rates (as outputted, for example, by `propagate_errorgens_bch`).\n", + "- circuit : The circuit to compute the output probability for. This can be a pyGSTi `Circuit` object, or alternatively a `stim.Tableau`.\n", + "- desired_bitstring : A string corresponding to the desired computational basis state.\n", + "- order : Order of the Taylor series approximation for the exponentiated error generator to use in computing the approximate output probability. In principle this function can compute arbitrary-order approximations (but practically the cost of the computation scales with the order).\n", + "- truncation_threshold : As described above, this is a minimum value below which contributions are truncated to zero, which can sometimes improve performance by reducing the number of terms computed with very small overall corrections to the calculated probability. \n", + "\n", + "Let's use the results of the application of the second-order BCH approximation above and compute the approximate probability of reading out the all-zeros state from our circuit. For the ideal circuit, the probability of observing the all-zeros state is 0.
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "4cc9e0a0-2da5-40fc-8556-4d3272f1b1be", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.0036337950714817775\n" + ] + } + ], + "source": [ + "first_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=1)\n", + "print(first_order_approximate_prob)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "ce153eee-90c3-4d8e-b25e-8ec849cde6c0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.003403571491328329\n" + ] + } + ], + "source": [ + "second_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=2)\n", + "print(second_order_approximate_prob)" + ] + }, + { + "cell_type": "markdown", + "id": "091dea97-7421-45f3-a1ab-0d17f90d1e88", + "metadata": {}, + "source": [ + "In this few-qubit test case we also have the luxury of comparing this to the results of the (effectively) exact forward simulation for the error model:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "2978aee0-7447-452e-85b4-41bbbb79f738", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.0034166543832386833\n" + ] + } + ], + "source": [ + "exact_probability = error_model.sim.probs(c)['0000']\n", + "print(exact_probability)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "5fb5fb21-bc96-446b-859c-2252c385c55a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): 0.00021714068824309424\n", + "Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): 1.3082891910354364e-05\n" + ] + } + ], + "source": [ + "print(f'Absolute Error Approx to Exact (First-order Taylor, 
Second-order BCH): {abs(exact_probability-first_order_approximate_prob)}')\n", + "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_probability-second_order_approximate_prob)}')" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "87e43f33-5b0a-436a-8048-7836879fd205", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Relative Error Approx to Exact (First-order taylor, Second-order BCH): 6.355360065341588%\n", + "Relative Error Approx to Exact (Second-order taylor, Second-order BCH): 0.38291528620910587%\n" + ] + } + ], + "source": [ + "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_probability-first_order_approximate_prob)/exact_probability}%')\n", + "print(f'Relative Error Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_probability-second_order_approximate_prob)/exact_probability}%')" + ] + }, + { + "cell_type": "markdown", + "id": "6bc34c95-9553-4c82-a80c-539c801b9029", + "metadata": {}, + "source": [ + "Here we can see that with the combination of second-order BCH and second-order taylor approximations our estimated probability is accurate to well below a 1 percent relative error. By going out to higher-order in either approximation one can achieve even higher levels of accuracy." + ] + }, + { + "cell_type": "markdown", + "id": "f858f5fd-2bb0-4e38-bab6-9b061aa5a273", + "metadata": {}, + "source": [ + "In addition to strong simulation of the output probabilities of computational basis states, it is also possible to compute approximate values for the expectation values of pauli observables. 
The main function for doing so is `approximate_stabilizer_pauli_expectation` from the `errgenproptools` module, the signature of which is nearly identical to that of `approximate_stabilizer_probability` described above, except taking instead a desired pauli observable to estimate the expectation value for. Here we'll again use the results of the second-order BCH approximation produced above and look at various orders of the Taylor series approximation for the pauli expectation value of 'XYZI' (the value for the ideal noise-free circuit is 1)." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "fa7c8e66-de23-41b5-8ec8-401d6e3c1bf7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.9185921793543655\n" + ] + } + ], + "source": [ + "first_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=1)\n", + "print(first_order_approximate_pauli_expectation)" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "4b6bd6fb-f7e6-482a-97c4-20bcba5749a6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.9129083343657568\n" + ] + } + ], + "source": [ + "second_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=2)\n", + "print(second_order_approximate_pauli_expectation)" + ] + }, + { + "cell_type": "markdown", + "id": "1c1efde8-7cf4-430a-94a6-376bdf991e67", + "metadata": {}, + "source": [ + "There aren't existing built-in functions in pyGSTi handy for outputting exact pauli expectation values, but we can write a short helper function for computing these for the sake of comparison with our above results."
+ ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "17731ccb-2cd7-4d7e-8ed8-f21b199d95a3", + "metadata": {}, + "outputs": [], + "source": [ + "from pygsti.tools.basistools import change_basis\n", + "import numpy as np\n", + "from pygsti.baseobjs import Label\n", + "def pauli_expectation_exact(error_propagator, target_model, circuit, pauli):\n", + " #get the eoc error channel, and the process matrix for the ideal circuit:\n", + " eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True)\n", + " ideal_channel = target_model.sim.product(circuit)\n", + " #also get the ideal state prep and povm:\n", + " ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy()\n", + " \n", + " #finally need the superoperator for the selected pauli.\n", + " pauli_unitary = pauli.to_unitary_matrix(endian='big')\n", + " #flatten this row-wise\n", + " pauli_vec = np.ravel(pauli_unitary)\n", + " pauli_vec.reshape((len(pauli_vec),1))\n", + " #put this in pp basis (since these are paulis themselves I could just read this off directly).\n", + " pauli_vec = change_basis(pauli_vec, 'std', 'pp')\n", + " #print(pauli_vec)\n", + " dense_prep = ideal_prep.to_dense().copy()\n", + " expectation = np.linalg.multi_dot([pauli_vec.reshape((1,len(pauli_vec))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]).item()\n", + " return expectation" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "3ca1ba09-e416-475d-bb57-58eb28db08a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.9136050958830395\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\ciostro\\Documents\\pyGSTi_wildcard_integration\\pygsti\\forwardsims\\mapforwardsim.py:732: UserWarning: Generating dense process matrix representations of circuits or gates \n", + "can be inefficient and should be avoided for the purposes of forward \n", + 
"simulation/calculation of circuit outcome probability distributions \n", + "when using the MapForwardSimulator.\n", + " _warnings.warn('Generating dense process matrix representations of circuits or gates \\n'\n" + ] + } + ], + "source": [ + "exact_pauli_expectation = pauli_expectation_exact(errorgen_propagator, target_model, c, stim.PauliString('XYZI'))\n", + "print(exact_pauli_expectation)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "407e0d72-ced0-4105-8a4f-463085406f18", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): 0.004987083471326037\n", + "Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): 0.0006967615172827069\n" + ] + } + ], + "source": [ + "print(f'Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): {abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)}')\n", + "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)}')" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "cf63b01d-244c-4943-b477-576b1be496f0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Relative Error Approx to Exact (First-order taylor, Second-order BCH): 0.5458686136711838%\n", + "Relative Error Approx to Exact (Second-order taylor, Second-order BCH): 0.07626506467865705%\n" + ] + } + ], + "source": [ + "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)/exact_pauli_expectation}%')\n", + "print(f'Relative Error Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)/exact_pauli_expectation}%')" + ] + }, + { + "cell_type": "markdown", + 
"id": "da73f141-80a7-4383-94ae-a530273f3e3d", + "metadata": {}, + "source": [ + "In this case even with the first-order Taylor approximation together with the second-order BCH approximation the relative error to the exact expectation value is roughly half a percent, dropping to below a tenth of a percent when we go up to the second-order Taylor approximation. As before, by going out to higher-order in either approximation one can achieve even higher levels of accuracy." + ] + }, + { + "cell_type": "markdown", + "id": "5a4f38cd-4625-4ecf-9f69-3046d4a45ebd", + "metadata": {}, + "source": [ + "## Other Helpful Utilities:\n", + "In this section we'll highlight a few additional utilities within the error generator propagation related modules which are often useful." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a72ab84c-e489-4b9e-90f2-246689872336", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "wildcard_integration", + "language": "python", + "name": "wildcard_integration" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 4a3506e9220b49d1ec7f1aabfbad808fe51e5249 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 16 Feb 2025 17:28:18 -0700 Subject: [PATCH 094/102] Finish errorgen propagation tutorial This commit corresponds to the completion of a tutorial for the error generator propagation module. Plus a small bug fix caught by the unit tests related to an errant kwarg. 
--- .../ErrorGeneratorPropagation.ipynb | 434 +++++++++--------- pygsti/tools/errgenproptools.py | 2 +- 2 files changed, 230 insertions(+), 206 deletions(-) diff --git a/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb index 4c021980d..409443770 100644 --- a/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb +++ b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb @@ -20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "472bba68-9e69-4379-aa3c-7e062128ef7c", "metadata": {}, "outputs": [], @@ -43,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "ae464763-b211-4bf4-a85c-47465aa7187b", "metadata": {}, "outputs": [], @@ -66,7 +66,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "e205a715-c231-40a1-82bf-27cf303e8820", "metadata": {}, "outputs": [], @@ -88,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "5ebacf47-54ba-4cd1-b14c-26f1d0516d29", "metadata": {}, "outputs": [], @@ -106,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "4e88638b-cc35-49be-8976-ee658d3201a6", "metadata": {}, "outputs": [], @@ -116,22 +116,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "145d7875-599a-447f-b2ac-b2fc702d6dda", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Qubit 0 ---|Gxpi2|-|Gxpi2|-|Gypi2|---\n", - "Qubit 1 ---|Gypi2|-|Gypi2|-|Gxpi2|---\n", - "Qubit 2 ---|Gypi2|-|Gypi2|-| C3 |---\n", - "Qubit 3 ---|Gypi2|-|Gxpi2|-| C2 |---\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "print(c)" ] @@ -148,7 +136,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "c66532cd-876e-4002-89d2-b47eaafb5bf0", "metadata": 
{}, "outputs": [], @@ -166,7 +154,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "90dc2734-2d17-4555-b32c-ac307967c21e", "metadata": {}, "outputs": [], @@ -186,18 +174,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "b43a4a84-2795-440a-9db4-9d80cfceea6c", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{(H, (stim.PauliString(\"+Z___\"),)): 0.016038368053963015, (H, (stim.PauliString(\"+X___\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+Y___\"),)): -0.007408912958767258, (S, (stim.PauliString(\"+Z___\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+X___\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+Y___\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+_Y__\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+_Z__\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+_X__\"),)): 0.007408912958767258, (S, (stim.PauliString(\"+_Y__\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+_Z__\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+_X__\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+__Z_\"),)): 0.016038368053963015, (H, (stim.PauliString(\"+__YZ\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+__XZ\"),)): 0.007408912958767258, (S, (stim.PauliString(\"+__Z_\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+__YZ\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+__XZ\"),)): 0.00034799818155795706, (H, (stim.PauliString(\"+__ZX\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+___Z\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+__ZY\"),)): -0.007408912958767258, (S, (stim.PauliString(\"+__ZX\"),)): 0.0011866037029552281, (S, (stim.PauliString(\"+___Z\"),)): 0.001371201178398027, (S, (stim.PauliString(\"+__ZY\"),)): 0.00034799818155795706}\n" - ] - } - ], + "outputs": [], "source": [ "print(propagated_errorgen_layers[1])" ] @@ -212,19 +192,10 @@ }, { "cell_type": 
"code", - "execution_count": 10, + "execution_count": null, "id": "a7710d39-9574-41aa-a922-c95f7bb8225f", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{}\n", - "{}\n" - ] - } - ], + "outputs": [], "source": [ "print(propagated_errorgen_layers[0])\n", "print(propagated_errorgen_layers[-1])" @@ -256,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "b4b3ab03-399f-43dd-99d0-957bbf219ad5", "metadata": {}, "outputs": [], @@ -274,18 +245,10 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "d5600efa-93a4-4100-93f7-43173c83f948", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "((H, (stim.PauliString(\"+Z___\"),)), -1.0)\n" - ] - } - ], + "outputs": [], "source": [ "print(errorgen_transform_map[(_LSE('H', [stim.PauliString('XIII')]), 1)])" ] @@ -300,18 +263,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "432a7f5d-d27b-4f12-a262-810609318cf3", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Label(('Gxpi2', 0))]\n" - ] - } - ], + "outputs": [], "source": [ "print(errorgen_propagator.errorgen_gate_contributors(_LSE('H', [stim.PauliString('XIII')]), c, layer_idx=1))" ] @@ -337,7 +292,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "6ee0db5d-972c-4367-96da-f390fe1ac56e", "metadata": {}, "outputs": [], @@ -355,18 +310,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "b0d58c38-f3ce-494d-8daa-cbb5ef8d66b0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{(H, (stim.PauliString(\"+___X\"),)): -0.016038368053963015, (H, (stim.PauliString(\"+___Y\"),)): 0.000640999140037641, (H, (stim.PauliString(\"+___Z\"),)): 0.015458825057572158, (H, (stim.PauliString(\"+__X_\"),)): 
0.0015261919356565306, (H, (stim.PauliString(\"+__XX\"),)): 0.008637438913233318, (H, (stim.PauliString(\"+__XY\"),)): 0.02913099222503971, (H, (stim.PauliString(\"+__XZ\"),)): -0.023417688701839773, (H, (stim.PauliString(\"+__Y_\"),)): 0.009454729746458598, (H, (stim.PauliString(\"+__YX\"),)): -0.016661354573179642, (H, (stim.PauliString(\"+__YY\"),)): 0.0034374458145267966, (H, (stim.PauliString(\"+__YZ\"),)): -0.003842438812773296, (H, (stim.PauliString(\"+__Z_\"),)): 0.036684870579615995, (H, (stim.PauliString(\"+__ZX\"),)): -0.04067953804377626, (H, (stim.PauliString(\"+__ZY\"),)): -0.0015729818285460167, (H, (stim.PauliString(\"+__ZZ\"),)): -0.012651437175495219, (S, (stim.PauliString(\"+___X\"),)): 0.0024143330497938234, (S, (stim.PauliString(\"+___Y\"),)): 0.003683612869525075, (S, (stim.PauliString(\"+___Z\"),)): 0.005101920188444729, (S, (stim.PauliString(\"+__X_\"),)): 0.002661734791363736, (S, (stim.PauliString(\"+__XX\"),)): 0.003737168273719153, (S, (stim.PauliString(\"+__XY\"),)): 0.003159101454087135, (S, (stim.PauliString(\"+__XZ\"),)): 0.0056859632815692025, (S, (stim.PauliString(\"+__Y_\"),)): 0.00254938052108778, (S, (stim.PauliString(\"+__YX\"),)): 0.0030131230078306983, (S, (stim.PauliString(\"+__YY\"),)): 0.002275601598622893, (S, (stim.PauliString(\"+__YZ\"),)): 0.005999188119143293, (S, (stim.PauliString(\"+__Z_\"),)): 0.005452070821520289, (S, (stim.PauliString(\"+__ZX\"),)): 0.004522281466412299, (S, (stim.PauliString(\"+__ZY\"),)): 0.004436402092067647, (S, (stim.PauliString(\"+__ZZ\"),)): 0.00272331772024521, (H, (stim.PauliString(\"+X___\"),)): -0.007988455955158116, (H, (stim.PauliString(\"+Y___\"),)): -0.0061269146786919765, (H, (stim.PauliString(\"+Z___\"),)): 0.03948564906669329, (S, (stim.PauliString(\"+X___\"),)): 0.002905803062911212, (S, (stim.PauliString(\"+Y___\"),)): 0.003090400538354011, (S, (stim.PauliString(\"+Z___\"),)): 0.0027212055874684133, (H, (stim.PauliString(\"+_X__\"),)): -0.024667823149158774, (H, 
(stim.PauliString(\"+_Y__\"),)): -0.02280628187269263, (H, (stim.PauliString(\"+_Z__\"),)): 0.00869091123884254, (S, (stim.PauliString(\"+_X__\"),)): 0.0027212055874684133, (S, (stim.PauliString(\"+_Y__\"),)): 0.002905803062911212, (S, (stim.PauliString(\"+_Z__\"),)): 0.003090400538354011}\n" - ] - } - ], + "outputs": [], "source": [ "print(propagated_errorgen_layer_first_order)" ] @@ -384,7 +331,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "3b88d003-76d2-4464-9a77-3c4ce6e63745", "metadata": {}, "outputs": [], @@ -394,20 +341,12 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "2a60a527-051b-47e3-bbdc-41492bdeb2f1", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{(H, (stim.PauliString(\"+___X\"),)): -0.016366628760355446, (H, (stim.PauliString(\"+___Y\"),)): 0.0012701883102106228, (H, (stim.PauliString(\"+___Z\"),)): 0.015811660858690683, (H, (stim.PauliString(\"+__X_\"),)): 0.0022810144249310817, (H, (stim.PauliString(\"+__XX\"),)): 0.00817058193708496, (H, (stim.PauliString(\"+__XY\"),)): 0.028939475463994727, (H, (stim.PauliString(\"+__XZ\"),)): -0.023550875758712682, (H, (stim.PauliString(\"+__Y_\"),)): 0.009994656773212925, (H, (stim.PauliString(\"+__YX\"),)): -0.016779562020536862, (H, (stim.PauliString(\"+__YY\"),)): 0.002926947172357588, (H, (stim.PauliString(\"+__YZ\"),)): -0.003297737298653086, (H, (stim.PauliString(\"+__Z_\"),)): 0.03660740203498932, (H, (stim.PauliString(\"+__ZX\"),)): -0.040531712484628495, (H, (stim.PauliString(\"+__ZY\"),)): -0.001851200491716163, (H, (stim.PauliString(\"+__ZZ\"),)): -0.012430018460989222, (S, (stim.PauliString(\"+___X\"),)): 0.0024141650598773568, (S, (stim.PauliString(\"+___Y\"),)): 0.0036830935030083747, (S, (stim.PauliString(\"+___Z\"),)): 0.005101371725612574, (S, (stim.PauliString(\"+__X_\"),)): 0.0026618925827871353, (S, (stim.PauliString(\"+__XX\"),)): 
0.0037363923711846714, (S, (stim.PauliString(\"+__XY\"),)): 0.00315907976589997, (S, (stim.PauliString(\"+__XZ\"),)): 0.005685666597345913, (S, (stim.PauliString(\"+__Y_\"),)): 0.0025499711619194954, (S, (stim.PauliString(\"+__YX\"),)): 0.0030131078510913186, (S, (stim.PauliString(\"+__YY\"),)): 0.00227581501723727, (S, (stim.PauliString(\"+__YZ\"),)): 0.005999097874164904, (S, (stim.PauliString(\"+__Z_\"),)): 0.005452457750721968, (S, (stim.PauliString(\"+__ZX\"),)): 0.004522208837994344, (S, (stim.PauliString(\"+__ZY\"),)): 0.004437023183317757, (S, (stim.PauliString(\"+__ZZ\"),)): 0.0027238559732699116, (H, (stim.PauliString(\"+X___\"),)): -0.007783741055630419, (H, (stim.PauliString(\"+Y___\"),)): -0.005661459922019788, (H, (stim.PauliString(\"+Z___\"),)): 0.03953348672258176, (S, (stim.PauliString(\"+X___\"),)): 0.002905670793135387, (S, (stim.PauliString(\"+Y___\"),)): 0.0030905352244997224, (S, (stim.PauliString(\"+Z___\"),)): 0.002721203171098527, (H, (stim.PauliString(\"+_X__\"),)): -0.024485756015831597, (H, (stim.PauliString(\"+_Y__\"),)): -0.022833711046606444, (H, (stim.PauliString(\"+_Z__\"),)): 0.00938453200856714, (S, (stim.PauliString(\"+_X__\"),)): 0.002721018202212471, (S, (stim.PauliString(\"+_Y__\"),)): 0.002906281282011878, (S, (stim.PauliString(\"+_Z__\"),)): 0.003090109704509288, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__XZ\"))): 2.4623724334019127e-05, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__YZ\"))): 4.465348357724853e-05, (C, (stim.PauliString(\"+__ZY\"), stim.PauliString(\"+__ZZ\"))): -2.755957352422829e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+___Z\"))): 2.8228349310421213e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__XZ\"))): 1.328318580782737e-06, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__YZ\"))): 2.688338229602585e-06, (C, (stim.PauliString(\"+__ZX\"), stim.PauliString(\"+__ZZ\"))): -2.2716555974974645e-06, (C, (stim.PauliString(\"+___X\"), 
stim.PauliString(\"+___Z\"))): 2.01101914818271e-06, (C, (stim.PauliString(\"+__ZX\"), stim.PauliString(\"+__ZY\"))): 1.5753859413168797e-05, (C, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZZ\"))): 5.184585649852202e-06, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__Z_\"))): -2.901062901596097e-06, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZX\"))): -4.532193667972443e-06, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZY\"))): -3.0219041584913162e-06, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XZ\"))): -1.271227712663107e-05, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__Z_\"))): -1.3039770276036584e-05, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZX\"))): -1.9950145993005412e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XY\"))): -1.4461367957463231e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XZ\"))): 4.4730567874280694e-05, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__Z_\"))): -4.4516539621792385e-05, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZY\"))): -5.0117079718570586e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XX\"))): 4.966024254781249e-05, (C, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__Z_\"))): -3.418975273080511e-05, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZZ\"))): -1.4893329106399474e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__Z_\"))): 1.4865094771659637e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZX\"))): 2.2906831593528624e-05, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZY\"))): 1.6545014507346572e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YZ\"))): 4.583807045912111e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__Z_\"))): -2.5623164611721798e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZX\"))): -3.96888593302071e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YY\"))): 
2.8676625088533037e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YZ\"))): 8.802782842580865e-06, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__Z_\"))): 5.033783897252847e-06, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZY\"))): 5.2823720213310345e-06, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YX\"))): 5.653851705277768e-06, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__Z_\"))): 9.433585902582486e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YZ\"))): -4.144067801371556e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZY\"))): 1.1421088209354261e-05, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZX\"))): -1.1202206170765282e-06, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__Y_\"))): -1.929138988780776e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YZ\"))): 3.4332066193646426e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZX\"))): -2.9871798204252534e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZY\"))): 2.162542939305882e-05, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XY\"))): -6.432567576172542e-06, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YY\"))): -3.477433017513346e-08, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZZ\"))): 2.251878563983956e-06, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+___Y\"))): 1.0334876388949038e-05, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XX\"))): 4.7248554866870034e-07, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YX\"))): 8.855351194403839e-07, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZZ\"))): 3.073358774802754e-05, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__ZZ\"))): -1.1752359454690021e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__Y_\"))): -2.6329064367355074e-06, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YX\"))): -1.123106172668559e-05, (C, 
(stim.PauliString(\"+__X_\"), stim.PauliString(\"+__YY\"))): 2.7045660084993054e-06, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YX\"))): -1.6973158006614298e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__Y_\"))): 3.8068372532514665e-05, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__XY\"))): -4.7882599046936274e-06, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YY\"))): -2.0707353884034883e-05, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__Y_\"))): 4.045637708335117e-06, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__ZZ\"))): -1.500883723711078e-06, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__YY\"))): -5.982706551279516e-06, (C, (stim.PauliString(\"+Y___\"), stim.PauliString(\"+Z___\"))): -4.066605487703408e-06, (C, (stim.PauliString(\"+X___\"), stim.PauliString(\"+Z___\"))): 6.147140455456534e-07, (C, (stim.PauliString(\"+X___\"), stim.PauliString(\"+Y___\"))): 3.8649506941318813e-05, (C, (stim.PauliString(\"+_Y__\"), stim.PauliString(\"+_Z__\"))): 3.8634448389020015e-05, (C, (stim.PauliString(\"+_X__\"), stim.PauliString(\"+_Z__\"))): -2.364593599604513e-05, (C, (stim.PauliString(\"+_X__\"), stim.PauliString(\"+_Y__\"))): -4.6546798822731776e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YY\"))): 3.182463873575446e-08, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YZ\"))): 2.1425205471734596e-08, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZX\"))): -1.6511901898579762e-09, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XX\"))): 9.770462482071315e-09, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YX\"))): -2.3532403638316404e-08, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZZ\"))): -2.592151120917717e-07, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZY\"))): 2.617007349379505e-07, (C, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YX\"))): -2.7452465733429826e-09, (C, (stim.PauliString(\"+__XX\"), 
stim.PauliString(\"+__YZ\"))): 1.2820453148221837e-07, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZY\"))): 4.117751923769017e-11, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YY\"))): 3.4383444473268805e-10, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XY\"))): -9.653252578904504e-10, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__ZX\"))): -2.485622846178766e-09, (C, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZZ\"))): -1.6652135968731107e-08, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YZ\"))): -2.5886590148089146e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__XZ\"))): -9.804193775862733e-11, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XY\"))): -2.2983987525965355e-09, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XX\"))): -8.186553210058193e-10, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZX\"))): -1.0370017316073812e-07, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZY\"))): 5.62296366051593e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Z_\"))): -1.3094977805556137e-07, (A, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZY\"))): -4.412670990729238e-09, (A, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZZ\"))): -8.040184466997569e-09, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__X_\"))): 6.287118243550577e-10, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZX\"))): 5.478449762312655e-08, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Y_\"))): -4.2438858069880333e-07, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Z_\"))): -1.0399433264019869e-08, (A, (stim.PauliString(\"+__YZ\"), stim.PauliString(\"+__ZX\"))): 1.3754615244774694e-08, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZY\"))): 4.791730783219299e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XZ\"))): 4.253872611682026e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__YZ\"))): 
4.83020627948673e-09, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YY\"))): -6.882017282429429e-10, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YX\"))): 8.800961953047258e-09, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZY\"))): 1.0703185162195312e-09, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZZ\"))): -3.938799776847555e-08, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Y_\"))): 8.937215731408386e-09, (C, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__X_\"))): 1.5992980976589743e-07, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__ZX\"))): 3.5389366224904316e-10, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Y_\"))): -2.094068127878404e-09, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZZ\"))): -5.04361554243656e-09, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Z_\"))): 8.127230836031873e-10, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YY\"))): 6.7862803623284764e-09, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YX\"))): -2.9908419333357734e-08, (A, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZX\"))): -2.204992806438256e-09, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__XY\"))): -3.083598730536217e-08, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__XX\"))): 3.083598730536217e-08, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZZ\"))): 1.1352994504609885e-07, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Y_\"))): -1.1352994504609885e-07, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__ZX\"))): -7.457445591587116e-11, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__YY\"))): -2.743866343309294e-09, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__YX\"))): 2.743866343309294e-09, (C, (stim.PauliString(\"+__YY\"), stim.PauliString(\"+__ZZ\"))): 3.346877975963636e-10, (C, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__X_\"))): 3.346877975963636e-10, (A, 
(stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Z_\"))): 3.656117441390804e-11, (A, (stim.PauliString(\"+__Z_\"), stim.PauliString(\"+__ZZ\"))): 7.886232657898631e-09, (A, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZY\"))): 1.5855839253262436e-10, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZZ\"))): 3.1485517035998804e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__Y_\"))): 3.1485517035998804e-08, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__ZY\"))): 3.604187530004226e-10, (C, (stim.PauliString(\"+__YX\"), stim.PauliString(\"+__ZZ\"))): 1.2519833029231625e-08, (C, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__X_\"))): -1.2519833029231625e-08, (A, (stim.PauliString(\"+___X\"), stim.PauliString(\"+__Z_\"))): 1.575892955605348e-08, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__Y_\"))): -3.310362125011788e-11, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__ZZ\"))): 8.517377174682687e-10, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YX\"))): -3.1453316663726613e-07, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XX\"))): 3.1453316663726613e-07, (C, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YY\"))): -4.4426457799417374e-08, (C, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XY\"))): 4.4426457799417374e-08, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YY\"))): -5.766458525051134e-09, (A, (stim.PauliString(\"+__XZ\"), stim.PauliString(\"+__YX\"))): 4.5689913956486385e-11, (A, (stim.PauliString(\"+___Y\"), stim.PauliString(\"+__ZY\"))): 5.4888582391826885e-09, (C, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YX\"))): -1.970475713209035e-08, (C, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YY\"))): -1.970475713209035e-08, (A, (stim.PauliString(\"+__XY\"), stim.PauliString(\"+__YZ\"))): -1.9492198346828996e-08, (A, (stim.PauliString(\"+__Y_\"), stim.PauliString(\"+__YZ\"))): -3.5916189326911846e-09, (A, (stim.PauliString(\"+___X\"), 
stim.PauliString(\"+__ZX\"))): 6.697655090709081e-09, (A, (stim.PauliString(\"+__XX\"), stim.PauliString(\"+__YZ\"))): -7.214634092569796e-11, (A, (stim.PauliString(\"+___Z\"), stim.PauliString(\"+__X_\"))): -5.149658211911818e-10, (A, (stim.PauliString(\"+__X_\"), stim.PauliString(\"+__XZ\"))): -4.582335063379096e-09}\n" - ] - } - ], + "outputs": [], "source": [ "print(propagated_errorgen_layer_second_order)" ] @@ -449,18 +388,10 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "4cc9e0a0-2da5-40fc-8556-4d3272f1b1be", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.0036337950714817775\n" - ] - } - ], + "outputs": [], "source": [ "first_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=1)\n", "print(first_order_approximate_prob)" @@ -468,18 +399,10 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "ce153eee-90c3-4d8e-b25e-8ec849cde6c0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.003403571491328329\n" - ] - } - ], + "outputs": [], "source": [ "second_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=2)\n", "print(second_order_approximate_prob)" @@ -495,18 +418,10 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "2978aee0-7447-452e-85b4-41bbbb79f738", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.0034166543832386833\n" - ] - } - ], + "outputs": [], "source": [ "exact_probability = error_model.sim.probs(c)['0000']\n", "print(exact_probability)" @@ -514,19 +429,10 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "5fb5fb21-bc96-446b-859c-2252c385c55a", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - "Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): 0.00021714068824309424\n", - "Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): 1.3082891910354364e-05\n" - ] - } - ], + "outputs": [], "source": [ "print(f'Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): {abs(exact_probability-first_order_approximate_prob)}')\n", "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_probability-second_order_approximate_prob)}')" @@ -534,19 +440,10 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "87e43f33-5b0a-436a-8048-7836879fd205", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Relative Error Approx to Exact (First-order taylor, Second-order BCH): 6.355360065341588%\n", - "Relative Error Approx to Exact (Second-order taylor, Second-order BCH): 0.38291528620910587%\n" - ] - } - ], + "outputs": [], "source": [ "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_probability-first_order_approximate_prob)/exact_probability}%')\n", "print(f'Relative Error Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_probability-second_order_approximate_prob)/exact_probability}%')" @@ -570,18 +467,10 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "id": "fa7c8e66-de23-41b5-8ec8-401d6e3c1bf7", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.9185921793543655\n" - ] - } - ], + "outputs": [], "source": [ "first_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=1)\n", "print(first_order_approximate_pauli_expectation)" @@ -589,18 +478,10 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "id": 
"4b6bd6fb-f7e6-482a-97c4-20bcba5749a6", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.9129083343657568\n" - ] - } - ], + "outputs": [], "source": [ "second_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=2)\n", "print(second_order_approximate_pauli_expectation)" @@ -616,7 +497,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "id": "17731ccb-2cd7-4d7e-8ed8-f21b199d95a3", "metadata": {}, "outputs": [], @@ -646,29 +527,10 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": null, "id": "3ca1ba09-e416-475d-bb57-58eb28db08a0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.9136050958830395\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "C:\\Users\\ciostro\\Documents\\pyGSTi_wildcard_integration\\pygsti\\forwardsims\\mapforwardsim.py:732: UserWarning: Generating dense process matrix representations of circuits or gates \n", - "can be inefficient and should be avoided for the purposes of forward \n", - "simulation/calculation of circuit outcome probability distributions \n", - "when using the MapForwardSimulator.\n", - " _warnings.warn('Generating dense process matrix representations of circuits or gates \\n'\n" - ] - } - ], + "outputs": [], "source": [ "exact_pauli_expectation = pauli_expectation_exact(errorgen_propagator, target_model, c, stim.PauliString('XYZI'))\n", "print(exact_pauli_expectation)" @@ -676,19 +538,10 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": null, "id": "407e0d72-ced0-4105-8a4f-463085406f18", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): 0.004987083471326037\n", - "Absolute Error Approx to Exact (Second-order Taylor, 
Second-order BCH): 0.0006967615172827069\n" - ] - } - ], + "outputs": [], "source": [ "print(f'Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): {abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)}')\n", "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)}')" @@ -696,19 +549,10 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "id": "cf63b01d-244c-4943-b477-576b1be496f0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Relative Error Approx to Exact (First-order taylor, Second-order BCH): 0.5458686136711838%\n", - "Relative Error Approx to Exact (Second-order taylor, Second-order BCH): 0.07626506467865705%\n" - ] - } - ], + "outputs": [], "source": [ "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)/exact_pauli_expectation}%')\n", "print(f'Relative Error Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)/exact_pauli_expectation}%')" @@ -728,13 +572,193 @@ "metadata": {}, "source": [ "## Other Helpful Utilities:\n", - "In this section we'll highlight a few additional utilities within the error generator propagation related modules which are often useful." 
+ "In this section we'll highlight a few additional utilities within the error generator propagation related modules which are often useful (some of these you may have even seen us use above!).\n", + "\n", + "We'll specifically cover:\n", + "- `eoc_error_channel`\n", + "- `errorgen_layer_dict_to_errorgen`\n", + "- `approximate_stabilizer_probabilities`\n", + "- `error_generator_commutator`\n", + "- `error_generator_composition`" + ] + }, + { + "cell_type": "markdown", + "id": "c8ab436d-9b2c-4125-9fb8-69e281225308", + "metadata": {}, + "source": [ + "#### `eoc_error_channel` : \n", + "This method provides a simple single function call for generating a dense representation of the end-of-circuit error channel (i.e. the exponentiated end-of-circuit error generator). This can be useful in few-qubit testing, but obviously doesn't not scale beyond a few qubits. This end-of-circuit error channel can be produced either exactly or without the BCH approximation. In the former case this is acheived by exponentiating and multiplying together all of the propagated error generator layers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ed42df89-31b9-48af-9f3d-1baf538dc64b", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_channel_exact = errorgen_propagator.eoc_error_channel(c, use_bch=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73cd2961-5280-4d38-8f55-49b6fc4fb7b4", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_channel_first_order_BCH = errorgen_propagator.eoc_error_channel(c, use_bch=True, bch_kwargs={'bch_order':1})\n", + "dense_end_of_circuit_channel_second_order_BCH = errorgen_propagator.eoc_error_channel(c, use_bch=True, bch_kwargs={'bch_order':2})" + ] + }, + { + "cell_type": "markdown", + "id": "b3a2d84e-2266-4337-9be7-08f7047141b4", + "metadata": {}, + "source": [ + "This can be useful in testing settings, for example, where we can use these as yet another way to measure the accuracy of our approximation methods." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38926ad9-09e1-4174-b845-47bc2aad305e", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Frobenius norm between exact and 1st-order BCH EOC channels: {np.linalg.norm(dense_end_of_circuit_channel_exact-dense_end_of_circuit_channel_first_order_BCH)}')\n", + "print(f'Frobenius norm between exact and 2nd-order BCH EOC channels: {np.linalg.norm(dense_end_of_circuit_channel_exact-dense_end_of_circuit_channel_second_order_BCH)}')" + ] + }, + { + "cell_type": "markdown", + "id": "1649c1e7-29a3-492d-b4fb-0cfe2b491c7e", + "metadata": {}, + "source": [ + "#### `errorgen_layer_dict_to_errorgen`\n", + "Throughout the error generator propagation framework we generate a lot of sparse error generator representations in terms of dictionaries of elementary error generator coefficients and corresponding rates. 
For testing purposes (with just a few qubits, this obviously does not scale) it is often useful to convert these into a dense representation as a numpy array. This method helps do so in just a single line." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51361d27-6f0f-4765-8b35-81d1f2f62362", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_errorgen_first_order_BCH = errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layer_first_order)" + ] + }, + { + "cell_type": "markdown", + "id": "8cedfee7-22db-42a5-98e1-950fdef54149", + "metadata": {}, + "source": [ + "By default this returns the error generator in the normalized pauli-product basis, but this can be changed using the optional `mx_basis` kwarg.\n", + "Note: There is another function called `errorgen_layer_to_matrix` available in the `errgenproptools` module with similar functionality to this method, but with a somewhat different interface. That function can be particularly useful in situations where you may want to compute a lot of dense error generator representations from the outputs of the error generator propagation framework, so check out the documentation of that function for more." + ] + }, + { + "cell_type": "markdown", + "id": "b2f914cc-2c69-4e80-ab99-9be1fe0b3a9a", + "metadata": {}, + "source": [ + "#### `approximate_stabilizer_probabilities`\n", + "This one is straightforward. Above we showed the use of the function `approximate_stabilizer_probability` from the `errgenproptools` module for calculating approximate output probabilities for a given computational bitstring. If you happen to want *all* of the bit string probabilities you can save yourself a for loop by using the function `approximate_stabilizer_probabilities` from this module instead!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21fcd7cc-6799-4aca-b524-69868d0a0169", + "metadata": {}, + "outputs": [], + "source": [ + "approximate_probabilities = eprop.approximate_stabilizer_probabilities(propagated_errorgen_layer_first_order, c, order=1)\n", + "print(approximate_probabilities)" + ] + }, + { + "cell_type": "markdown", + "id": "464e2b37-3d00-4503-9a4d-40f60aa4c3a3", + "metadata": {}, + "source": [ + "Note the returned values are given in right-LSB convention (i.e. '0000' -> '0001' ->'0010', etc.)" + ] + }, + { + "cell_type": "markdown", + "id": "3984dac3-6114-4f4e-80e0-4ba31a79886a", + "metadata": {}, + "source": [ + "#### `error_generator_commutator` and `error_generator_composition`\n", + "These two functions from the `errgenproptools` module return the result of analytically computing the commutator and composition of two elementary error generators, respectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83ba69f1-bbd0-4133-8ef3-abd9dc020888", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_1 = _LSE('H', [stim.PauliString('X')])\n", + "errorgen_2 = _LSE('S', [stim.PauliString('Z')])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ecd3b00-4a7b-4c8e-8a5f-bdba13565fb4", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_commutator(errorgen_1, errorgen_2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9862ff3-8717-48e7-b0a8-ec3a2d07c974", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_composition(errorgen_1, errorgen_2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a020355-1796-4ddd-ad85-178b37266e35", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_composition(errorgen_1, errorgen_1))" + ] + }, + { + "cell_type": "markdown", + "id": "9723d08d-09ef-4ea7-ad0e-5624fa6f8501", + "metadata": {}, + "source": [ 
+ "Both of these methods return their output as a list of two-element tuples. This list is a specification for the linear combination of elementary error generator coefficients corresponding to the commutator or composition of the two input elementary error generators. (First tuple element is an elementary error generator in the linear combination, and the second element is the coefficient of that elementary error generator in the linear combination).\n", + "\n", + "In the examples above we can see that the commutator of the specified H and S error generators gives rise to a pauli-correlation (C) error generator. This could potentially give rise to emergent C error generators when applying second-or-higher order BCH approximations for the effective end-of-circuit error generator, for example. Likewise the composition of these to error generators is a linear combination of a C error generator and an H error generator. And finally we see that squaring an H error generator (composing it with itself) gives rise to a pauli-stochastic (S) error generator." + ] + }, + { + "cell_type": "markdown", + "id": "cdfce65a-f619-4ef8-b52d-dff5d02a314f", + "metadata": {}, + "source": [ + "There's a whole bunch of other functionality and utilities available, particularly in the `errgenproptools` module which have not been covered in this tutorial, so please check out the documentation for additional capabilities!" 
] }, { "cell_type": "code", "execution_count": null, - "id": "a72ab84c-e489-4b9e-90f2-246689872336", + "id": "2f305946-e23d-4572-909e-6389dfa6a26b", "metadata": {}, "outputs": [], "source": [] @@ -742,9 +766,9 @@ ], "metadata": { "kernelspec": { - "display_name": "wildcard_integration", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "wildcard_integration" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 763cb2b0d..8e7b25697 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -6468,7 +6468,7 @@ def bch_numerical(propagated_errorgen_layers, error_propagator, bch_order=1): #iterate through each of the propagated error generator layers and turn these into dense numpy arrays errorgen_layer_mats = [] for layer in propagated_errorgen_layers: - errorgen_layer_mats.append(error_propagator.errorgen_layer_dict_to_errorgen(layer, mx_basis='pp', return_dense=True)) + errorgen_layer_mats.append(error_propagator.errorgen_layer_dict_to_errorgen(layer, mx_basis='pp')) #initialize a matrix for storing the result of doing BCH. bch_result = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128) From d71b0f5f137d68a2365244a293f221711febea41 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Sun, 16 Feb 2025 17:48:46 -0700 Subject: [PATCH 095/102] Temporarily disable LFH extras package Temporarily comment out the code that is only associated with the LFH error generator and simulation extras package. It was probably a mistake to build the error generator propagation functionality on top of this branch in hindsight (but I though I needed some of the backend changes associated with this branch at the start). This will be resuscitated on a different branch when I have time to write proper tests and ensure the functionality is ready for integration. 
--- pygsti/extras/lfh/lfherrorgen.py | 6 ++++-- pygsti/extras/lfh/lfhforwardsims.py | 3 ++- pygsti/extras/lfh/lfhmodel.py | 6 ++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pygsti/extras/lfh/lfherrorgen.py b/pygsti/extras/lfh/lfherrorgen.py index 2fc373b5c..bed08bc59 100644 --- a/pygsti/extras/lfh/lfherrorgen.py +++ b/pygsti/extras/lfh/lfherrorgen.py @@ -10,7 +10,7 @@ # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** - +''' import numpy as _np import collections as _collections import itertools as _itertools @@ -208,4 +208,6 @@ def sample_hamiltonian_rates(self):#, dirty_value=True): blk.from_vector(u[off: off + blk.num_params]) off += blk.num_params self._update_rep() - #self.dirty = dirty_value \ No newline at end of file + #self.dirty = dirty_value + +''' \ No newline at end of file diff --git a/pygsti/extras/lfh/lfhforwardsims.py b/pygsti/extras/lfh/lfhforwardsims.py index 315217080..fca301c0d 100644 --- a/pygsti/extras/lfh/lfhforwardsims.py +++ b/pygsti/extras/lfh/lfhforwardsims.py @@ -10,7 +10,7 @@ # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
#*************************************************************************************************** - +''' import numpy as _np import collections as _collections @@ -923,3 +923,4 @@ def bulk_fill_hprobs(self, array_to_fill, layout, self.bulk_fill_dprobs(dprobs2, layout) array_to_fill[:, i, :] = (dprobs2 - dprobs) / eps self.model.from_vector(orig_vec, close=True) +''' \ No newline at end of file diff --git a/pygsti/extras/lfh/lfhmodel.py b/pygsti/extras/lfh/lfhmodel.py index d4b38c8fd..6410d2540 100644 --- a/pygsti/extras/lfh/lfhmodel.py +++ b/pygsti/extras/lfh/lfhmodel.py @@ -10,7 +10,7 @@ # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** - +''' import numpy as np import collections as _collections @@ -75,4 +75,6 @@ def sample_hamiltonian_rates(self): #.reinit_factor_op_reps([op._rep for op in member.factorops]) #need a version of the circuit_layer_operator method which doesn't call clean_paramvec - #since I think this is what is causing the value of the \ No newline at end of file + #since I think this is what is causing the value of the + +''' \ No newline at end of file From 4a1598b8dbb4363906969a075f71ee53b398b14c Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 24 Feb 2025 20:04:47 -0700 Subject: [PATCH 096/102] Patch random error generator rates Patch an issue in the random error generator construction code which had previously meant that we were getting non-CP maps when we didn't have both C and A present. It looks like with this new construction scheme we're getting CP error generators even when only using one or the other (and also when having mismatched weights). 
--- pygsti/tools/lindbladtools.py | 24 +++++++++++------------- test/unit/tools/test_lindbladtools.py | 8 ++++++++ 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 34e688100..3c92d0412 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -644,12 +644,8 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') if max_weights is not None: assert max_weights['C'] <= max_weights['S'] and max_weights['A'] <= max_weights['S'], 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' - assert max_weights['C'] == max_weights['A'], 'Maximum weight and C of A terms must be the same at present.' rng = _np.random.default_rng(seed) - - if 'C' in errorgen_types or 'A' in errorgen_types: - assert 'C' in errorgen_types and 'A' in errorgen_types, 'Support only currently available for random C and A terms if both sectors present.' - + #create a state space with this dimension. state_space = _QubitSpace.cast(num_qubits) @@ -692,15 +688,17 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') random_rates_dicts['H'] = {lbl: val for lbl,val in zip(errgen_labels_H, rng.normal(loc=H_params[0], scale=H_params[1], size = num_H_rates))} #Create a random matrix with complex gaussian entries which will be used to generator a PSD matrix for the SCA rates. - random_SCA_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + \ - 1j* rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) - - random_SCA_mat = random_SCA_gen_mat @ random_SCA_gen_mat.conj().T - #The random S rates are just the diagonal of random_SCA_mat. 
- random_rates_dicts['S'] = {lbl: val for lbl,val in zip(errgen_labels_S, _np.real(_np.diag(random_SCA_mat)).copy())} + random_SC_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + random_SA_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) + random_SC_mat = random_SC_gen_mat @ random_SC_gen_mat.T + random_SA_mat = random_SA_gen_mat @ random_SA_gen_mat.T + random_S_rates = _np.real(_np.diag(random_SC_mat) + _np.diag(random_SA_mat)) + + #The random S rates are just the sum of the diagonals of random SC and SA mats. + random_rates_dicts['S'] = {lbl: val for lbl,val in zip(errgen_labels_S, random_S_rates)} #The random C rates are the real part of the off diagonal entries, and the A rates the imaginary part. - random_rates_dicts['C'] = {lbl: val for lbl,val in zip(errgen_labels_C, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].real)} - random_rates_dicts['A'] = {lbl: val for lbl,val in zip(errgen_labels_A, random_SCA_mat[_np.triu_indices_from(random_SCA_mat, k=1)].imag)} + random_rates_dicts['C'] = {lbl: val for lbl,val in zip(errgen_labels_C, random_SC_mat[_np.triu_indices_from(random_SC_mat, k=1)])} + random_rates_dicts['A'] = {lbl: val for lbl,val in zip(errgen_labels_A, random_SA_mat[_np.triu_indices_from(random_SA_mat, k=1)])} #manually check conditions on C and A for lbl, rate in random_rates_dicts['C'].items(): first_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],))] diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index 2a90add65..6e4cb7b1e 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -128,6 +128,14 @@ def test_sector_restrictions(self): #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + #H+S+A + random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','A'), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 135) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + def test_error_metric_restrictions(self): #test generator_infidelity random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), From 96d831114a50d9242c4947c6a4bf075d59efc02a Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Mon, 24 Feb 2025 21:56:00 -0700 Subject: [PATCH 097/102] One more random error generator patch Fix an error that occurred when not all maximum weights were specified and/or specifiable. --- pygsti/tools/lindbladtools.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 3c92d0412..282e3ca52 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -641,9 +641,12 @@ def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A') _warnings.warn('The relative_HS_contribution kwarg is only utilized when error_metric is not None, the specified value is ignored otherwise.') else: assert abs(1-sum(relative_HS_contribution))<=1e-7, 'The relative_HS_contribution should sum to 1.' - + + if 'C' in errorgen_types or 'A' in errorgen_types: + assert 'S' in errorgen_types, 'Must include S terms when C and A present. Cannot have a CP error generator otherwise.' 
+ if max_weights is not None: - assert max_weights['C'] <= max_weights['S'] and max_weights['A'] <= max_weights['S'], 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' + assert max_weights.get('C', 0) <= max_weights.get('S', 0) and max_weights.get('A', 0) <= max_weights.get('S', 0), 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' rng = _np.random.default_rng(seed) #create a state space with this dimension. From 58180b12872c8dec4b977fc5685af96e0e9eea62 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 4 Mar 2025 17:40:58 -0700 Subject: [PATCH 098/102] Add ECR gate to stim conversions Add the ECR gate to the stim conversion dictionary. --- pygsti/tools/internalgates.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index 337699629..4931c0c34 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -405,6 +405,10 @@ def standard_gatenames_stim_conversions(): 'Gswap' : stim.Tableau.from_named_gate('SWAP'), 'Gcphase' : stim.Tableau.from_named_gate('CZ') } + ecr_unitary = _np.array([[0, 1, 0., 1j], [1., 0, -1j, 0.], + [0., 1j, 0, 1], [-1j, 0., 1, 0]], complex)/_np.sqrt(2) + gate_dict['Gecres'] = stim.Tableau.from_unitary_matrix(ecr_unitary, endian='big') + return gate_dict def standard_gatenames_cirq_conversions(): From b80f2d51f91e5f4dc3f31951e28ddd96fada5fc8 Mon Sep 17 00:00:00 2001 From: "Stefan K. Seritan" Date: Tue, 18 Mar 2025 15:22:45 -0700 Subject: [PATCH 099/102] Finish merge for stim dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 8eb61fce6..dc760b8c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ authors = [ dependencies=[ 'numpy>=1.15.0', 'scipy', + 'stim', 'plotly', 'pandas', 'networkx' From 2db13a12c7222290c6af2b2db52fb1c277b038c7 Mon Sep 17 00:00:00 2001 From: "Stefan K. 
Seritan" Date: Tue, 18 Mar 2025 16:25:47 -0700 Subject: [PATCH 100/102] Copyright update for new files. --- pygsti/errorgenpropagation/__init__.py | 2 +- pygsti/errorgenpropagation/errorpropagator.py | 9 +++++++++ pygsti/errorgenpropagation/localstimerrorgen.py | 9 +++++++++ pygsti/extras/lfh/__init__.py | 2 +- pygsti/extras/lfh/lfherrorgen.py | 2 +- pygsti/extras/lfh/lfhforwardsims.py | 2 +- pygsti/extras/lfh/lfhmodel.py | 2 +- pygsti/tools/errgenproptools.py | 2 +- 8 files changed, 24 insertions(+), 6 deletions(-) diff --git a/pygsti/errorgenpropagation/__init__.py b/pygsti/errorgenpropagation/__init__.py index f6b034ea3..834cec2d7 100644 --- a/pygsti/errorgenpropagation/__init__.py +++ b/pygsti/errorgenpropagation/__init__.py @@ -1,6 +1,6 @@ """ Error Generator Propagation Sub-package """ #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index 54fc431cf..bf952ca1e 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -1,3 +1,12 @@ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. 
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** + import stim import numpy as _np import scipy.linalg as _spl diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index 1a2dceb67..129642b9d 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -1,3 +1,12 @@ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel as _ElementaryErrorgenLabel, GlobalElementaryErrorgenLabel as _GEEL,\ LocalElementaryErrorgenLabel as _LEEL import stim diff --git a/pygsti/extras/lfh/__init__.py b/pygsti/extras/lfh/__init__.py index 6e09a0017..b5143ca45 100644 --- a/pygsti/extras/lfh/__init__.py +++ b/pygsti/extras/lfh/__init__.py @@ -1,6 +1,6 @@ """ Low Frequency Hamiltonian Sub-package """ #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except diff --git a/pygsti/extras/lfh/lfherrorgen.py b/pygsti/extras/lfh/lfherrorgen.py index bed08bc59..a00df744f 100644 --- a/pygsti/extras/lfh/lfherrorgen.py +++ b/pygsti/extras/lfh/lfherrorgen.py @@ -3,7 +3,7 @@ support for fluctuating Hamiltonian parameters. """ #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. 
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except diff --git a/pygsti/extras/lfh/lfhforwardsims.py b/pygsti/extras/lfh/lfhforwardsims.py index fca301c0d..e9099eb3a 100644 --- a/pygsti/extras/lfh/lfhforwardsims.py +++ b/pygsti/extras/lfh/lfhforwardsims.py @@ -3,7 +3,7 @@ fluctuating Hamiltonian parameters. """ #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except diff --git a/pygsti/extras/lfh/lfhmodel.py b/pygsti/extras/lfh/lfhmodel.py index 6410d2540..5856d2c79 100644 --- a/pygsti/extras/lfh/lfhmodel.py +++ b/pygsti/extras/lfh/lfhmodel.py @@ -3,7 +3,7 @@ support for fluctuating Hamiltonian parameters. """ #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 8e7b25697..5769aa76b 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -2,7 +2,7 @@ Tools for the propagation of error generators through circuits. 
""" #*************************************************************************************************** -# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights # in this software. # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except From 2b975172c446aef4718b997a53feb7f04b9a2f7d Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 25 Mar 2025 21:35:26 -0600 Subject: [PATCH 101/102] Address Feedback Add a number of changes to address feedback on PR. Adds some try-excepts around stim imports, fixes a few bugs and renames a function. --- pygsti/baseobjs/errorgenbasis.py | 11 +++---- pygsti/errorgenpropagation/errorpropagator.py | 10 +++++-- .../errorgenpropagation/localstimerrorgen.py | 5 +++- pygsti/tools/errgenproptools.py | 10 ++++++- pygsti/tools/jamiolkowski.py | 2 +- pygsti/tools/lindbladtools.py | 2 +- test/unit/objects/test_errorgenbasis.py | 4 ++- test/unit/tools/test_lindbladtools.py | 30 +++++++++---------- 8 files changed, 47 insertions(+), 27 deletions(-) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 91af9d92c..f86bee6c5 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -724,7 +724,7 @@ def elemgen_supports_and_matrices(self): """ return tuple(zip(self.elemgen_supports, self.elemgen_matrices)) - def label_index(self, label, ok_if_missing=False): + def label_index(self, label, ok_if_missing=False, identity_label='I'): """ Return the index of the specified elementary error generator label in this basis' `labels` list. @@ -736,12 +736,13 @@ def label_index(self, label, ok_if_missing=False): ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. 
+ + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. """ - #CIO: I don't entirely understand the intention behind this method, so rather than trying to make it work - #using `LocalElementaryErrorgenLabel` I'll just assert it is a global one for now... if isinstance(label, _LocalElementaryErrorgenLabel): - raise NotImplementedError('This method is not currently implemented for `LocalElementaryErrorgenLabel` inputs.') - + label = _GlobalElementaryErrorgenLabel.cast(label, self.sslbls, identity_label=identity_label) + support = label.sslbls eetype = label.errorgen_type bels = label.basis_element_labels diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py index bf952ca1e..dcd93a960 100644 --- a/pygsti/errorgenpropagation/errorpropagator.py +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -6,8 +6,14 @@ # in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** - -import stim +import warnings +try: + import stim +except ImportError: + msg = "Stim is required for use of the error generator propagation module, " \ + "and it does not appear to be installed. If you intend to use this module please update" \ + " your environment." 
+ warnings.warn(msg) import numpy as _np import scipy.linalg as _spl from .localstimerrorgen import LocalStimErrorgenLabel as _LSE diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py index 129642b9d..e481fe30d 100644 --- a/pygsti/errorgenpropagation/localstimerrorgen.py +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -9,7 +9,10 @@ from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel as _ElementaryErrorgenLabel, GlobalElementaryErrorgenLabel as _GEEL,\ LocalElementaryErrorgenLabel as _LEEL -import stim +try: + import stim +except ImportError: + pass import numpy as _np from pygsti.tools import change_basis from pygsti.tools.lindbladtools import create_elementary_errorgen diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py index 5769aa76b..018a78235 100644 --- a/pygsti/tools/errgenproptools.py +++ b/pygsti/tools/errgenproptools.py @@ -10,7 +10,15 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. #*************************************************************************************************** -import stim +import warnings +try: + import stim +except ImportError: + msg = "Stim is required for use of the error generator propagation tools module, " \ + "and it does not appear to be installed. If you intend to use this module please update" \ + " your environment." 
+ warnings.warn(msg) + import numpy as _np from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL from pygsti.baseobjs import QubitSpace as _QubitSpace diff --git a/pygsti/tools/jamiolkowski.py b/pygsti/tools/jamiolkowski.py index 2a964ea9e..abe70ca08 100644 --- a/pygsti/tools/jamiolkowski.py +++ b/pygsti/tools/jamiolkowski.py @@ -327,7 +327,7 @@ def sums_of_negative_choi_eigenvalues(model): """ ret = [] for (_, gate) in model.operations.items(): - J = fast_jamiolkowski_iso_std(gate.to_dense(), model.basis) # Choi mx basis doesn't matter + J = fast_jamiolkowski_iso_std(gate.to_dense(on_space='HilbertSchmidt'), model.basis) # Choi mx basis doesn't matter evals = _np.linalg.eigvals(J) # could use eigvalsh, but wary of this since eigh can be wrong... sumOfNeg = 0.0 for ev in evals: diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index 4c6e59118..6a8a443d6 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -506,7 +506,7 @@ def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N8 return lind_errgen -def random_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, +def random_CPTP_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, H_params=(0.,.01), SCA_params=(0.,.01), error_metric=None, error_metric_value=None, relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, label_type='global', seed=None, qubit_labels=None): diff --git a/test/unit/objects/test_errorgenbasis.py b/test/unit/objects/test_errorgenbasis.py index fec7caf1c..4d2f1da7f 100644 --- a/test/unit/objects/test_errorgenbasis.py +++ b/test/unit/objects/test_errorgenbasis.py @@ -107,10 +107,12 @@ def test_label_index(self): labels = self.complete_errorgen_basis_default_1Q.labels test_eg = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (0,)) + test_eg_local = 
LocalElementaryErrorgenLabel('C', ['XI', 'YI']) test_eg_missing = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (1,)) lbl_idx = self.complete_errorgen_basis_default_1Q.label_index(test_eg) - + lbl_idx_1 = self.complete_errorgen_basis_default_1Q.label_index(test_eg_local) + assert lbl_idx == lbl_idx_1 assert lbl_idx == labels.index(test_eg) with self.assertRaises(KeyError): diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index 6e4cb7b1e..e1999e11b 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -94,7 +94,7 @@ def test_elementary_errorgen_bases(self): class RandomErrorgenRatesTester(BaseCase): def test_default_settings(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234, label_type='local') + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, seed=1234, label_type='local') #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 240) @@ -105,7 +105,7 @@ def test_default_settings(self): def test_sector_restrictions(self): #H-only: - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H',), seed=1234) + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H',), seed=1234) #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 15) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail @@ -113,7 +113,7 @@ def test_sector_restrictions(self): errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #S-only - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('S',), seed=1234) + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('S',), seed=1234) #make sure 
that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 15) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail @@ -121,7 +121,7 @@ def test_sector_restrictions(self): errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #H+S - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), seed=1234) + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), seed=1234) #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 30) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail @@ -129,7 +129,7 @@ def test_sector_restrictions(self): errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) #H+S+A - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','A'), seed=1234) + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','A'), seed=1234) #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 135) #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail @@ -138,7 +138,7 @@ def test_sector_restrictions(self): def test_error_metric_restrictions(self): #test generator_infidelity - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), error_metric= 'generator_infidelity', error_metric_value=.99, seed=1234) #confirm this has the correct generator infidelity. 
@@ -152,7 +152,7 @@ def test_error_metric_restrictions(self): assert abs(gen_infdl-.99)<1e-5 #test generator_error - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), error_metric= 'total_generator_error', error_metric_value=.99, seed=1234) #confirm this has the correct generator infidelity. @@ -166,7 +166,7 @@ def test_error_metric_restrictions(self): assert abs(gen_error-.99)<1e-5 #test relative_HS_contribution: - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), error_metric= 'generator_infidelity', error_metric_value=.99, relative_HS_contribution=(.5, .5), seed=1234) @@ -181,7 +181,7 @@ def test_error_metric_restrictions(self): assert abs(gen_infdl_S - gen_infdl_H)<1e-5 - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), error_metric= 'total_generator_error', error_metric_value=.99, relative_HS_contribution=(.5, .5), seed=1234) @@ -198,7 +198,7 @@ def test_error_metric_restrictions(self): def test_fixed_errorgen_rates(self): fixed_rates_dict = {GlobalElementaryErrorgenLabel('H', ('X',), (0,)): 1} - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), fixed_errorgen_rates=fixed_rates_dict, seed=1234) @@ -206,33 +206,33 @@ def test_fixed_errorgen_rates(self): def test_label_type(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, 
errorgen_types=('H','S'), label_type='local', seed=1234) assert isinstance(next(iter(random_errorgen_rates)), LocalElementaryErrorgenLabel) def test_sslbl_overlap(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), sslbl_overlap=(0,), seed=1234) for coeff in random_errorgen_rates: assert 0 in coeff.sslbls def test_weight_restrictions(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), label_type='local', seed=1234, max_weights={'H':1, 'S':1, 'C':1, 'A':1}) assert len(random_errorgen_rates) == 24 #confirm still CPTP errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), label_type='local', seed=1234, max_weights={'H':2, 'S':2, 'C':1, 'A':1}) assert len(random_errorgen_rates) == 42 errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) def test_global_labels(self): - random_errorgen_rates = lt.random_error_generator_rates(num_qubits=2, seed=1234, label_type='global') + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, seed=1234, label_type='global') #make sure that we get the expected number of rates: self.assertEqual(len(random_errorgen_rates), 240) From 2f16d35805f94f48d3960a34cf180daa58ee4708 Mon Sep 17 00:00:00 2001 From: Corey Ostrove Date: Tue, 1 Apr 2025 11:11:48 -0700 Subject: [PATCH 102/102] Minor test change Minor 
change to test tolerance. --- test/unit/tools/test_errgenproptools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py index 32e660868..38c33d717 100644 --- a/test/unit/tools/test_errgenproptools.py +++ b/test/unit/tools/test_errgenproptools.py @@ -251,7 +251,7 @@ def _assert_correct_tableau_fidelity(u, v): ut = stim.Tableau.from_state_vector(u, endian='little') vt = stim.Tableau.from_state_vector(v, endian='little') actual = _eprop.tableau_fidelity(ut, vt) - np.testing.assert_allclose(actual, expected, rtol=1e-5) + np.testing.assert_allclose(actual, expected, atol=1e-14, rtol=1e-5) s = 0.5**0.5 _assert_correct_tableau_fidelity([1, 0], [0, 1])