diff --git a/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb new file mode 100644 index 000000000..409443770 --- /dev/null +++ b/jupyter_notebooks/Tutorials/algorithms/ErrorGeneratorPropagation.ipynb @@ -0,0 +1,788 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a2bf624d-6098-46e5-820e-e775e4fb41f0", + "metadata": {}, + "source": [ + "# Error Generator Propagation\n", + "In this tutorial we will provide an overview of the core functionality available through pyGSTi's error generator propagation module.\n", + "\n", + "Error generator propagation is a technique which leverages the analytical properties of the error generator formalism to enable efficient forward simulation by propagating general markovian error channels through Clifford circuits. Efficiency of this technique relies on two conditions:\n", + "\n", + "- Sparsity: At most a polynomial number of error generator rates (in the number of qubits) can be nonzero for any given circuit layer.\n", + "- Clifford-only: The propagation of error generators relies on the analytic properties of the elementary error generators when conjugated by cliffords.\n", + "\n", + "That is pretty much it though. Coherent errors, non-unital errors (e.g. amplitude damping), dephasing, all fair game. Practically there is a third requirement as well and that is that the error generator rates are relatively small. The larger the error generator rates, the higher-order the approximation you'll require (BCH and/or taylor series) to achieve a given precision target when using the functionality described herein for efficiently performing strong simulation in the error generator propagation framework. \n", + "\n", + "Please note: The implementation of the error generator propagation framework in pyGSTi requires the `stim` python package, so please ensure this is installed in your environment before proceeding." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "472bba68-9e69-4379-aa3c-7e062128ef7c", + "metadata": {}, + "outputs": [], + "source": [ + "import pygsti\n", + "import stim\n", + "from pygsti.tools import errgenproptools as eprop\n", + "from pygsti.tools.lindbladtools import random_error_generator_rates\n", + "from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator\n", + "from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE" + ] + }, + { + "cell_type": "markdown", + "id": "1a4682fa-f5e3-4a44-a596-e095f2cc4890", + "metadata": {}, + "source": [ + "To begin we need an error model, and particularly one parameterized using error generators (or otherwise capable of outputing error generators for a circuit layer). For this tutorial we'll work with a 4-qubit crosstalk-free model for a gate set consisting of $\\pi/2$ rotations about X and Y on each qubit, and a two-qubit CPHASE gate. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae464763-b211-4bf4-a85c-47465aa7187b", + "metadata": {}, + "outputs": [], + "source": [ + "num_qubits = 4\n", + "gate_names = ['Gcphase', 'Gxpi2', 'Gypi2']\n", + "availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]}\n", + "pspec = pygsti.processors.QubitProcessorSpec(num_qubits, gate_names, availability=availability)\n", + "target_model = pygsti.models.create_crosstalk_free_model(processor_spec = pspec)" + ] + }, + { + "cell_type": "markdown", + "id": "f24def6b-b268-4a62-bbe5-37d0c188c15b", + "metadata": {}, + "source": [ + "Now that we have a target model we'll also want a noisy model to simulate as well. For this example we'll randomly sample a weight-2 H+S (coherent + pauli stochastic) error model, but the error generator propagation framework can also handle C and A error generators as well (i.e. general lindbladian errors). 
\n", + "The specific specification we'll need for the model construction routine we're about to use is a dictionary whose keys are gate labels. Each value of this dictionary is itself a dictionary whose keys are elementary error generator labels, and whose values are error generator rates." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e205a715-c231-40a1-82bf-27cf303e8820", + "metadata": {}, + "outputs": [], + "source": [ + "qubit_labels = pspec.qubit_labels\n", + "error_rates_dict = {}\n", + "for gate, availability in pspec.availability.items():\n", + " n = pspec.gate_num_qubits(gate)\n", + " if availability == 'all-edges':\n", + " assert(n == 1), \"Currently require all 2-qubit gates have a specified availability!\"\n", + " qubits_for_gate = qubit_labels\n", + " else:\n", + " qubits_for_gate = availability \n", + " for qs in qubits_for_gate:\n", + " label = pygsti.baseobjs.Label(gate, qs)\n", + " # Sample error rates.\n", + " error_rates_dict[label] = random_error_generator_rates(num_qubits=n, errorgen_types=('H', 'S'), label_type='local', seed=1234)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5ebacf47-54ba-4cd1-b14c-26f1d0516d29", + "metadata": {}, + "outputs": [], + "source": [ + "error_model = pygsti.models.create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict)" + ] + }, + { + "cell_type": "markdown", + "id": "ab4dc617-6d7c-410c-a976-282a169c8bdf", + "metadata": {}, + "source": [ + "We'll also need an example circuit for the rest of our examples, so will construct one at random." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e88638b-cc35-49be-8976-ee658d3201a6", + "metadata": {}, + "outputs": [], + "source": [ + "c = pygsti.algorithms.randomcircuit.create_random_circuit(pspec, 3, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "145d7875-599a-447f-b2ac-b2fc702d6dda", + "metadata": {}, + "outputs": [], + "source": [ + "print(c)" + ] + }, + { + "cell_type": "markdown", + "id": "192d74b6-3f36-499f-9db6-335a01b87c3f", + "metadata": {}, + "source": [ + "## Basic Propagation\n", + "In this section we'll introduce the basic syntax of the `ErrorGeneratorPropagator` class and usage of the basic error generator propagation functionality.\n", + "Our first step will be to create an instance of the `ErrorGeneratorPropagator` class. This is as simple as passing in our error model into the constructor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c66532cd-876e-4002-89d2-b47eaafb5bf0", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_propagator = ErrorGeneratorPropagator(error_model)" + ] + }, + { + "cell_type": "markdown", + "id": "7fc2f197-6351-4c16-b351-7e539f1db839", + "metadata": {}, + "source": [ + "The very first thing we can do is propagate the error generators for each circuit layer to the end of the circuit. This is done using the `propagate_errorgens` method." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90dc2734-2d17-4555-b32c-ac307967c21e", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layers = errorgen_propagator.propagate_errorgens(c)" + ] + }, + { + "cell_type": "markdown", + "id": "044babe6-675d-4145-bb68-bc3e5e80efff", + "metadata": {}, + "source": [ + "The output of this method is a list of dictionaries, one for each original error generator layer in the circuit, containing an updated set of elementary error generator coefficients and rates corresponding to the result of propagating each error generator through the circuit. Note this list is returned in circuit ordering, so there is a one-to-one correspondence between the position an error generator appears in the original circuit and where it appears in this final list.\n", + "\n", + "So, to see the result of propagating the error generator corresponding to the noise induced after the first layer of gates to the very end we could query this list as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b43a4a84-2795-440a-9db4-9d80cfceea6c", + "metadata": {}, + "outputs": [], + "source": [ + "print(propagated_errorgen_layers[1])" + ] + }, + { + "cell_type": "markdown", + "id": "981a3cb7-a629-44cd-bb17-bc52dd74e9a2", + "metadata": {}, + "source": [ + "There are a few things worth noting at this point. First, we stated we'd be looking at the output of propagating the *first* circuit layer to the end but we indexed into the *second* position of the final list, what gives? This is because by default the `propagate_errorgens` method prepends and appends the error generator layers corresponding to state preparation and measurement respectively *before* beginning the propagation. As such the first layer in the final output corresponds to the error generator associated with state prep, and the final one with measurement. 
We never actually specified error generator rates for the SPAM, so you'll notice the corresponding dictionaries in the final output are both empty in this case." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7710d39-9574-41aa-a922-c95f7bb8225f", + "metadata": {}, + "outputs": [], + "source": [ + "print(propagated_errorgen_layers[0])\n", + "print(propagated_errorgen_layers[-1])" + ] + }, + { + "cell_type": "markdown", + "id": "85b38f4f-9797-416b-b04d-395caed0438b", + "metadata": {}, + "source": [ + "To change this behavior so that the SPAM layers are not included you can set the optional kwarg `include_spam` to `FALSE` in `propgate_errorgens` and other related methods." + ] + }, + { + "cell_type": "markdown", + "id": "1076dfa4-da39-4c3d-9466-0de50c415521", + "metadata": {}, + "source": [ + "The next things worth noting are the keys of the final dictionary. Notice that the basis element labels for each of the elementary error generator coefficient labels are instances of `stim.PauliString`, very much unlike the other elementary error generator labels used in pyGSTi. These labels are instances of the class `LocalStimErrorgenLabel`, a specialized label class with additional metadata and methods used throughout the error generator propagation framework. For applications where you need to take the output of this module and utilize it elsewhere in pyGSTi you can utilize the `to_local_eel` and `to_global_eel` methods of the `LocalStimErrorgenLabel` class to convert these into instances of `LocalElementaryErrorgenLabel` and `GlobalElementaryErrorgenLabel`, respectively, for use within other parts of pyGSTi." + ] + }, + { + "cell_type": "markdown", + "id": "af5abf19-7c96-4c06-aa41-4a2b47890205", + "metadata": {}, + "source": [ + "While the output of `propgate_errorgens` is in and of itself incredibly useful, often we want to know more about how specific errors have been transformed by propagation through the circuit. 
Fortunately the analytic structure of error generator propagation through a clifford operation is such that it acts as a generalized permutation of each elementary error generator within it's sector (i.e. propagation can't in and of itself map H errors to anything other than H errors, for example). To view the input-output corresponding to the transformation of each error generator we can use the `errorgen_transform_map` method." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4b3ab03-399f-43dd-99d0-957bbf219ad5", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_transform_map = errorgen_propagator.errorgen_transform_map(c)" + ] + }, + { + "cell_type": "markdown", + "id": "eeddc38f-78cc-46e9-ae36-c506a352c7a5", + "metadata": {}, + "source": [ + "This method returns a dictionary with the following structure: Keys are tuples of the form (, ), and values are of the form (, ), where overall_phase corresponds to the overall sign accumulated on the final error generator rate as a result of propagation. So, for example, we can see that as a result of propagation through the circuit the H(XIII) error generator at circuit layer 1 is mapped to an H(ZIII) error generator accruing and overall phase of -1.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5600efa-93a4-4100-93f7-43173c83f948", + "metadata": {}, + "outputs": [], + "source": [ + "print(errorgen_transform_map[(_LSE('H', [stim.PauliString('XIII')]), 1)])" + ] + }, + { + "cell_type": "markdown", + "id": "16b35551-56e1-4cb9-ab69-9e0bb71205d6", + "metadata": {}, + "source": [ + "For some purposes it can be useful to go another step further and identity which gate a particular error might be associated with in the original error model. For this purpose `ErrorGeneratorPropagator` has a helper method available called `errorgen_gate_contributors`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "432a7f5d-d27b-4f12-a262-810609318cf3", + "metadata": {}, + "outputs": [], + "source": [ + "print(errorgen_propagator.errorgen_gate_contributors(_LSE('H', [stim.PauliString('XIII')]), c, layer_idx=1))" + ] + }, + { + "cell_type": "markdown", + "id": "bd1e42b3-2309-4c67-9d78-67ac5510bbb9", + "metadata": {}, + "source": [ + "Here this method returns the fact that in our particular error model the only gate at layer index 1 which could have contributed this particular error generator was the 'Gxpi2' gate acting on qubit 0. In some error models it may be possible for multiple gates to contribute to a particular rate, in which case this method should return all such gates." + ] + }, + { + "cell_type": "markdown", + "id": "90c2be36-4333-40b7-b3a4-742bc60623b1", + "metadata": {}, + "source": [ + "## BCH Approximation\n", + "In the previous section we showed how to use the `ErrorGeneratorPropagator` class to transform a circuit with a series of post-gate error generators into an equivalent representation of this noisy circuit with instead a series of post-circuit error generator layers. What if we want a single effective end-of-circuit error generator which approximates the overall action of the composition of each of the propagated error generators? To do so the `ErrorGeneratorPropagator` class supports the option to iteratively apply the BCH approximation at various orders to perform this recombination.\n", + "\n", + "The main method for performing propagation together with the iterative application of the BCH approximation is called `propagate_errorgens_bch`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ee0db5d-972c-4367-96da-f390fe1ac56e", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layer_first_order = errorgen_propagator.propagate_errorgens_bch(c)" + ] + }, + { + "cell_type": "markdown", + "id": "5c5f5c55-fbc1-4371-8f7e-d274c6c361bc", + "metadata": {}, + "source": [ + "As before this method propagated all of a circuits error generator layers to the very end, but follows this up with an iterative application of the BCH approximation resulting in a single final error generator. Without any additional optional arguments specified this uses the first-order BCH approximation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0d58c38-f3ce-494d-8daa-cbb5ef8d66b0", + "metadata": {}, + "outputs": [], + "source": [ + "print(propagated_errorgen_layer_first_order)" + ] + }, + { + "cell_type": "markdown", + "id": "e7b85f44-fe5e-4d2e-9555-8cae50e8f03b", + "metadata": {}, + "source": [ + "This method supports a number of additional arguments beyond those already for `propagate_errorgens`:\n", + "- `bch_order`: An integer from 1 to 5 specifying the order of the BCH approximation to apply (5 is the current maximum). Note that the computational cost of higher order BCH can scale rapidly, so keep this in mind when balancing the need for accuracy and speed of computation.\n", + "- `truncation_threshold`: This argument allows you to specify a minimum threshold (in terms of error generator rate) below which rates are truncated to zero. This can improve performance by allowing one to skip the computation of terms corresponding to very small corrections.\n", + "Some interesting emergent behavior starts to occur when we begin to look at higher-order BCH corrections." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b88d003-76d2-4464-9a77-3c4ce6e63745", + "metadata": {}, + "outputs": [], + "source": [ + "propagated_errorgen_layer_second_order = errorgen_propagator.propagate_errorgens_bch(c, bch_order=2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a60a527-051b-47e3-bbdc-41492bdeb2f1", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "print(propagated_errorgen_layer_second_order)" + ] + }, + { + "cell_type": "markdown", + "id": "90409574-98b3-4fec-9b05-9c5adc9764af", + "metadata": {}, + "source": [ + "Aside from the fact that there are now significantly more terms than was found for the first-order BCH approximation, notice that there are also now emergent second (and higher) order contributions due to C and A error generators which arise from the composition of purely H and S error generators. These additional terms arise from the non-commutivity of the elementary error generators, particularly the non-commutivity of H and S elementary error generators. For more on this phenomenon see [insert paper reference here]." + ] + }, + { + "cell_type": "markdown", + "id": "1e818c20-a1a7-4b1c-a068-e0ba548614f8", + "metadata": {}, + "source": [ + "## Approximate Probabilities and Expectation Values\n", + "Now you have an efficient representation for an approximation to the effective end-of-circuit error generator for your circuit, what can you do with it? In this section we show how to use this sparse representation to efficiently compute corrections to the outcome probability distributions and pauli observable expectation values of noisy clifford circuits." + ] + }, + { + "cell_type": "markdown", + "id": "405b95e6-15fc-4a2e-84d5-766a0573ee00", + "metadata": {}, + "source": [ + "We'll start off by demonstrating how to perform strong simulation using the results of error generator propagation to estimate the output probabilities for a desired computational basis state. 
\n", + "\n", + "To do so we'll be making use of the function `approximate_stabilizer_probability` from the `errgenproptools` module. This function takes as input the following arguments:\n", + "\n", + "- errorgen_dict : A dictionary of elementary error generator coefficients and their corresponding rates (as outputted, for example, by `propagate_errorgens_bch`.\n", + "- circuit : The circuit to compute the output probability for. This can by a pyGSTi `Circuit` object, or alternatively a `stim.Tableau`.\n", + "- desired_bitstring : A string corresponding to the desired computational basis state.\n", + "- order : Order of the taylor series approximation for the exponentiated error generator to use in computing the approximate output probability. In principle this function can compute arbitary-order approximation (but practically the cost of the computation scales in the order).\n", + "- truncation_threshold : As described above, this is a minimum value below which contributions are truncated to zero which can sometimes improve performance by reducing the number of terms computed with very small overall corrections to the calculated probability. \n", + "\n", + "Let's use the results of the application of the second-order BCH approximation above and compute the approximate probability of reading out the all-zeros state from our circuit. For the ideal circuit, the probability of observing the all-zeros state is 0." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cc9e0a0-2da5-40fc-8556-4d3272f1b1be", + "metadata": {}, + "outputs": [], + "source": [ + "first_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=1)\n", + "print(first_order_approximate_prob)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce153eee-90c3-4d8e-b25e-8ec849cde6c0", + "metadata": {}, + "outputs": [], + "source": [ + "second_order_approximate_prob = eprop.approximate_stabilizer_probability(propagated_errorgen_layer_second_order, c, '0000', order=2)\n", + "print(second_order_approximate_prob)" + ] + }, + { + "cell_type": "markdown", + "id": "091dea97-7421-45f3-a1ab-0d17f90d1e88", + "metadata": {}, + "source": [ + "In this few qubit test case we also have the luxury compare this to the results of the (effectively) exact forward simulation for the error model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2978aee0-7447-452e-85b4-41bbbb79f738", + "metadata": {}, + "outputs": [], + "source": [ + "exact_probability = error_model.sim.probs(c)['0000']\n", + "print(exact_probability)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5fb5fb21-bc96-446b-859c-2252c385c55a", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): {abs(exact_probability-first_order_approximate_prob)}')\n", + "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_probability-second_order_approximate_prob)}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87e43f33-5b0a-436a-8048-7836879fd205", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_probability-first_order_approximate_prob)/exact_probability}%')\n", + "print(f'Relative Error 
Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_probability-second_order_approximate_prob)/exact_probability}%')" + ] + }, + { + "cell_type": "markdown", + "id": "6bc34c95-9553-4c82-a80c-539c801b9029", + "metadata": {}, + "source": [ + "Here we can see that with the combination of second-order BCH and second-order taylor approximations our estimated probability is accurate to well below a 1 percent relative error. By going out to higher-order in either approximation one can achieve even higher levels of accuracy." + ] + }, + { + "cell_type": "markdown", + "id": "f858f5fd-2bb0-4e38-bab6-9b061aa5a273", + "metadata": {}, + "source": [ + "In addition to strong simulation of the output probabilities of computational basis states, it is also possible to compute approximate values for the expectation values of pauli observables. The main function for doing so is `approximate_stabilizer_pauli_expectation` from the `errgenproptools` module, the signature of which is nearly identical to that of `approximate_stabilizer_probability` described above, except taking instead a desired pauli observable to estimate the expectation value for. Here we'll again use the results of the second-order BCH approximation produced above and look are various order of the taylor series approximation for the pauli expectation value of 'XYZI' (the value for the ideal noise-free circuit is 1)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa7c8e66-de23-41b5-8ec8-401d6e3c1bf7", + "metadata": {}, + "outputs": [], + "source": [ + "first_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=1)\n", + "print(first_order_approximate_pauli_expectation)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b6bd6fb-f7e6-482a-97c4-20bcba5749a6", + "metadata": {}, + "outputs": [], + "source": [ + "second_order_approximate_pauli_expectation = eprop.approximate_stabilizer_pauli_expectation(propagated_errorgen_layer_second_order, c, 'XYZI', order=2)\n", + "print(second_order_approximate_pauli_expectation)" + ] + }, + { + "cell_type": "markdown", + "id": "1c1efde8-7cf4-430a-94a6-376bdf991e67", + "metadata": {}, + "source": [ + "There aren't existing built-in functions in pyGSTi for outputing exact pauli expectation values handy, but we can write a short helper function for computing these for the sake of comparison with our above results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17731ccb-2cd7-4d7e-8ed8-f21b199d95a3", + "metadata": {}, + "outputs": [], + "source": [ + "from pygsti.tools.basistools import change_basis\n", + "import numpy as np\n", + "from pygsti.baseobjs import Label\n", + "def pauli_expectation_exact(error_propagator, target_model, circuit, pauli):\n", + " #get the eoc error channel, and the process matrix for the ideal circuit:\n", + " eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True)\n", + " ideal_channel = target_model.sim.product(circuit)\n", + " #also get the ideal state prep and povm:\n", + " ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy()\n", + " \n", + " #finally need the superoperator for the selected pauli.\n", + " pauli_unitary = pauli.to_unitary_matrix(endian='big')\n", + " #flatten this row-wise\n", + " pauli_vec = np.ravel(pauli_unitary)\n", + " pauli_vec.reshape((len(pauli_vec),1))\n", + " #put this in pp basis (since these are paulis themselves I could just read this off directly).\n", + " pauli_vec = change_basis(pauli_vec, 'std', 'pp')\n", + " #print(pauli_vec)\n", + " dense_prep = ideal_prep.to_dense().copy()\n", + " expectation = np.linalg.multi_dot([pauli_vec.reshape((1,len(pauli_vec))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]).item()\n", + " return expectation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ca1ba09-e416-475d-bb57-58eb28db08a0", + "metadata": {}, + "outputs": [], + "source": [ + "exact_pauli_expectation = pauli_expectation_exact(errorgen_propagator, target_model, c, stim.PauliString('XYZI'))\n", + "print(exact_pauli_expectation)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "407e0d72-ced0-4105-8a4f-463085406f18", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Absolute Error Approx to Exact (First-order Taylor, Second-order BCH): 
{abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)}')\n", + "print(f'Absolute Error Approx to Exact (Second-order Taylor, Second-order BCH): {abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf63b01d-244c-4943-b477-576b1be496f0", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Relative Error Approx to Exact (First-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-first_order_approximate_pauli_expectation)/exact_pauli_expectation}%')\n", + "print(f'Relative Error Approx to Exact (Second-order taylor, Second-order BCH): {100*abs(exact_pauli_expectation-second_order_approximate_pauli_expectation)/exact_pauli_expectation}%')" + ] + }, + { + "cell_type": "markdown", + "id": "da73f141-80a7-4383-94ae-a530273f3e3d", + "metadata": {}, + "source": [ + "In this case even with the first-order taylor approximation together with the second-order BCH approximation the relative error to the exact expecation value is roughly half a percent, dropping to below a tenth of a percent when we go up to the second order taylor approximation. As before, by going out to higher-order in either approximation one can achieve even higher levels of accuracy." 
+ ] + }, + { + "cell_type": "markdown", + "id": "5a4f38cd-4625-4ecf-9f69-3046d4a45ebd", + "metadata": {}, + "source": [ + "## Other Helpful Utilities:\n", + "In this section we'll highlight a few additional utilities within the error generator propagation related modules which are often useful (some of these you may have even seen us use above!).\n", + "\n", + "We'll specifically cover:\n", + "- `eoc_error_channel`\n", + "- `errorgen_layer_dict_to_errorgen`\n", + "- `approximate_stabilizer_probabilities`\n", + "- `error_generator_commutator`\n", + "- `error_generator_composition`" + ] + }, + { + "cell_type": "markdown", + "id": "c8ab436d-9b2c-4125-9fb8-69e281225308", + "metadata": {}, + "source": [ + "#### `eoc_error_channel` : \n", + "This method provides a simple single function call for generating a dense representation of the end-of-circuit error channel (i.e. the exponentiated end-of-circuit error generator). This can be useful in few-qubit testing, but obviously doesn't not scale beyond a few qubits. This end-of-circuit error channel can be produced either exactly or without the BCH approximation. In the former case this is acheived by exponentiating and multiplying together all of the propagated error generator layers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ed42df89-31b9-48af-9f3d-1baf538dc64b", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_channel_exact = errorgen_propagator.eoc_error_channel(c, use_bch=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73cd2961-5280-4d38-8f55-49b6fc4fb7b4", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_channel_first_order_BCH = errorgen_propagator.eoc_error_channel(c, use_bch=True, bch_kwargs={'bch_order':1})\n", + "dense_end_of_circuit_channel_second_order_BCH = errorgen_propagator.eoc_error_channel(c, use_bch=True, bch_kwargs={'bch_order':2})" + ] + }, + { + "cell_type": "markdown", + "id": "b3a2d84e-2266-4337-9be7-08f7047141b4", + "metadata": {}, + "source": [ + "This can be useful in testing settings, for example, where we can use these as yet another way to measure the accuracy of our approximation methods." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38926ad9-09e1-4174-b845-47bc2aad305e", + "metadata": {}, + "outputs": [], + "source": [ + "print(f'Frobenius norm between exact and 1st-order BCH EOC channels: {np.linalg.norm(dense_end_of_circuit_channel_exact-dense_end_of_circuit_channel_first_order_BCH)}')\n", + "print(f'Frobenius norm between exact and 2nd-order BCH EOC channels: {np.linalg.norm(dense_end_of_circuit_channel_exact-dense_end_of_circuit_channel_second_order_BCH)}')" + ] + }, + { + "cell_type": "markdown", + "id": "1649c1e7-29a3-492d-b4fb-0cfe2b491c7e", + "metadata": {}, + "source": [ + "#### `errorgen_layer_dict_to_errorgen`\n", + "Throughout the error generator propagation framework we generate a lot of sparse error generator representations in terms of dictionaries of elementary error generator coefficients and corresponding rates. 
For testing purposes (with just a few qubits, this obviously does not scale) it is often useful to convert these into a dense representation as a numpy array. This method helps do so in just a single line." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51361d27-6f0f-4765-8b35-81d1f2f62362", + "metadata": {}, + "outputs": [], + "source": [ + "dense_end_of_circuit_errorgen_first_order_BCH = errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layer_first_order)" + ] + }, + { + "cell_type": "markdown", + "id": "8cedfee7-22db-42a5-98e1-950fdef54149", + "metadata": {}, + "source": [ + "By default this returns the error generator in the normalized pauli-product basis, but this can be changed using the optional `mx_basis` kwarg.\n", + "Note: There is another function called `errorgen_layer_to_matrix` available in the `errgenproptools` module with similar functionality to this method, but with a somewhat different interface. That function can be particularly useful in situations where you may want to compute a lot of dense error generator representations from the outputs of the error generator propagation framework, so check out the documentation of that function for more." + ] + }, + { + "cell_type": "markdown", + "id": "b2f914cc-2c69-4e80-ab99-9be1fe0b3a9a", + "metadata": {}, + "source": [ + "#### `approximate_stabilizer_probabilities`\n", + "This one is straightforward. Above we showed the use of the function `approximate_stabilizer_probability` from the `errgenproptools` module for calculating approximate output probabilities for a given computational bitstring. If you happen to want *all* of the bit string probabilities you can save yourself a for loop by using the function `approximate_stabilizer_probabilities` from this module instead!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21fcd7cc-6799-4aca-b524-69868d0a0169", + "metadata": {}, + "outputs": [], + "source": [ + "approximate_probabilities = eprop.approximate_stabilizer_probabilities(propagated_errorgen_layer_first_order, c, order=1)\n", + "print(approximate_probabilities)" + ] + }, + { + "cell_type": "markdown", + "id": "464e2b37-3d00-4503-9a4d-40f60aa4c3a3", + "metadata": {}, + "source": [ + "Note the returned values are given in right-LSB convention (i.e. '0000' -> '0001' ->'0010', etc.)" + ] + }, + { + "cell_type": "markdown", + "id": "3984dac3-6114-4f4e-80e0-4ba31a79886a", + "metadata": {}, + "source": [ + "#### `error_generator_commutator` and `error_generator_composition`\n", + "These two functions from the `errgenproptools` module return the result of analytically computing the commutator and composition of two elementary error generators, respectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83ba69f1-bbd0-4133-8ef3-abd9dc020888", + "metadata": {}, + "outputs": [], + "source": [ + "errorgen_1 = _LSE('H', [stim.PauliString('X')])\n", + "errorgen_2 = _LSE('S', [stim.PauliString('Z')])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ecd3b00-4a7b-4c8e-8a5f-bdba13565fb4", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_commutator(errorgen_1, errorgen_2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9862ff3-8717-48e7-b0a8-ec3a2d07c974", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_composition(errorgen_1, errorgen_2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a020355-1796-4ddd-ad85-178b37266e35", + "metadata": {}, + "outputs": [], + "source": [ + "print(eprop.error_generator_composition(errorgen_1, errorgen_1))" + ] + }, + { + "cell_type": "markdown", + "id": "9723d08d-09ef-4ea7-ad0e-5624fa6f8501", + "metadata": {}, + "source": [ 
+ "Both of these methods return their output as a list of two-element tuples. This list is a specification for the linear combination of elementary error generator coefficients corresponding to the commutator or composition of the two input elementary error generators. (First tuple element is an elementary error generator in the linear combination, and the second element is the coefficient of that elementary error generator in the linear combination).\n", + "\n", + "In the examples above we can see that the commutator of the specified H and S error generators gives rise to a pauli-correlation (C) error generator. This could potentially give rise to emergent C error generators when applying second-or-higher order BCH approximations for the effective end-of-circuit error generator, for example. Likewise the composition of these to error generators is a linear combination of a C error generator and an H error generator. And finally we see that squaring an H error generator (composing it with itself) gives rise to a pauli-stochastic (S) error generator." + ] + }, + { + "cell_type": "markdown", + "id": "cdfce65a-f619-4ef8-b52d-dff5d02a314f", + "metadata": {}, + "source": [ + "There's a whole bunch of other functionality and utilities available, particularly in the `errgenproptools` module which have not been covered in this tutorial, so please check out the documentation for additional capabilities!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f305946-e23d-4572-909e-6389dfa6a26b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pygsti/algorithms/randomcircuit.py b/pygsti/algorithms/randomcircuit.py index 866f55526..36005dd03 100644 --- a/pygsti/algorithms/randomcircuit.py +++ b/pygsti/algorithms/randomcircuit.py @@ -711,8 +711,9 @@ def create_random_circuit(pspec, length, qubit_labels=None, sampler='Qeliminatio 1-element list consisting of a list of the relevant gate names (e.g., `lsargs` = ['Gi, 'Gxpi, 'Gypi', 'Gzpi']). - rand_state: RandomState, optional - A np.random.RandomState object for seeding RNG + rand_state: RandomState or int, optional (default None) + A np.random.RandomState object for seeding RNG. If an integer is passed in + this is used to set the seed for a newly constructed RNG. 
Returns ------- @@ -726,6 +727,8 @@ def create_random_circuit(pspec, length, qubit_labels=None, sampler='Qeliminatio lsargs = [] if rand_state is None: rand_state = _np.random.RandomState() + if isinstance(rand_state, int): + rand_state = _np.random.RandomState(rand_state) if isinstance(sampler, str): diff --git a/pygsti/baseobjs/basis.py b/pygsti/baseobjs/basis.py index 47d024847..3796ed3a7 100644 --- a/pygsti/baseobjs/basis.py +++ b/pygsti/baseobjs/basis.py @@ -454,7 +454,8 @@ def is_equivalent(self, other, sparseness_must_match=True): return _np.array_equal(self.elements, other.elements) else: return _np.array_equal(self.elements, other) - + + @lru_cache(maxsize=4) def create_transform_matrix(self, to_basis): """ Get the matrix that transforms a vector from this basis to `to_basis`. @@ -484,6 +485,7 @@ def create_transform_matrix(self, to_basis): else: return _np.dot(to_basis.from_std_transform_matrix, self.to_std_transform_matrix) + @lru_cache(maxsize=4) def reverse_transform_matrix(self, from_basis): """ Get the matrix that transforms a vector from `from_basis` to this basis. @@ -1076,6 +1078,11 @@ def __init__(self, name, dim_or_statespace, sparse=False): super(BuiltinBasis, self).__init__(name, longname, real, sparse) + #precompute some properties + self._size, self._dim, self._elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) + #Check that sparse is True only when elements are *matrices* + assert(not self.sparse or len(self._elshape) == 2), "`sparse == True` is only allowed for *matrix*-valued bases!" + def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, @@ -1096,16 +1103,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. 
""" - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - return dim + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - return size + return self._size @property def elshape(self): @@ -1115,12 +1120,7 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - size, dim, elshape = _basis_constructor_dict[self.name].sizes(dim=self.state_space.dim, sparse=self.sparse) - - #Check that sparse is True only when elements are *matrices* - assert(not self.sparse or len(elshape) == 2), "`sparse == True` is only allowed for *matrix*-valued bases!" - - return elshape + return self._elshape @property def first_element_is_identity(self): @@ -1227,26 +1227,29 @@ def __init__(self, component_bases, name=None, longname=None): ''' assert(len(component_bases) > 0), "Must supply at least one component basis" - self.component_bases = [] + self._component_bases = [] self._vector_elements = None # vectorized elements: 1D arrays for compbasis in component_bases: if isinstance(compbasis, Basis): - self.component_bases.append(compbasis) + self._component_bases.append(compbasis) else: #compbasis can be a list/tuple of args to Basis.cast, e.g. 
('pp',2) - self.component_bases.append(Basis.cast(*compbasis)) + self._component_bases.append(Basis.cast(*compbasis)) if name is None: - name = "+".join([c.name for c in self.component_bases]) + name = "+".join([c.name for c in self._component_bases]) if longname is None: longname = "Direct-sum basis with components " + ", ".join( - [c.name for c in self.component_bases]) + [c.name for c in self._component_bases]) - real = all([c.real for c in self.component_bases]) - sparse = all([c.sparse for c in self.component_bases]) - assert(all([c.real == real for c in self.component_bases])), "Inconsistent `real` value among component bases!" - assert(all([c.sparse == sparse for c in self.component_bases])), "Inconsistent sparsity among component bases!" + real = all([c.real for c in self._component_bases]) + sparse = all([c.sparse for c in self._component_bases]) + assert(all([c.real == real for c in self._component_bases])), "Inconsistent `real` value among component bases!" + assert(all([c.sparse == sparse for c in self._component_bases])), "Inconsistent sparsity among component bases!" + + #precompute various basis properties. can add more as they are deemed frequently accessed. 
+ self._dim = sum([c.dim for c in self._component_bases]) #Init everything but elements and labels & their number/size super(DirectSumBasis, self).__init__(name, longname, real, sparse) @@ -1255,7 +1258,7 @@ def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, 'longname': self.longname, - 'component_bases': [b.to_nice_serialization() for b in self.component_bases] + 'component_bases': [b.to_nice_serialization() for b in self._component_bases] }) return state @@ -1264,6 +1267,11 @@ def _from_nice_serialization(cls, state): component_bases = [Basis.from_nice_serialization(b) for b in state['component_bases']] return cls(component_bases, state['name'], state['longname']) + @property + def component_bases(self): + """A list of the component bases.""" + return self._component_bases + @property def dim(self): """ @@ -1271,14 +1279,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - return sum([c.dim for c in self.component_bases]) + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - return sum([c.size for c in self.component_bases]) + return sum([c.size for c in self._component_bases]) @property def elshape(self): @@ -1288,13 +1296,13 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - elndim = len(self.component_bases[0].elshape) - assert(all([len(c.elshape) == elndim for c in self.component_bases]) + elndim = len(self._component_bases[0].elshape) + assert(all([len(c.elshape) == elndim for c in self._component_bases]) ), "Inconsistent element ndims among component bases!" 
- return tuple([sum([c.elshape[k] for c in self.component_bases]) for k in range(elndim)]) + return tuple([sum([c.elshape[k] for c in self._component_bases]) for k in range(elndim)]) def __hash__(self): - return hash(tuple((hash(comp) for comp in self.component_bases))) + return hash((self.name,)+tuple((hash(comp) for comp in self._component_bases))) def _lazy_build_vector_elements(self): if self.sparse: @@ -1303,7 +1311,7 @@ def _lazy_build_vector_elements(self): compMxs = _np.zeros((self.size, self.dim), 'complex') i, start = 0, 0 - for compbasis in self.component_bases: + for compbasis in self._component_bases: for lbl, vel in zip(compbasis.labels, compbasis.vector_elements): assert(_sps.issparse(vel) == self.sparse), "Inconsistent sparsity!" if self.sparse: @@ -1325,7 +1333,7 @@ def _lazy_build_elements(self): vstart = 0 if self.sparse: # build block-diagonal sparse mx diagBlks = [] - for compbasis in self.component_bases: + for compbasis in self._component_bases: cs = compbasis.elshape comp_vel = vel[vstart:vstart + compbasis.dim] diagBlks.append(comp_vel.reshape(cs)) @@ -1335,7 +1343,7 @@ def _lazy_build_elements(self): else: start = [0] * self.elndim el = _np.zeros(self.elshape, 'complex') - for compbasis in self.component_bases: + for compbasis in self._component_bases: cs = compbasis.elshape comp_vel = vel[vstart:vstart + compbasis.dim] slc = tuple([slice(start[k], start[k] + cs[k]) for k in range(self.elndim)]) @@ -1349,12 +1357,12 @@ def _lazy_build_elements(self): def _lazy_build_labels(self): self._labels = [] - for i, compbasis in enumerate(self.component_bases): + for i, compbasis in enumerate(self._component_bases): for lbl in compbasis.labels: self._labels.append(lbl + "/%d" % i) def _copy_with_toggled_sparsity(self): - return DirectSumBasis([cb._copy_with_toggled_sparsity() for cb in self.component_bases], + return DirectSumBasis([cb._copy_with_toggled_sparsity() for cb in self._component_bases], self.name, self.longname) def is_equivalent(self, 
other, sparseness_must_match=True): @@ -1376,9 +1384,9 @@ def is_equivalent(self, other, sparseness_must_match=True): """ otherIsBasis = isinstance(other, DirectSumBasis) if not otherIsBasis: return False # can't be equal to a non-DirectSumBasis - if len(self.component_bases) != len(other.component_bases): return False + if len(self._component_bases) != len(other.component_bases): return False return all([c1.is_equivalent(c2, sparseness_must_match) - for (c1, c2) in zip(self.component_bases, other.component_bases)]) + for (c1, c2) in zip(self._component_bases, other.component_bases)]) @property def vector_elements(self): @@ -1469,7 +1477,7 @@ def create_equivalent(self, builtin_basis_name): ------- DirectSumBasis """ - equiv_components = [c.create_equivalent(builtin_basis_name) for c in self.component_bases] + equiv_components = [c.create_equivalent(builtin_basis_name) for c in self._component_bases] return DirectSumBasis(equiv_components) def create_simple_equivalent(self, builtin_basis_name=None): @@ -1497,9 +1505,9 @@ def create_simple_equivalent(self, builtin_basis_name=None): """ if builtin_basis_name is None: builtin_basis_name = self.name # default - if len(self.component_bases) > 0: - first_comp_name = self.component_bases[0].name - if all([c.name == first_comp_name for c in self.component_bases]): + if len(self._component_bases) > 0: + first_comp_name = self._component_bases[0].name + if all([c.name == first_comp_name for c in self._component_bases]): builtin_basis_name = first_comp_name # if all components have the same name return BuiltinBasis(builtin_basis_name, self.elsize, sparse=self.sparse) # Note: changes dimension @@ -1554,24 +1562,36 @@ def __init__(self, component_bases, name=None, longname=None): ''' assert(len(component_bases) > 0), "Must supply at least one component basis" - self.component_bases = [] + self._component_bases = [] for compbasis in component_bases: if isinstance(compbasis, Basis): - self.component_bases.append(compbasis) + 
self._component_bases.append(compbasis) else: #compbasis can be a list/tuple of args to Basis.cast, e.g. ('pp',2) - self.component_bases.append(Basis.cast(*compbasis)) + self._component_bases.append(Basis.cast(*compbasis)) if name is None: - name = "*".join([c.name for c in self.component_bases]) + name = "*".join([c.name for c in self._component_bases]) if longname is None: longname = "Tensor-product basis with components " + ", ".join( - [c.name for c in self.component_bases]) + [c.name for c in self._component_bases]) - real = all([c.real for c in self.component_bases]) - sparse = all([c.sparse for c in self.component_bases]) - #assert(all([c.real == real for c in self.component_bases])), "Inconsistent `real` value among component bases!" - assert(all([c.sparse == sparse for c in self.component_bases])), "Inconsistent sparsity among component bases!" + real = all([c.real for c in self._component_bases]) + sparse = all([c.sparse for c in self._component_bases]) + #assert(all([c.real == real for c in self._component_bases])), "Inconsistent `real` value among component bases!" + assert(all([c.sparse == sparse for c in self._component_bases])), "Inconsistent sparsity among component bases!" + + #precompute certain properties. Can add more as deemed frequently accessed. + self._dim = int(_np.prod([c.dim for c in self._component_bases])) + + #NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a + # basis are in their "natrual" representation where it makes sense to take tensor products. For + # example, a direct-sum basis may hold elements in a compact way that violate this... but I'm not sure if they + # do and this needs to be checked. 
For now, we could just disable this overly-restrictive assert: + assert(all([c.is_simple() for c in self._component_bases])), \ + "Components of a tensor product basis must be *simple* (have vector-dimension == size of elements)" + # because we use the natural representation to take tensor (kronecker) products. + # Note: this assertion also means dim == product(component_elsizes) == elsize, so basis is *simple* super(TensorProdBasis, self).__init__(name, longname, real, sparse) @@ -1579,7 +1599,7 @@ def _to_nice_serialization(self): state = super()._to_nice_serialization() state.update({'name': self.name, 'longname': self.longname, - 'component_bases': [b.to_nice_serialization() for b in self.component_bases] + 'component_bases': [b.to_nice_serialization() for b in self._component_bases] }) return state @@ -1588,6 +1608,11 @@ def _from_nice_serialization(cls, state): component_bases = [Basis.from_nice_serialization(b) for b in state['component_bases']] return cls(component_bases, state['name'], state['longname']) + @property + def component_bases(self): + """A list of the component bases.""" + return self._component_bases + @property def dim(self): """ @@ -1595,25 +1620,14 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - dim = int(_np.prod([c.dim for c in self.component_bases])) - - #NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a - # basis are in their "natrual" representation where it makes sense to take tensor products. For - # example, a direct-sum basis may hold elements in a compact way that violate this... but I'm not sure if they - # do and this needs to be checked. 
For now, we could just disable this overly-restrictive assert: - assert(all([c.is_simple() for c in self.component_bases])), \ - "Components of a tensor product basis must be *simple* (have vector-dimension == size of elements)" - # because we use the natural representation to take tensor (kronecker) products. - # Note: this assertion also means dim == product(component_elsizes) == elsize, so basis is *simple* - - return dim + return self._dim @property def size(self): """ The number of elements (or vector-elements) in the basis. """ - return int(_np.prod([c.size for c in self.component_bases])) + return int(_np.prod([c.size for c in self._component_bases])) @property def elshape(self): @@ -1623,16 +1637,16 @@ def elshape(self): Note that *vector elements* always have shape `(dim,)` (or `(dim,1)` in the sparse case). """ - elndim = max([c.elndim for c in self.component_bases]) + elndim = max([c.elndim for c in self._component_bases]) elshape = [1] * elndim - for c in self.component_bases: + for c in self._component_bases: off = elndim - c.elndim for k, d in enumerate(c.elshape): elshape[k + off] *= d return tuple(elshape) def __hash__(self): - return hash(tuple((hash(comp) for comp in self.component_bases))) + return hash((self.name,) + tuple((hash(comp) for comp in self._component_bases))) def _lazy_build_elements(self): #LAZY building of elements (in case we never need them) @@ -1644,7 +1658,7 @@ def _lazy_build_elements(self): #Take kronecker product of *natural* reps of component-basis elements # then reshape to vectors at the end. This requires that the vector- # dimension of the component spaces equals the "natural space" dimension. 
- comp_els = [c.elements for c in self.component_bases] + comp_els = [c.elements for c in self._component_bases] for i, factors in enumerate(_itertools.product(*comp_els)): if self.sparse: M = _sps.identity(1, 'complex', 'csr') @@ -1660,12 +1674,12 @@ def _lazy_build_elements(self): def _lazy_build_labels(self): self._labels = [] - comp_lbls = [c.labels for c in self.component_bases] + comp_lbls = [c.labels for c in self._component_bases] for i, factor_lbls in enumerate(_itertools.product(*comp_lbls)): self._labels.append(''.join(factor_lbls)) def _copy_with_toggled_sparsity(self): - return TensorProdBasis([cb._copy_with_toggled_sparsity() for cb in self.component_bases], + return TensorProdBasis([cb._copy_with_toggled_sparsity() for cb in self._component_bases], self.name, self.longname) def is_equivalent(self, other, sparseness_must_match=True): @@ -1687,10 +1701,10 @@ def is_equivalent(self, other, sparseness_must_match=True): """ otherIsBasis = isinstance(other, TensorProdBasis) if not otherIsBasis: return False # can't be equal to a non-DirectSumBasis - if len(self.component_bases) != len(other.component_bases): return False + if len(self._component_bases) != len(other.component_bases): return False if self.sparse != other.sparse: return False return all([c1.is_equivalent(c2, sparseness_must_match) - for (c1, c2) in zip(self.component_bases, other.component_bases)]) + for (c1, c2) in zip(self._component_bases, other.component_bases)]) def create_equivalent(self, builtin_basis_name): """ @@ -1714,11 +1728,11 @@ def create_equivalent(self, builtin_basis_name): # This is a part of what woudl go into that... but it's not complete. 
# if builtin_basis_name == 'std': # special case when we change classical components to 'cl' # equiv_components = [] - # for c in self.component_bases: + # for c in self._component_bases: # if c.elndim == 1: equiv_components.append(c.create_equivalent('cl')) # else: equiv_components.append(c.create_equivalent('std')) # else: - equiv_components = [c.create_equivalent(builtin_basis_name) for c in self.component_bases] + equiv_components = [c.create_equivalent(builtin_basis_name) for c in self._component_bases] return TensorProdBasis(equiv_components) def create_simple_equivalent(self, builtin_basis_name=None): @@ -1746,7 +1760,7 @@ def create_simple_equivalent(self, builtin_basis_name=None): """ #if builtin_basis_name == 'std': # special case when we change classical components to 'clmx' # equiv_components = [] - # for c in self.component_bases: + # for c in self._component_bases: # if c.elndim == 1: equiv_components.append(BuiltinBasis('clmx', c.dim**2, sparse=self.sparse)) # # c.create_simple_equivalent('clmx')) # else: equiv_components.append(c.create_simple_equivalent('std')) @@ -1755,8 +1769,8 @@ def create_simple_equivalent(self, builtin_basis_name=None): if builtin_basis_name is None: builtin_basis_name = self.name # default - if len(self.component_bases) > 0: - first_comp_name = self.component_bases[0].name - if all([c.name == first_comp_name for c in self.component_bases]): + if len(self._component_bases) > 0: + first_comp_name = self._component_bases[0].name + if all([c.name == first_comp_name for c in self._component_bases]): builtin_basis_name = first_comp_name # if all components have the same name return BuiltinBasis(builtin_basis_name, self.elsize, sparse=self.sparse) diff --git a/pygsti/baseobjs/basisconstructors.py b/pygsti/baseobjs/basisconstructors.py index 47e5a1066..012c9401c 100644 --- a/pygsti/baseobjs/basisconstructors.py +++ b/pygsti/baseobjs/basisconstructors.py @@ -442,7 +442,7 @@ def sizes(self, dim, sparse): def std_matrices(matrix_dim): 
""" Get the elements of the matrix unit, or "standard", basis of matrix-dimension `matrix_dim`. - The matrices are ordered so that the row index changes the fastest. + The matrices are ordered so that the column index changes the fastest. Constructs the standard basis spanning the density-matrix space given by `matrix_dim` x `matrix_dim` matrices. diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index bfece8188..f86bee6c5 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -12,10 +12,11 @@ import numpy as _np import itertools as _itertools -import collections as _collections from pygsti.baseobjs import Basis as _Basis -from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel,\ +LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel + from pygsti.tools import optools as _ot @@ -28,50 +29,178 @@ class ElementaryErrorgenBasis(object): """ def label_indices(self, labels, ok_if_missing=False): - """ TODO: docstring """ + """ + Return a list of indices into this basis's label list + for the specifed list of `ElementaryErrorgenLabels`. + + Parameters + ---------- + labels : list of `ElementaryErrorgenLabel` + A list of elementary error generator labels to extract the + indices of. + + ok_if_missing : bool + If True, then returns `None` instead of an integer when the given label is not present + """ return [self.label_index(lbl, ok_if_missing) for lbl in labels] def __len__(self): - """ Number of elementary errorgen elements in this basis """ + """ + Number of elementary errorgen elements in this basis. + """ return len(self.labels) +#helper function for checking label types. 
+def _all_elements_same_type(lst): + if not lst: # Check if the list is empty + return True # An empty list can be considered to have all elements of the same type + + first_type = type(lst[0]) # Get the type of the first element + for element in lst: + if type(element) != first_type: + return False + return True class ExplicitElementaryErrorgenBasis(ElementaryErrorgenBasis): + """ + This basis object contains the information necessary for building, + storing and accessing a set of explicitly represented basis elements for a user + specified set of of elementary error generators. + """ + + def __init__(self, state_space, labels, basis_1q=None): + """ + Instantiate a new explicit elementary error generator basis. + + Parameters + ---------- + state_space : `StateSpace` + An object describing the struture of the entire state space upon which the elements + of this error generator basis act. + + labels : list or tuple of `ElementaryErrorgenLabel` + A list of elementary error generator labels for which basis elements will be + constructed. + + basis1q : `Basis` or str, optional (default None) + A `Basis` object, or str which can be cast to one + corresponding to the single-qubit basis elements which + comprise the basis element labels for the values of the + `ElementaryErrorgenLabels` in `labels`. + """ + labels = tuple(labels) + + #add an assertion that the labels are ElementaryErrorgenLabels and that all of the labels are the same type. + msg = '`labels` should be either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel objects.' + if labels: + assert isinstance(labels[0], (_GlobalElementaryErrorgenLabel, _LocalElementaryErrorgenLabel)), msg + assert _all_elements_same_type(labels), 'All of the elementary error generator labels should be of the same type.' 
- def __init__(self, state_space, labels, basis1q=None): - # TODO: docstring - labels must be of form (sslbls, elementary_errorgen_lbl) - self._labels = tuple(labels) if not isinstance(labels, tuple) else labels - self._label_indices = _collections.OrderedDict([(lbl, i) for i, lbl in enumerate(self._labels)]) - self.basis_1q = basis1q if (basis1q is not None) else _Basis.cast('pp', 4) + self._labels = labels + self._label_indices = {lbl: i for i, lbl in enumerate(self._labels)} + + if isinstance(basis_1q, _Basis): + self._basis_1q = basis_1q + elif isinstance(basis_1q, str): + self._basis_1q = _Basis.cast(basis_1q, 4) + else: + self._basis_1q = _Basis.cast('PP', 4) self.state_space = state_space assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels self.sslbls = sslbls # the "support" of this space - the qubit labels - self._cached_elements = None + + #Caching + self._cached_matrices = None + self._cached_dual_matrices = None + self._cached_supports = None @property def labels(self): return self._labels + + @property + def elemgen_supports(self): + """ + Returns a tuple of tuples, each corresponding to the support + of the elementary error generators in this basis, returned in + the same order as they appear in `labels`. + """ + if self._cached_supports is None: + if isinstance(self._labels[0], _GlobalElementaryErrorgenLabel): + self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self._labels]) + #Otherwise these are LocalElementaryErrorgenLabels + else: + #LocalElementaryErrorgenLabel doesn't have a sslbls attribute indicating + #support like GlobalElementaryErrorgenLabel does, do index into the `sslbls` + #attribute for this object. 
+ self._cached_supports = tuple([tuple([self.sslbls[i] for i in elemgen_label.support_indices()]) + for elemgen_label in self._labels]) + return self._cached_supports + + #TODO: The implementations of some of the following properties are the same as in + #CompleteElementaryErrorgen, refactor some of this into the parent class. + @property + def elemgen_dual_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the dual elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_dual_matrices is None: + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self._labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self._labels] + self._cached_dual_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit_dual( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) + return self._cached_dual_matrices + + @property + def elemgen_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + of the matrix representation of the elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_matrices is None: + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self._labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self._labels] + self._cached_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) + return self._cached_matrices + + @property + def elemgen_supports_and_dual_matrices(self): + """ + Returns a tuple of tuples, each containing a tuple of support and a dual matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. 
+ """ + return tuple(zip(self.elemgen_supports, self.elemgen_dual_matrices)) @property def elemgen_supports_and_matrices(self): - if self._cached_elements is None: - self._cached_elements = tuple( - ((elemgen_label.sslbls, _ot.lindblad_error_generator( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self.basis_1q, normalize=True, sparse=False, tensorprod_basis=True)) - for elemgen_label in self.labels)) - return self._cached_elements + """ + Returns a tuple of tuples, each containing a tuple of support and a matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_matrices)) def label_index(self, label, ok_if_missing=False): """ - TODO: docstring + Return the index of the specified elementary error generator label + in this basis' `labels` list. Parameters ---------- - label - + label : `ElementaryErrorgenLabel` + Elementary error generator label to return index for. + ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. 
""" @@ -79,62 +208,101 @@ def label_index(self, label, ok_if_missing=False): return None return self._label_indices[label] - def create_subbasis(self, must_overlap_with_these_sslbls): + def create_subbasis(self, sslbl_overlap): """ Create a sub-basis of this basis by including only the elements that overlap the given support (state space labels) - """ - sub_sslbls = set(must_overlap_with_these_sslbls) - def overlaps(sslbls): - ret = len(set(sslbls).intersection(must_overlap_with_these_sslbls)) > 0 - if ret: sub_sslbls.update(sslbls) # keep track of all overlaps - return ret - - sub_labels, sub_indices = zip(*[(lbl, i) for i, lbl in enumerate(self._labels) - if overlaps(lbl[0])]) - - sub_state_space = self.state_space.create_subspace(sub_sslbls) - return ExplicitElementaryErrorgenBasis(sub_state_space, sub_labels, self.basis_1q) + Parameters + ---------- + sslbl_overlap : list of sslbls + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this subbasis. 
- def union(self, other_basis): - present_labels = self._label_indices.copy() # an OrderedDict, indices don't matter here - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - present_labels.update(other_basis._label_indices) + """ + #need different logic for LocalElementaryErrorgenLabels + if isinstance(self.labels[0], _GlobalElementaryErrorgenLabel): + sub_sslbls = set(sslbl_overlap) + def overlaps(sslbls): + ret = len(set(sslbls).intersection(sslbl_overlap)) > 0 + if ret: sub_sslbls.update(sslbls) # keep track of all overlaps + return ret + + sub_labels, sub_indices = zip(*[(lbl, i) for i, lbl in enumerate(self._labels) + if overlaps(lbl[0])]) + sub_state_space = self.state_space.create_subspace(sub_sslbls) else: + sub_labels = [] + for lbl in self.labels: + non_trivial_bel_indices = lbl.support_indices() + for sslbl in sslbl_overlap: + if sslbl in non_trivial_bel_indices: + sub_labels.append(lbl) + break + #since using local labels keep the full original state space (the labels won't have gotten any shorter). + sub_state_space = self.state_space.copy() + + return ExplicitElementaryErrorgenBasis(sub_state_space, sub_labels, self._basis_1q) - for other_lbl in other_basis.labels: - if other_lbl not in present_labels: - present_labels[other_lbl] = True + def union(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the union of + this basis with another. + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the union with. + """ + #assert that these two bases have compatible label types. + msg = 'Incompatible `ElementaryErrorgenLabel` types, the two `ElementaryErrorgenBasis` should have the same label type.' + assert type(self._labels[0]) == type(other_basis.labels[0]), msg + + #Get the union of the two bases labels. 
+ union_labels = set(self._labels) | set(other_basis.labels) union_state_space = self.state_space.union(other_basis.state_space) - return ExplicitElementaryErrorgenBasis(union_state_space, tuple(present_labels.keys()), self.basis_1q) + return ExplicitElementaryErrorgenBasis(union_state_space, union_labels, self._basis_1q) def intersection(self, other_basis): - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - common_labels = tuple((lbl for lbl in self.labels if lbl in other_basis._label_indices)) - else: - other_labels = set(other_basis.labels) - common_labels = tuple((lbl for lbl in self.labels if lbl in other_labels)) + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the intersection of + this basis with another. + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the intersection with. + """ + + intersection_labels = set(self._labels) & set(other_basis.labels) intersection_state_space = self.state_space.intersection(other_basis.state_space) - return ExplicitElementaryErrorgenBasis(intersection_state_space, common_labels, self.basis_1q) + return ExplicitElementaryErrorgenBasis(intersection_state_space, intersection_labels, self._basis_1q) def difference(self, other_basis): - if isinstance(other_basis, ExplicitElementaryErrorgenBasis): - remaining_labels = tuple((lbl for lbl in self.labels if lbl not in other_basis._label_indices)) - else: - other_labels = set(other_basis.labels) - remaining_labels = tuple((lbl for lbl in self.labels if lbl not in other_labels)) - - remaining_state_space = self.state_space # TODO: see if we can reduce this space based on remaining_labels? - return ExplicitElementaryErrorgenBasis(remaining_state_space, remaining_labels, self.basis_1q) + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the difference of + this basis with another. (i.e. 
A basis consisting of the labels contained in this basis + but not the other) + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the difference with. + """ + difference_labels = set(self._labels) - set(other_basis.labels) + #TODO: Making the state space equal to the true difference breaks some stuff in the FOGI code + #that relied on the old (kind of incorrect behavior). Revert back to old version temporarily. + #difference_state_space = self.state_space.difference(other_basis.state_space) + difference_state_space = self.state_space + return ExplicitElementaryErrorgenBasis(difference_state_space, difference_labels, self._basis_1q) class CompleteElementaryErrorgenBasis(ElementaryErrorgenBasis): """ - Spanned by the elementary error generators of given type(s) (e.g. "Hamiltonian" and/or "other") - and with elements corresponding to a `Basis`, usually of Paulis. + This basis object contains the information necessary for building, + storing and accessing a set of explicitly represented basis elements + for a basis of elementary error generators spanned by the elementary + error generators of given type(s) (e.g. "Hamiltonian" and/or "other"). 
""" @classmethod @@ -198,45 +366,22 @@ def _count_uptriangle_labels_for_support(cls, support, left_support, type_str, t return cnt - #UNUSED NOW - @classmethod - def _create_all_labels_for_support(cls, support, left_support, type_str, trivial_bel, nontrivial_bels): - n = len(support) # == weight - all_bels = trivial_bel + nontrivial_bels - left_weight = len(left_support) - if left_weight < n: # n1 < n - factors = [nontrivial_bels if x in left_support else trivial_bel for x in support] \ - + [all_bels if x in left_support else nontrivial_bels for x in support] - return [_GlobalElementaryErrorgenLabel(type_str, (''.join(beltup[0:n]), ''.join(beltup[n:])), support) - for beltup in _itertools.product(*factors)] - # (factors == left_factors + right_factors above) - else: # n1 == n - ret = [] - for left_beltup in _itertools.product(*([nontrivial_bels] * n)): # better itertools call here TODO - left_bel = ''.join(left_beltup) - right_it = _itertools.product(*([all_bels] * n)) # better itertools call here TODO - next(right_it) # advance past first (all I) element - assume trivial el = first!! 
- ret.extend([_GlobalElementaryErrorgenLabel(type_str, (left_bel, ''.join(right_beltup)), support) - for right_beltup in right_it]) - return ret @classmethod def _create_ordered_labels(cls, type_str, basis_1q, state_space, - max_weight=None, must_overlap_with_these_sslbls=None, + max_weight=None, sslbl_overlap=None, include_offsets=False, initial_offset=0): offsets = {'BEGIN': initial_offset} labels = [] - #labels_by_support = _collections.OrderedDict() - #all_bels = basis_1q.labels[0:] trivial_bel = [basis_1q.labels[0]] nontrivial_bels = basis_1q.labels[1:] # assume first element is identity - if must_overlap_with_these_sslbls is not None and not isinstance(must_overlap_with_these_sslbls, set): - must_overlap_with_these_sslbls = set(must_overlap_with_these_sslbls) + if sslbl_overlap is not None and not isinstance(sslbl_overlap, set): + sslbl_overlap = set(sslbl_overlap) + assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" + sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels if max_weight is None: - assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" - sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels max_weight = len(sslbls) # Let k be len(nontrivial_bels) @@ -244,8 +389,8 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, # --> for each set of n qubit labels, there are k^n Hamiltonian terms with weight n for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): # NOTE: combinations *MUST* be deterministic - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue offsets[support] = len(labels) + initial_offset labels.extend(cls._create_diag_labels_for_support(support, type_str, 
nontrivial_bels)) @@ -264,8 +409,8 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, # (see _create_ordered_label_offsets) for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue for left_weight in range(1, weight + 1): @@ -281,7 +426,7 @@ def _create_ordered_labels(cls, type_str, basis_1q, state_space, @classmethod def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, - max_weight=None, must_overlap_with_these_sslbls=None, + max_weight=None, sslbl_overlap=None, return_total_support=False, initial_offset=0): """ same as _create_ordered_labels but doesn't actually create the labels - just counts them to get offsets. """ offsets = {'BEGIN': initial_offset} @@ -292,12 +437,12 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, n1Q_nontrivial_bels = n1Q_bels - 1 # assume first element is identity total_support = set() - if must_overlap_with_these_sslbls is not None and not isinstance(must_overlap_with_these_sslbls, set): - must_overlap_with_these_sslbls = set(must_overlap_with_these_sslbls) + if sslbl_overlap is not None and not isinstance(sslbl_overlap, set): + sslbl_overlap = set(sslbl_overlap) + assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" + sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels if max_weight is None: - assert(state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" - sslbls = state_space.sole_tensor_product_block_labels # all the model's state space labels max_weight = len(sslbls) # Let k be len(nontrivial_bels) @@ -305,8 +450,8 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, # --> for 
each set of n qubit labels, there are k^n Hamiltonian terms with weight n for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): # NOTE: combinations *MUST* be deterministic - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue offsets[support] = off + initial_offset off += n1Q_nontrivial_bels**weight @@ -315,8 +460,8 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, elif type_str in ('C', 'A'): for weight in range(1, max_weight + 1): for support in _itertools.combinations(sslbls, weight): - if (must_overlap_with_these_sslbls is not None - and len(must_overlap_with_these_sslbls.intersection(support)) == 0): + if (sslbl_overlap is not None + and len(sslbl_overlap.intersection(support)) == 0): continue total_support.update(support) @@ -332,45 +477,76 @@ def _create_ordered_label_offsets(cls, type_str, basis_1q, state_space, return (offsets, total_support) if return_total_support else offsets def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', 'C', 'A'), - max_ham_weight=None, max_other_weight=None, must_overlap_with_these_sslbls=None): - self._basis_1q = basis_1q + max_weights=None, sslbl_overlap=None, default_label_type='global'): + """ + Parameters + ---------- + basis_1q : `Basis` or str + A `Basis` object, or str which can be cast to one + corresponding to the single-qubit basis elements which + comprise the basis element labels for the values of the + `ElementaryErrorgenLabels` in `labels`. + + state_space : `StateSpace` + An object describing the struture of the entire state space upon which the elements + of this error generator basis act. 
+ + elementary_errorgen_types : tuple of str, optional (default ('H', 'S', 'C', 'A')) + Tuple of strings designating elementary error generator types to include in this + basis. + + max_weights : dict, optional (default None) + A dictionary containing the maximum weight for each of the different error generator + types to include in the constructed basis. If None then + there is no maximum weight. If specified, any error generator + types without entries will have no maximum weight associated + with them. + + sslbl_overlap : list of sslbls, optional (default None) + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this basis. + + default_label_type : str, optional (default 'global') + String specifying the type of error generator label to use by default. + i.e. the type of label returned by `labels`. This also impacts the + construction of the error generator matrices. + Supported options are 'global' or 'local', which correspond to + `GlobalElementaryErrorgenLabel` and `LocalElementaryErrorgenLabel`, + respectively. 
+ """ + + if isinstance(basis_1q, _Basis): + self._basis_1q = basis_1q + elif isinstance(basis_1q, str): + self._basis_1q = _Basis.cast(basis_1q, 4) + else: + self._basis_1q = _Basis.cast('pp', 4) + self._elementary_errorgen_types = tuple(elementary_errorgen_types) # so works for strings like "HSCA" - #REMOVE self._other_mode = other_mode self.state_space = state_space - self._max_ham_weight = max_ham_weight - self._max_other_weight = max_other_weight - self._must_overlap_with_these_sslbls = must_overlap_with_these_sslbls + self.max_weights = max_weights if max_weights is not None else dict() + self._sslbl_overlap = sslbl_overlap + self._default_lbl_typ = default_label_type assert(self.state_space.is_entirely_qubits), "FOGI only works for models containing just qubits (so far)" assert(all([eetyp in ('H', 'S', 'C', 'A') for eetyp in elementary_errorgen_types])), \ "Invalid elementary errorgen type in %s" % str(elementary_errorgen_types) - self._offsets = _collections.OrderedDict() + self._offsets = dict() present_sslbls = set() istart = 0 for eetyp in elementary_errorgen_types: self._offsets[eetyp], sup = self._create_ordered_label_offsets( eetyp, self._basis_1q, self.state_space, - (self._max_ham_weight if eetyp == 'H' else self._max_other_weight), - self._must_overlap_with_these_sslbls, return_total_support=True, initial_offset=istart) + self.max_weights.get(eetyp, None), + self._sslbl_overlap, return_total_support=True, initial_offset=istart) present_sslbls = present_sslbls.union(sup) # set union istart = self._offsets[eetyp]['END'] -#TODO REMOVE -# self._h_offsets, hsup = self._create_ordered_label_offsets('H', self._basis_1q, self.state_space, -# 'diagonal', self._max_ham_weight, -# self._must_overlap_with_these_sslbls, -# return_total_support=True) -# self._hs_border = self._h_offsets['END'] -# self._s_offsets, ssup = self._create_ordered_label_offsets('S', self._basis_1q, self.state_space, -# other_mode, self._max_other_weight, -# 
self._must_overlap_with_these_sslbls, -# return_total_support=True) -# present_sslbls = hsup.union(ssup) # set union - #Note: state space can have additional labels that aren't in support - # (this is, I think, only true when must_overlap_with_these_sslbls != None) + # (this is, I think, only true when sslbl_overlap != None) sslbls = self.state_space.sole_tensor_product_block_labels # all the model's state space labels if set(sslbls) == present_sslbls: @@ -382,9 +558,11 @@ def __init__(self, basis_1q, state_space, elementary_errorgen_types=('H', 'S', ' # this should never happen - somehow the statespace doesn't have all the labels! assert(False), "Logic error! State space doesn't contain all of the present labels!!" - #FUTURE: cache these for speed? - but could just create an explicit basis which would be more transparent - #self._cached_labels = None - #self._cached_elements = None + self._cached_global_labels = None + self._cached_local_labels = None + self._cached_matrices = None + self._cached_dual_matrices = None + self._cached_supports = None # Notes on ordering of labels: # - let there be k nontrivial 1-qubit basis elements (usually k=3) @@ -415,49 +593,159 @@ def __len__(self): return self._offsets[self._elementary_errorgen_types[-1]]['END'] def to_explicit_basis(self): + """ + Creates a new `ExplicitElementaryErrorgenBasis` based on this Basis' elements. + """ return ExplicitElementaryErrorgenBasis(self.state_space, self.labels, self._basis_1q) + #TODO: Why can't this be done at initialization time? 
@property def labels(self): - labels = [] - for eetype in self._elementary_errorgen_types: - labels.extend(self._create_ordered_labels(eetype, self._basis_1q, self.state_space, - self._max_ham_weight if eetype == 'H' else self._max_other_weight, - self._must_overlap_with_these_sslbls)) - return tuple(labels) + """ + Tuple of either `GlobalElementaryErrorgenLabel` or `LocalElementaryErrorgenLabel` objects + for this basis, with which one determined by the `default_label_type` specified on basis + construction. + + For specific label types see the `global_labels` and `local_labels` methods. + """ + + if self._default_lbl_typ == 'global': + return self.global_labels() + else: + return self.local_labels() + + def global_labels(self): + """ + Return a list of labels for this basis as `GlobalElementaryErrorgenLabel` + objects. + """ + if self._cached_global_labels is None: + labels = [] + for eetyp in self._elementary_errorgen_types: + labels.extend(self._create_ordered_labels(eetyp, self._basis_1q, self.state_space, + self.max_weights.get(eetyp, None), + self._sslbl_overlap)) + + self._cached_global_labels = tuple(labels) + return self._cached_global_labels + + def local_labels(self): + """ + Return a list of labels for this basis as `LocalElementaryErrorgenLabel` + objects. + """ + if self._cached_local_labels is None: + if self._cached_global_labels is None: + self._cached_global_labels = self.global_labels() + self._cached_local_labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl, sslbls=self.sslbls) for lbl in self._cached_global_labels]) + return self._cached_local_labels + + def sublabels(self, errorgen_type): + """ + Return a tuple of labels within this basis for the specified error generator + type (may be empty). + + Parameters + ---------- + errorgen_type : 'H', 'S', 'C' or 'A' + String specifying the error generator type to return the labels for. 
+ + Returns + ------- + tuple of either `GlobalElementaryErrorgenLabels` or `LocalElementaryErrorgenLabels` + """ + #TODO: It should be possible to do this much faster than regenerating these from scratch. + #Perhaps by caching the error generators by type at construction time. + labels = self._create_ordered_labels(errorgen_type, self._basis_1q, self.state_space, + self.max_weights.get(errorgen_type, None), + self._sslbl_overlap) + if self._default_lbl_typ == 'local': + labels = tuple([_LocalElementaryErrorgenLabel.cast(lbl, sslbls=self.sslbls) for lbl in labels]) + return labels + + @property + def elemgen_supports(self): + """ + Returns a tuple of tuples, each corresponding to the support + of the elementary error generators in this basis, returned in + the same order as they appear in `labels`. + """ + if self._cached_supports is None: + self._cached_supports = tuple([elemgen_label.sslbls for elemgen_label in self.global_labels()]) + return self._cached_supports + + @property + def elemgen_dual_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + matrix representation of the dual elementary error generators + in this basis, returned in the same order as they appear in `labels`. + """ + if self._cached_dual_matrices is None: + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self.labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self.labels] + self._cached_dual_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit_dual( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) + return self._cached_dual_matrices + + @property + def elemgen_matrices(self): + """ + Returns a tuple of matrices, each corresponding to the + matrix representation of the elementary error generators + in this basis, returned in the same order as they appear in `labels`. 
+ """ + if self._cached_matrices is None: + elemgen_types = [elemgen_label.errorgen_type for elemgen_label in self.labels] + elemgen_labels = [elemgen_label.basis_element_labels for elemgen_label in self.labels] + self._cached_matrices = tuple(_ot.bulk_create_elementary_errorgen_nqudit( + elemgen_types, elemgen_labels, + self._basis_1q, normalize=False, sparse=False, + tensorprod_basis=True)) + return self._cached_matrices @property def elemgen_supports_and_dual_matrices(self): - return tuple(((elemgen_label.sslbls, - _ot.create_elementary_errorgen_nqudit_dual( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True)) # Note: normalize was set to True... - for elemgen_label in self.labels)) + """ + Returns a tuple of tuples, each containing a tuple of support and a dual matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_dual_matrices)) @property def elemgen_supports_and_matrices(self): - return tuple(((elemgen_label.sslbls, - _ot.create_elementary_errorgen_nqudit( - elemgen_label.errorgen_type, elemgen_label.basis_element_labels, - self._basis_1q, normalize=False, sparse=False, - tensorprod_basis=True)) # Note: normalize was set to True... - for elemgen_label in self.labels)) + """ + Returns a tuple of tuples, each containing a tuple of support and a matrix representation + each corresponding to an elementary error generator in this basis, returned in the same + order as they appear in `labels`. + """ + return tuple(zip(self.elemgen_supports, self.elemgen_matrices)) - def label_index(self, elemgen_label, ok_if_missing=False): + def label_index(self, label, ok_if_missing=False, identity_label='I'): """ - TODO: docstring + Return the index of the specified elementary error generator label + in this basis' `labels` list. 
Parameters ---------- - elemgen_label + label : `ElementaryErrorgenLabel` + Elementary error generator label to return index for. ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. + + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. """ - support = elemgen_label.sslbls - eetype = elemgen_label.errorgen_type - bels = elemgen_label.basis_element_labels + if isinstance(label, _LocalElementaryErrorgenLabel): + label = _GlobalElementaryErrorgenLabel.cast(label, self.sslbls, identity_label=identity_label) + + support = label.sslbls + eetype = label.errorgen_type + bels = label.basis_element_labels trivial_bel = self._basis_1q.labels[0] # assumes first element is identity nontrivial_bels = self._basis_1q.labels[1:] @@ -484,21 +772,29 @@ def label_index(self, elemgen_label, ok_if_missing=False): else: raise ValueError("Invalid elementary errorgen type: %s" % str(eetype)) - return base + indices[elemgen_label] + return base + indices[label] - def create_subbasis(self, must_overlap_with_these_sslbls, retain_max_weights=True): + def create_subbasis(self, sslbl_overlap, retain_max_weights=True): """ Create a sub-basis of this basis by including only the elements that overlap the given support (state space labels) """ #Note: state_space is automatically reduced within __init__ when necessary, e.g., when - # `must_overlap_with_these_sslbls` is non-None and considerably reduces the basis. + # `sslbl_overlap` is non-None and considerably reduces the basis. 
return CompleteElementaryErrorgenBasis(self._basis_1q, self.state_space, self._elementary_errorgen_types, - self._max_ham_weight if retain_max_weights else None, - self._max_other_weight if retain_max_weights else None, - must_overlap_with_these_sslbls) + self.max_weights if retain_max_weights else None, + sslbl_overlap) def union(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the union of + this basis with another. + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the union with. + """ # don't convert this basis to an explicit one unless it's necessary - # if `other_basis` is already an explicit basis then let it do the work. if isinstance(other_basis, ExplicitElementaryErrorgenBasis): @@ -507,10 +803,29 @@ def union(self, other_basis): return self.to_explicit_basis().union(other_basis) def intersection(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the intersection of + this basis with another. + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the intersection with. + """ if isinstance(other_basis, ExplicitElementaryErrorgenBasis): return other_basis.intersection(self) else: return self.to_explicit_basis().intersection(other_basis) def difference(self, other_basis): + """ + Create a new `ExplicitElementaryErrorgenBasis` corresponding to the difference of + this basis with another. (i.e. A basis consisting of the labels contained in this basis + but not the other) + + Parameters + ---------- + other_basis : `ElementaryErrorgenBasis` + `ElementaryErrorgenBasis` to construct the difference with. 
+ """ return self.to_explicit_basis().difference(other_basis) diff --git a/pygsti/baseobjs/errorgenlabel.py b/pygsti/baseobjs/errorgenlabel.py index c128b0828..9cb7e8da0 100644 --- a/pygsti/baseobjs/errorgenlabel.py +++ b/pygsti/baseobjs/errorgenlabel.py @@ -29,6 +29,37 @@ class LocalElementaryErrorgenLabel(ElementaryErrorgenLabel): """ @classmethod def cast(cls, obj, sslbls=None, identity_label='I'): + """ + Method for casting an object to an instance of LocalElementaryErrorgenLabel + + Parameters + ---------- + obj : `LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, str, tuple or list + Object to cast. If a `GlobalElementaryErrorgenLabel` then a value for the `sslbls` + argument should be passed with the full list of state space labels for the system. + Other castable options include: + + -str: A string formatted as '([,])'. E.g. 'H(XX)' or + 'C(X,Y)' + -tuple/list: These can be specified either in 'global-style' or 'local-style'. + - local-style: format is (, [,]) + - global-style:format is (, (,[]), ()) + Where sslbls above is specifically the subset of state space labels this error + generator acts on nontrivially. When specifying global-style tuple labels the sslbls kwarg of this method + which contains the complete set of state-space labels must also be specified. + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a GlobalElementaryErrorgenLabel + or from a tuple of length 3 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). + + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. 
+ + Returns + ------- + LocalElementaryErrorgenLabel + """ if isinstance(obj, LocalElementaryErrorgenLabel): return obj elif isinstance(obj, GlobalElementaryErrorgenLabel): @@ -65,11 +96,37 @@ def cast(cls, obj, sslbls=None, identity_label='I'): raise ValueError("Cannot convert %s to a local elementary errorgen label!" % str(obj)) def __init__(self, errorgen_type, basis_element_labels): + """ + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. + + basis_element_labels : tuple or list + A list or tuple of strings labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. + """ + #TODO: Store non-standard identity labels with object so we don't need to specify this in + #support_indices. self.errorgen_type = str(errorgen_type) self.basis_element_labels = tuple(basis_element_labels) + self._hash = hash((self.errorgen_type, self.basis_element_labels)) def __hash__(self): - return hash((self.errorgen_type, self.basis_element_labels)) + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.errorgen_type, self.basis_element_labels)) def __eq__(self, other): return (self.errorgen_type == other.errorgen_type @@ -80,6 +137,16 @@ def __str__(self): def __repr__(self): return str((self.errorgen_type, self.basis_element_labels)) + + def support_indices(self, identity_label='I'): + """ + Returns a sorted tuple of the indices of the nontrivial basis + element label entries for this label. 
+ """ + nonidentity_indices = [i for i in range(len(self.basis_element_labels[0])) + if any([bel[i] != identity_label for bel in self.basis_element_labels])] + + return tuple(nonidentity_indices) class GlobalElementaryErrorgenLabel(ElementaryErrorgenLabel): @@ -91,7 +158,47 @@ class GlobalElementaryErrorgenLabel(ElementaryErrorgenLabel): @classmethod def cast(cls, obj, sslbls=None, identity_label='I'): - """ TODO: docstring - lots in this module """ + """ + Method for casting an object to an instance of GlobalElementaryErrorgenLabel + + Parameters + ---------- + obj : `GlobalElementaryErrorgenLabel`, `LocalElementaryErrorgenLabel`, tuple or list + Object to cast. If a `LocalElementaryErrorgenLabel` then a value for the `sslbls` + argument should be passed with the full list of state space labels for the system. + Other castable options include: + + -str: Following formatting options are supported. + - A string formatted as '([,]:())' where + is the subset of state-space labels this error generator acts on nontrivially + specified as a comma-separated list. E.g. 'H(XX:0,1)' or 'S(XIY):0,2'. + - A string formatted as :, where + is the subset of state-space labels this error generator acts on nontrivially + specified as a comma-separated list. E.g. 'HXX:0,1' or 'SIX:1'. Note this style + is only compatible with basis element label error generators, and this only H and S. + - A string formatted as . For this style the basis element label + is assumed to correspond to the entire state space, and as such the sslbls kwarg + for this method must also be specified. Like the previous example this is also + only compatible with H and S terms. + -tuple/list: These can be specified either in 'global-style' or 'local-style'. + - local-style: format is (, [,]) + - global-style:format is (, (,[]), ()) + Where sslbls above is specifically the subset of state space labels this error + generator acts on nontrivially. 
When specifying global-style tuple labels the sslbls kwarg of this method + which contains the complete set of state-space labels must also be specified. + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a LocalElementaryErrorgenLabel + or from a tuple of length 2 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). + + identity_label : str, optional (default 'I') + An optional string specifying the label used to denote the identity in basis element labels. + + Returns + ------- + GlobalElementaryErrorgenLabel + """ if isinstance(obj, GlobalElementaryErrorgenLabel): return obj elif isinstance(obj, LocalElementaryErrorgenLabel): @@ -116,7 +223,7 @@ def cast(cls, obj, sslbls=None, identity_label='I'): return cls.cast(LocalElementaryErrorgenLabel.cast(obj), sslbls, identity_label) else: # no parenthesis, assume of form "HXX:Q0,Q1" or local label, e.g. "HXX" if ':' in obj: - typ_bel_str, sslbl_str = in_parens.split(':') + typ_bel_str, sslbl_str = obj.split(':') sslbls = [_to_int_or_strip(x) for x in sslbl_str.split(',')] return cls(typ_bel_str[0], (typ_bel_str[1:],), sslbls) else: # treat as a local label @@ -132,6 +239,27 @@ def cast(cls, obj, sslbls=None, identity_label='I'): raise ValueError("Cannot convert %s to a global elementary errorgen label!" % str(obj)) def __init__(self, errorgen_type, basis_element_labels, sslbls, sort=True): + """ + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. + + basis_element_labels : tuple or list + A list or tuple of strings labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. 
+ + sslbls : tuple or list + A tuple or list of state space labels corresponding to the qudits upon which this error generator + is supported. + + sort : bool, optional (default True) + If True then the input state space labels are first sorted, and then the used basis element labels + are sorted to match the order to the newly sorted state space labels. + """ + if sort: sorted_indices, sslbls = zip(*sorted(enumerate(sslbls), key=lambda x: x[1])) basis_element_labels = [''.join([bel[i] for i in sorted_indices]) for bel in basis_element_labels] @@ -141,9 +269,21 @@ def __init__(self, errorgen_type, basis_element_labels, sslbls, sort=True): self.sslbls = tuple(sslbls) # Note: each element of basis_element_labels must be an iterable over # 1-qubit basis labels of length len(self.sslbls) (?) + self._hash = hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) def __hash__(self): - return hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.errorgen_type, self.basis_element_labels, self.sslbls)) def __eq__(self, other): return (self.errorgen_type == other.errorgen_type diff --git a/pygsti/baseobjs/statespace.py b/pygsti/baseobjs/statespace.py index 7081b8f17..e261daf07 100644 --- a/pygsti/baseobjs/statespace.py +++ b/pygsti/baseobjs/statespace.py @@ -469,7 +469,7 @@ def intersection(self, other_state_space): other_udim = other_state_space.label_udimension(lbl) other_typ = other_state_space.label_type(lbl) if other_iTPB != iTPB or other_udim != udim or other_typ != typ: - raise ValueError(("Cannot take state space union: repeated label '%s' has inconsistent index," + raise ValueError(("Cannot take state space intersection: repeated label '%s' has inconsistent index," " dim, 
or type!") % str(lbl)) ret_lbls.append(lbl) ret_udims.append(udim) @@ -534,6 +534,58 @@ def union(self, other_state_space): ret_tpb_types[iTPB].append(typ) return ExplicitStateSpace(ret_tpb_labels, ret_tpb_udims, ret_tpb_types) + + + def difference(self, other_state_space): + """ + Create a state space whose labels are the difference of the labels of this space and one other. + I.e. a state space containing the labels of this space which don't appear in the other. + + Dimensions associated with the labels are preserved, as is the tensor product block index. + If the two spaces have the same label, but their dimensions or indices do not agree, an + error is raised. + + Parameters + ---------- + other_state_space : StateSpace + The other state space. + + Returns + ------- + StateSpace + """ + ret_tpb_labels = [] + ret_tpb_udims = [] + ret_tpb_types = [] + + for iTPB, (lbls, udims, typs) in enumerate(zip(self.tensor_product_blocks_labels, + self.tensor_product_blocks_udimensions, + self.tensor_product_blocks_types)): + ret_lbls = []; ret_udims = []; ret_types = [] + for lbl, udim, typ in zip(lbls, udims, typs): + #If the label does appear in the other state space, verify that the + #properties of the label are consistently defined accross the two state spaces + #otherwise raise an error. + if other_state_space.contains_label(lbl): + other_iTPB = other_state_space.label_tensor_product_block_index(lbl) + other_udim = other_state_space.label_udimension(lbl) + other_typ = other_state_space.label_type(lbl) + if other_iTPB != iTPB or other_udim != udim or other_typ != typ: + raise ValueError(("Cannot take state space difference: repeated label '%s' has inconsistent index," + " dim, or type!") % str(lbl)) + continue + #Otherwise add this to the state space. 
+ else: + ret_lbls.append(lbl) + ret_udims.append(udim) + ret_types.append(typ) + + if len(ret_lbls) > 0: + ret_tpb_labels.append(ret_lbls) + ret_tpb_udims.append(ret_udims) + ret_tpb_types.append(ret_types) + + return ExplicitStateSpace(ret_tpb_labels, ret_tpb_udims, ret_tpb_types) def create_stencil_subspace(self, labels): """ @@ -604,33 +656,63 @@ class QuditSpace(StateSpace): def __init__(self, nqudits_or_labels, udim_or_udims): super().__init__() if isinstance(nqudits_or_labels, int): - self.qudit_labels = tuple(range(nqudits_or_labels)) + self._qudit_labels = tuple(range(nqudits_or_labels)) else: - self.qudit_labels = tuple(nqudits_or_labels) + self._qudit_labels = tuple(nqudits_or_labels) if isinstance(udim_or_udims, int): - self.qudit_udims = tuple([udim_or_udims] * len(self.qudit_labels)) + self._qudit_udims = tuple([udim_or_udims] * len(self._qudit_labels)) else: - self.qudit_udims = tuple(udim_or_udims) - assert(len(self.qudit_udims) == len(self.qudit_labels)), \ + self._qudit_udims = tuple(udim_or_udims) + assert(len(self._qudit_udims) == len(self._qudit_labels)), \ "`udim_or_udims` must either be an interger or have length equal to the number of qudits!" 
+ + #This state space is effectively static, so we can precompute the hash for it for performance + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) + + def __hash__(self): + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) def _to_nice_serialization(self): state = super()._to_nice_serialization() - state.update({'qudit_labels': self.qudit_labels, - 'qudit_udims': self.qudit_udims}) + state.update({'qudit_labels': self._qudit_labels, + 'qudit_udims': self._qudit_udims}) return state @classmethod def _from_nice_serialization(cls, state): return cls(state['qudit_labels'], state['qudit_udims']) + @property + def qudit_labels(self): + """The labels of the qudits in this state space.""" + return self._qudit_labels + + @property + def qudit_udims(self): + """Integer Hilbert (unitary operator) space dimensions of the qudits in this quantum state space.""" + return self._qudit_udims @property def udim(self): """ Integer Hilbert (unitary operator) space dimension of this quantum state space. """ - return _np.prod(self.qudit_udims) + return _np.prod(self._qudit_udims) @property def dim(self): @@ -642,7 +724,7 @@ def num_qudits(self): # may raise ValueError if the state space doesn't consist """ The number of qubits in this quantum state space. 
""" - return len(self.qudit_labels) + return len(self._qudit_labels) @property def num_tensor_product_blocks(self): @@ -664,7 +746,7 @@ def tensor_product_blocks_labels(self): ------- tuple of tuples """ - return (self.qudit_labels,) + return (self._qudit_labels,) @property def tensor_product_blocks_dimensions(self): @@ -675,7 +757,7 @@ def tensor_product_blocks_dimensions(self): ------- tuple of tuples """ - return (tuple([udim**2 for udim in self.qudit_udims]),) + return (tuple([udim**2 for udim in self._qudit_udims]),) @property def tensor_product_blocks_udimensions(self): @@ -686,7 +768,7 @@ def tensor_product_blocks_udimensions(self): ------- tuple of tuples """ - return (self.qudit_udims,) + return (self._qudit_udims,) @property def tensor_product_blocks_types(self): @@ -697,7 +779,7 @@ def tensor_product_blocks_types(self): ------- tuple of tuples """ - return (('Q',) * len(self.qudit_labels)) + return (('Q',) * len(self._qudit_labels)) def label_dimension(self, label): """ @@ -712,9 +794,9 @@ def label_dimension(self, label): ------- int """ - if label in self.qudit_labels: - i = self.qudit_labels.index(label) - return self.qudit_udims[i]**2 + if label in self._qudit_labels: + i = self._qudit_labels.index(label) + return self._qudit_udims[i]**2 else: raise KeyError("Invalid qudit label: %s" % label) @@ -731,9 +813,9 @@ def label_udimension(self, label): ------- int """ - if label in self.qudit_labels: - i = self.qudit_labels.index(label) - return self.qudit_udims[i] + if label in self._qudit_labels: + i = self._qudit_labels.index(label) + return self._qudit_udims[i] else: raise KeyError("Invalid qudit label: %s" % label) @@ -750,7 +832,7 @@ def label_tensor_product_block_index(self, label): ------- int """ - if label in self.qudit_labels: + if label in self._qudit_labels: return 0 else: raise KeyError("Invalid qudit label: %s" % label) @@ -768,13 +850,13 @@ def label_type(self, label): ------- str """ - if label in self.qudit_labels: + if label in 
self._qudit_labels: return 'Q' else: raise KeyError("Invalid qudit label: %s" % label) def __str__(self): - return 'QuditSpace(' + str(self.qudit_labels) + ")" + return 'QuditSpace(' + str(self._qudit_labels) + ")" class QubitSpace(QuditSpace): @@ -809,7 +891,7 @@ def dim(self): @property def qubit_labels(self): """The labels of the qubits""" - return self.qudit_labels + return self._qudit_labels @property def num_qubits(self): # may raise ValueError if the state space doesn't consist entirely of qubits @@ -1035,10 +1117,10 @@ def is_label(x): if udims is not None: udims = [udims] if types is not None: types = [types] - self.labels = tuple([tuple(tpbLabels) for tpbLabels in label_list]) + self._labels = tuple([tuple(tpbLabels) for tpbLabels in label_list]) #Type check - labels must be strings or ints - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: if not is_label(lbl): raise ValueError("'%s' is an invalid state-space label (must be a string or integer)" % lbl) @@ -1046,11 +1128,11 @@ def is_label(x): # Get the type of each labeled space self.label_types = {} if types is None: # use defaults - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: self.label_types[lbl] = 'C' if (isinstance(lbl, str) and lbl.startswith('C')) else 'Q' # default else: - for tpbLabels, tpbTypes in zip(self.labels, types): + for tpbLabels, tpbTypes in zip(self._labels, types): for lbl, typ in zip(tpbLabels, tpbTypes): self.label_types[lbl] = typ @@ -1058,7 +1140,7 @@ def is_label(x): self.label_udims = {} self.label_dims = {} if udims is None: - for tpbLabels in self.labels: # loop over tensor-prod-blocks + for tpbLabels in self._labels: # loop over tensor-prod-blocks for lbl in tpbLabels: if isinstance(lbl, _numbers.Integral): d = 2 # ints = qubits elif lbl.startswith('T'): d = 3 # qutrit @@ 
-1069,7 +1151,7 @@ def is_label(x): self.label_udims[lbl] = d self.label_dims[lbl] = d**2 if (isinstance(lbl, _numbers.Integral) or lbl[0] in ('Q', 'T')) else d else: - for tpbLabels, tpbDims in zip(self.labels, udims): + for tpbLabels, tpbDims in zip(self._labels, udims): for lbl, udim in zip(tpbLabels, tpbDims): self.label_udims[lbl] = udim self.label_dims[lbl] = udim**2 @@ -1080,7 +1162,7 @@ def is_label(x): self.tpb_dims = [] self.tpb_udims = [] - for iTPB, tpbLabels in enumerate(self.labels): + for iTPB, tpbLabels in enumerate(self._labels): float_prod = _np.prod(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd')) if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer self.tpb_dims.append(_np.inf) @@ -1099,17 +1181,38 @@ def is_label(x): self._udim = sum(self.tpb_udims) self._nqubits = self._nqudits = None - if len(self.labels) == 1: + if len(self._labels) == 1: if all([v == 2 for v in self.label_udims.values()]): - self._nqudits = self._nqubits = len(self.labels[0]) # there's a well-defined number of qubits + self._nqudits = self._nqubits = len(self._labels[0]) # there's a well-defined number of qubits elif all([typ == 'Q' for typ in self.label_types.values()]): - self._nqudits = len(self.labels[0]) + self._nqudits = len(self._labels[0]) + + #This state space is effectively static, so we can precompute the hash for it for performance + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) + + def __hash__(self): + return self._hash + + #pickle management functions + def __getstate__(self): + state_dict = self.__dict__ + return state_dict + + def __setstate__(self, state_dict): + for k, v in state_dict.items(): + self.__dict__[k] = v + #reinitialize the hash + self._hash = hash((self.tensor_product_blocks_labels, + self.tensor_product_blocks_dimensions, + self.tensor_product_blocks_types)) def _to_nice_serialization(self): state = 
super()._to_nice_serialization() - state.update({'labels': self.labels, - 'unitary_space_dimensions': [[self.label_udims[l] for l in tpb] for tpb in self.labels], - 'types': [[self.label_types[l] for l in tpb] for tpb in self.labels] + state.update({'labels': self._labels, + 'unitary_space_dimensions': [[self.label_udims[l] for l in tpb] for tpb in self._labels], + 'types': [[self.label_types[l] for l in tpb] for tpb in self._labels] }) return state @@ -1117,6 +1220,17 @@ def _to_nice_serialization(self): def _from_nice_serialization(cls, state): return cls(state['labels'], state['unitary_space_dimensions'], state['types']) + @property + def labels(self): + """ + The labels for all the tensor-product blocks. + + Returns + ------- + tuple of tuples + """ + return self._labels + @property def udim(self): """ @@ -1162,7 +1276,7 @@ def num_tensor_product_blocks(self): ------- int """ - return len(self.labels) + return len(self._labels) @property def tensor_product_blocks_labels(self): @@ -1173,7 +1287,7 @@ def tensor_product_blocks_labels(self): ------- tuple of tuples """ - return self.labels + return self._labels @property def tensor_product_blocks_dimensions(self): @@ -1184,7 +1298,7 @@ def tensor_product_blocks_dimensions(self): ------- tuple of tuples """ - return tuple([tuple([self.label_dims[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_dims[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) @property def tensor_product_blocks_udimensions(self): @@ -1195,7 +1309,7 @@ def tensor_product_blocks_udimensions(self): ------- tuple of tuples """ - return tuple([tuple([self.label_udims[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_udims[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) @property def tensor_product_blocks_types(self): @@ -1206,7 +1320,7 @@ def tensor_product_blocks_types(self): ------- tuple of tuples """ - return 
tuple([tuple([self.label_types[lbl] for lbl in tpb_labels]) for tpb_labels in self.labels]) + return tuple([tuple([self.label_types[lbl] for lbl in tpb_labels]) for tpb_labels in self._labels]) def label_dimension(self, label): """ @@ -1269,10 +1383,10 @@ def label_type(self, label): return self.label_types[label] def __str__(self): - if len(self.labels) == 0: return "ZeroDimSpace" + if len(self._labels) == 0: return "ZeroDimSpace" return ' + '.join( ['*'.join(["%s(%d%s)" % (lbl, self.label_dims[lbl], 'c' if (self.label_types[lbl] == 'C') else '') - for lbl in tpb]) for tpb in self.labels]) + for lbl in tpb]) for tpb in self._labels]) def default_space_for_dim(dim): diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index f6c2e2043..e0d5d71b9 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -3761,6 +3761,72 @@ def _write_q_circuit_tex(self, filename): # TODO f.write("\\end{document}") f.close() + + def convert_to_stim_tableau_layers(self, gate_name_conversions=None, num_qubits=None): + """ + Converts this circuit to a list of stim tableau layers + + Parameters + ---------- + gate_name_conversions : dict, optional (default None) + A map from pygsti gatenames to standard stim tableaus. + If None a standard set of gate names is used from + `pygsti.tools.internalgates` + + Returns + ------- + A layer by layer list of stim tableaus + """ + try: + import stim + except ImportError: + raise ImportError("Stim is required for this operation, and it does not appear to be installed.") + if gate_name_conversions is None: + gate_name_conversions = _itgs.standard_gatenames_stim_conversions() + + if num_qubits is None: + line_labels = self._line_labels + assert line_labels != ('*',), "Cannot convert circuits with placeholder line label to stim Tableau unless number of qubits is specified." 
+ num_qubits=len(line_labels) + + stim_layers=[] + + if self._static: + circuit_layers = [layer.components for layer in self._labels] + else: + circuit_layers = self._labels + empty_tableau = stim.Tableau(num_qubits) + for layer in circuit_layers: + stim_layer = empty_tableau.copy() + for sub_lbl in layer: + temp = gate_name_conversions[sub_lbl.name] + stim_layer.append(temp, sub_lbl.qubits) + stim_layers.append(stim_layer) + return stim_layers + + def convert_to_stim_tableau(self, gate_name_conversions=None): + """ + Converts this circuit to a stim tableau + + Parameters + ---------- + gate_name_conversions : dict, optional (default None) + A map from pygsti gatenames to standard stim tableaus. + If None a standard set of gate names is used from + `pygsti.tools.internalgates` + + Returns + ------- + A single stim.Tableau representing the entire circuit, or None if the circuit is empty. + """ + layers=self.convert_to_stim_tableau_layers(gate_name_conversions) + #guard against an empty circuit: return None instead of raising NameError + tableau = layers[0] if layers else None + for layer in layers[1:]: + tableau = layer*tableau + return tableau + + def convert_to_cirq(self, qubit_conversion, wait_duration=None, diff --git a/pygsti/errorgenpropagation/__init__.py b/pygsti/errorgenpropagation/__init__.py new file mode 100644 index 000000000..834cec2d7 --- /dev/null +++ b/pygsti/errorgenpropagation/__init__.py @@ -0,0 +1,10 @@ +""" Error Generator Propagation Sub-package """ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + diff --git a/pygsti/errorgenpropagation/errorpropagator.py b/pygsti/errorgenpropagation/errorpropagator.py new file mode 100644 index 000000000..dcd93a960 --- /dev/null +++ b/pygsti/errorgenpropagation/errorpropagator.py @@ -0,0 +1,721 @@ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** +import warnings +try: + import stim +except ImportError: + msg = "Stim is required for use of the error generator propagation module, " \ + "and it does not appear to be installed. If you intend to use this module please update" \ + " your environment." 
+ warnings.warn(msg) +import numpy as _np +import scipy.linalg as _spl +from .localstimerrorgen import LocalStimErrorgenLabel as _LSE +from numpy import zeros, complex128 +from numpy.linalg import multi_dot +from scipy.linalg import expm +from pygsti.tools.internalgates import standard_gatenames_stim_conversions +import copy as _copy +from pygsti.baseobjs import Label, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel +import pygsti.tools.errgenproptools as _eprop +import pygsti.tools.basistools as _bt +import pygsti.tools.matrixtools as _mt +import pygsti.tools.optools as _ot +from pygsti.models.model import OpModel as _OpModel +from pygsti.models import ExplicitOpModel as _ExplicitOpModel, ImplicitOpModel as _ImplicitOpModel +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from itertools import islice + +class ErrorGeneratorPropagator: + + def __init__(self, model): + """ + Initialize an instance of `ErrorGeneratorPropagator`. This class is instantiated with a noise model + and manages operations related to propagating error generators through circuits, and constructing + effective end-of-circuit error generators. + + Parameters + ---------- + model: `OpModel` + This model is used to construct error generators for each layer of a circuit + through which error generators are to be propagated. + """ + self.model = model + + def eoc_error_channel(self, circuit, include_spam=True, use_bch=False, + bch_kwargs=None, mx_basis='pp'): + """ + Propagate all of the error generators for each circuit layer to the end of the circuit + and return the result of exponentiating these error generators, and if necessary taking + their products, to return the end of circuit error channel. 
+ + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + + use_bch : bool, optional (default False) + If True use the BCH approximation as part of the propagation algorithm. + + bch_kwargs : dict, optional (default None) + Only used if `use_bch` is True, this dictionary contains a set of + BCH-specific kwargs which are passed to `propagate_errorgens_bch`. + + mx_basis : Basis or str, optional (default 'pp') + Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + basis in which to return the process matrix for the error channel. + + Returns + ------- + eoc_error_channel : numpy.ndarray + A numpy array corresponding to the end-of-circuit error channel resulting + from the propagated error generators, expressed in the basis given by `mx_basis`. + """ + + if use_bch: + #should return a single dictionary of error generator rates + propagated_error_generator = self.propagate_errorgens_bch(circuit, **bch_kwargs) + #convert this to a process matrix + return _spl.expm(self.errorgen_layer_dict_to_errorgen(propagated_error_generator, mx_basis='pp')) + + else: + propagated_error_generators = self.propagate_errorgens(circuit, include_spam) + #loop through the propagated error generator layers and construct their error generators. + #Then exponentiate + exp_error_generators = [] + for err_gen_layer in propagated_error_generators: + if err_gen_layer: #if not empty. + #Keep the error generator in the standard basis until after the end-of-circuit + #channel is constructed so we can reduce the overhead of changing basis. + exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer, mx_basis='pp'))) + #Next take the product of these exponentiated error generators. + #These are in circuit ordering, so reverse for matmul. 
+ exp_error_generators.reverse() + if len(exp_error_generators)>1: + eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + else: + eoc_error_channel = exp_error_generators[0] + + if mx_basis != 'pp': + eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='pp', to_basis=mx_basis) + + return eoc_error_channel + # + #def averaged_eoc_error_channel(self, circuit, include_spam=True, mx_basis='pp'): + # """ + # Propagate all of the error generators for each circuit layer to the end of the circuit, + # then apply a second order cumulant expansion to approximate the average of the end of circuit + # error channel over the values error generator rates that are stochastic processes. +# + # Parameters + # ---------- + # circuit : `Circuit` + # Circuit to construct a set of post gate error generators for. +# + # include_spam : bool, optional (default True) + # If True then we include in the propagation the error generators associated + # with state preparation and measurement. +# + # mx_basis : Basis or str, optional (default 'pp') + # Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + # basis in which to return the process matrix for the error channel. +# + # Returns + # ------- + # avg_eoc_error_channel : numpy.ndarray + # A numpy array corresponding to the end-of-circuit error channel resulting + # from the propagated error generators and averaging over the stochastic processes + # for the error generator rates using a second order cumulant approximation. 
+ # """ +# + # #propagate_errorgens_nonmarkovian returns a list of list of + # propagated_error_generators = self.propagate_errorgens_nonmarkovian(circuit, include_spam) + # + # #construct the nonmarkovian propagators + # for i in range(len(propagated_error_generators)): + # for j in range(i+1): + # if i==j: + # # term: + # pass + # #prop_contrib = amam + # else: + # pass + # + # + # #loop though the propagated error generator layers and construct their error generators. + # #Then exponentiate + # exp_error_generators = [] + # for err_gen_layer_list in propagated_error_generators: + # if err_gen_layer_list: #if not empty. Should be length one if not empty. + # #Keep the error generator in the standard basis until after the end-of-circuit + # #channel is constructed so we can reduce the overhead of changing basis. + # exp_error_generators.append(_spl.expm(self.errorgen_layer_dict_to_errorgen(err_gen_layer_list[0], mx_basis='std'))) + # #Next take the product of these exponentiated error generators. + # #These are in circuit ordering, so reverse for matmul. + # exp_error_generators.reverse() + # eoc_error_channel = _np.linalg.multi_dot(exp_error_generators) + # eoc_error_channel = _bt.change_basis(eoc_error_channel, from_basis='std', to_basis='pp') +# + # return eoc_error_channel +# + + def propagate_errorgens(self, circuit, include_spam=True): + """ + Propagate all of the error generators for each circuit layer to the end without + any recombinations or averaging. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. + + Returns + ------- + propagated_errorgen_layers : list of lists of dictionaries + A list of lists of dictionaries, each corresponding to the result of propagating + an error generator layer through to the end of the circuit. 
+ """ + #TODO: Check for proper handling of empty circuit and length 1 circuits. + + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. + stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. + propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam) + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. + propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) + + return propagated_errorgen_layers + + + def propagate_errorgens_bch(self, circuit, bch_order=1, include_spam=True, truncation_threshold=1e-14): + """ + Propagate all of the error generators for each circuit to the end, + performing approximation/recombination using the BCH approximation. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct a set of post gate error generators for. + + bch_order : int, optional (default 1) + Order of the BCH approximation to use. A maximum value of 4 is + currently supported. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. 
+ + truncation_threshold : float, optional (default 1e-14) + Threshold below which any error generators with magnitudes below this value + are truncated during the BCH approximation. + """ + + propagated_errorgen_layers = self.propagate_errorgens(circuit, include_spam=include_spam) + #if length one no need to do anything. + if len(propagated_errorgen_layers)==1: + return propagated_errorgen_layers[0] + + #otherwise iterate through in reverse order (the propagated layers are + #in circuit ordering and not matrix multiplication ordering at the moment) + #and combine the terms pairwise + combined_err_layer = propagated_errorgen_layers[-1] + for i in range(len(propagated_errorgen_layers)-2, -1, -1): + combined_err_layer = _eprop.bch_approximation(combined_err_layer, propagated_errorgen_layers[i], + bch_order=bch_order, truncation_threshold=truncation_threshold) + + return combined_err_layer + + +# def propagate_errorgens_nonmarkovian(self, circuit, include_spam=True): +# """ +# Propagate all of the error generators for each circuit layer to the end without +# any recombinations or averaging. This version also only track the overall modifier/weighting +# factor picked up by each of the final error generators over the course of the optimization, +# with the actual rates introduced in subsequent stages. +# +# Parameters +# ---------- +# circuit : `Circuit` +# Circuit to construct a set of post gate error generators for. +# +# include_spam : bool, optional (default True) +# If True then we include in the propagation the error generators associated +# with state preparation and measurement. +# +# Returns +# ------- +# propagated_errorgen_layers : list of lists of dictionaries +# A list of lists of dictionaries, each corresponding to the result of propagating +# an error generator layer through to the end of the circuit. +# +# """ +# #start by converting the input circuit into a list of stim Tableaus with the +# #first element dropped. 
+# stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) +# +# #We next want to construct a new set of Tableaus corresponding to the cumulative products +# #of each of the circuit layers with those that follow. These Tableaus correspond to the +# #clifford operations each error generator will be propagated through in order to reach the +# #end of the circuit. +# propagation_layers = self.construct_propagation_layers(stim_layers) +# +# #Next we take the input circuit and construct a list of dictionaries, each corresponding +# #to the error generators for a particular gate layer. +# #TODO: Add proper inferencing for number of qubits: +# assert circuit.line_labels is not None and circuit.line_labels != ('*',) +# errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, +# include_circuit_time=True, fixed_rate=1) +# #propagate the errorgen_layers through the propagation_layers to get a list +# #of end of circuit error generator dictionaries. +# propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) +# +# #in the context of doing propagation for nonmarkovianity we won't be using BCH, so do a partial flattening +# #of this data structure. +# propagated_errorgen_layers = [errorgen_layers[0] for errorgen_layers in propagated_errorgen_layers] +# +# return propagated_errorgen_layers + + + def errorgen_transform_map(self, circuit, include_spam=True): + """ + Construct a map giving the relationship between input error generators and their final + value following propagation through the circuit. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct error generator transform map for. + + include_spam : bool, optional (default True) + If True then we include in the propagation the error generators associated + with state preparation and measurement. 
+ """ + #start by converting the input circuit into a list of stim Tableaus with the + #first element dropped. + stim_layers = self.construct_stim_layers(circuit, drop_first_layer = not include_spam) + + #We next want to construct a new set of Tableaus corresponding to the cumulative products + #of each of the circuit layers with those that follow. These Tableaus correspond to the + #clifford operations each error generator will be propagated through in order to reach the + #end of the circuit. + propagation_layers = self.construct_propagation_layers(stim_layers) + + #Next we take the input circuit and construct a list of dictionaries, each corresponding + #to the error generators for a particular gate layer. + #TODO: Add proper inferencing for number of qubits: + assert circuit.line_labels is not None and circuit.line_labels != ('*',) + errorgen_layers = self.construct_errorgen_layers(circuit, len(circuit.line_labels), include_spam, fixed_rate=1) + #propagate the errorgen_layers through the propagation_layers to get a list + #of end of circuit error generator dictionaries. + propagated_errorgen_layers = self._propagate_errorgen_layers(errorgen_layers, propagation_layers, include_spam) + + #there should be a one-to-one mapping between the index into propagated_errorgen_layers and the + #index of the circuit layer where the error generators in that propagated layer originated. + #Moreover, LocalStimErrorgenLabels remember who they were at instantiation. 
+ input_output_errgen_map = dict() + for i, output_layer in enumerate(propagated_errorgen_layers): + for output_label, output_rate in output_layer.items(): + original_label = _LSE.cast(output_label.initial_label) + input_output_errgen_map[(original_label, i)] = (output_label, output_rate) + + return input_output_errgen_map + + def errorgen_gate_contributors(self, errorgen, circuit, layer_idx, include_spam=True): + """ + Walks through the gates in the specified circuit layer and query the parent + model to figure out which gates could have given rise to a particular error generator + in a layer. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator layer to find instance of. + + circuit : `Circuit` + Circuit to identify potential gates in. + + layer_idx : int + Index of circuit layer. + + include_spam : bool, optional (default True) + If True include the spam circuit layers at the beginning and + end of the circuit. + + Returns + ------- + label_list_for_errorgen : list of `Label` + A list of gate labels contained within this circuit layer that could have + contributed this error generator. + """ + + if not isinstance(self.model, _OpModel): + raise ValueError('This method does not work for non-OpModel models.') + + if include_spam: + circuit = self.model.complete_circuit(circuit) + + assert layer_idx < len(circuit), f'layer_idx {layer_idx} is out of range for circuit with length {len(circuit)}' + + if isinstance(errorgen, _GlobalElementaryErrorgenLabel): + errorgen = _LocalElementaryErrorgenLabel.cast(errorgen, sslbls = self.model.state_space.qubit_labels) + elif isinstance(errorgen, _LSE): + errorgen = errorgen.to_local_eel() + else: + assert isinstance(errorgen, _LocalElementaryErrorgenLabel), f'Unsupported `errorgen` type {type(errorgen)}.' 
+ + circuit_layer = circuit.layer(layer_idx) + + if isinstance(self.model, _ExplicitOpModel): + #check if this error generator is in the error generator coefficient dictionary for this layer, and if not return the empty dictionary. + layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients(label_type='local') + if errorgen in layer_errorgen_coeff_dict: + label_list_for_errorgen = [circuit_layer] + else: + label_list_for_errorgen = [] + + elif isinstance(self.model, _ImplicitOpModel): + #Loop through each label in this layer and ask for the circuit layer operator + #for each. Then query this for the error generator coefficients associated + #with that layer. + #Note: This may not be 100% robust, I'm assuming there aren't any exotic layer rules + #that would, e.g., add in totally new error generators when certain pairs of gates appear in a layer. + label_list_for_errorgen = [] + for lbl in circuit_layer: + circuit_layer_operator = self.model.circuit_layer_operator(lbl) + label_errorgen_coeff_dict = circuit_layer_operator.errorgen_coefficients(label_type='local') + if errorgen in label_errorgen_coeff_dict: + label_list_for_errorgen.append(lbl) + else: + raise ValueError(f'Type of model {type(self.model)=} is not supported with this method.') + + return label_list_for_errorgen + + def construct_stim_layers(self, circuit, drop_first_layer=True): + """ + Converts a `Circuit` to a list of stim Tableau objects corresponding to each + gate layer. + + Parameters + ---------- + circuit : `Circuit` + Circuit to convert. + + drop_first_layer : bool, optional (default True) + If True the first Tableau for the first gate layer is dropped in the returned output. + This default setting is what is primarily used in the context of error generator + propagation. 
+ + Returns + ------- + stim_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to the ideal Clifford operation + for each layer of the input pygsti `Circuit`, with the first layer optionally dropped. + """ + + stim_dict=standard_gatenames_stim_conversions() + stim_layers=circuit.convert_to_stim_tableau_layers(gate_name_conversions=stim_dict) + if drop_first_layer and len(stim_layers)>0: + stim_layers = stim_layers[1:] + return stim_layers + + def construct_propagation_layers(self, stim_layers): + """ + Construct a list of stim Tableau objects corresponding to the Clifford + operation each error generator will be propagated through. This corresponds + to a list of cumulative products of the ideal operations, but in reverse. + I.e. the initial entry corresponds to the product (in matrix multiplication order) + of all elements of `stim_layers`, the second entry is the product of the elements of + `stim_layers[1:]`, then `stim_layers[2:]` and so on until the last entry which is + `stim_layers[-1]`. + + Parameters + ---------- + stim_layers : list of stim.Tableau + The list of stim.Tableau objects corresponding to a set of ideal Clifford + operation for each circuit layer through which we will be propagating error + generators. + + Returns + ------- + propagation_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to a cumulative product of + ideal Clifford operations for a set of circuit layers, each corresponding to a layer + of operations which we will be propagating error generators through. 
+ """ + if len(stim_layers) > 1: + propagation_layers = [0]*len(stim_layers) + #if propagation_layers is empty that means that stim_layers was empty + #final propagation layer is the final stim layer for the circuit + propagation_layers[-1] = stim_layers[-1] + for layer_idx in reversed(range(len(stim_layers)-1)): + propagation_layers[layer_idx] = propagation_layers[layer_idx+1]*stim_layers[layer_idx] + elif len(stim_layers) == 1: + propagation_layers = stim_layers + else: + propagation_layers = [] + return propagation_layers + + def construct_errorgen_layers(self, circuit, num_qubits, include_spam=True, include_circuit_time=False, fixed_rate=None): + """ + Construct a nested list of lists of dictionaries corresponding to the error generators for each circuit layer. + This is currently (as implemented) only well defined for `ExplicitOpModels` where each layer corresponds + to a single 'gate'. This should also in principle work for crosstalk-free `ImplicitOpModels`, but is not + configured to do so just yet. The entries of the top-level list correspond to circuit layers, while the entries + of the second level (i.e. the dictionaries at each layer) correspond to different orders of the BCH approximation. + + Parameters + ---------- + circuit : `Circuit` + Circuit to construct the error generator layers for. + + num_qubits : int + Total number of qubits, used for padding out error generator coefficient labels. + + include_spam : bool, optional (default True) + If True then include the error generators for state preparation and measurement. + + include_circuit_time : bool, optional (default False) + If True then include as part of the error generator coefficient labels the circuit + time from which that error generator arose. + + fixed_rate : float, optional (default None) + If specified this rate is used for all of the error generator coefficients, overriding the + value currently found in the model. 
+ Returns + ------- + List of dictionaries, each one containing the error generator coefficients and rates for a circuit layer, + with the error generator coefficients now represented using LocalStimErrorgenLabel. + + """ + #If including spam then start by completing the circuit (i.e. adding in the explicit SPAM labels). + if include_spam: + circuit = self.model.complete_circuit(circuit) + + #TODO: Infer the number of qubits from the model and/or the circuit somehow. + #Pull out the error generator dictionaries for each operation (may need to generalize this for implicit models): + #model_error_generator_dict = dict() #key will be a label and value the lindblad error generator dictionary. + #for op_lbl, op in self.model.operations.items(): + # #TODO add assertion that the operation is a lindblad error generator type modelmember. + # model_error_generator_dict[op_lbl] = op.errorgen_coefficients() + #add in the error generators for the prep and measurement if needed. + #if include_spam: + # for prep_lbl, prep in self.model.preps.items(): + # model_error_generator_dict[prep_lbl] = prep.errorgen_coefficients() + # for povm_lbl, povm in self.model.povms.items(): + # model_error_generator_dict[povm_lbl] = povm.errorgen_coefficients() + + #TODO: Generalize circuit time to not be in one-to-one correspondence with the layer index. + error_gen_dicts_by_layer = [] + + #cache the error generator coefficients for a circuit layer to accelerate cases where we've already seen that layer. + circuit_layer_errorgen_cache = dict() + + for j in range(len(circuit)): + circuit_layer = circuit[j] # get the layer + #can probably relax this if we detect that the model is a crosstalk free model. + #assert isinstance(circuit_layer, Label), 'Correct support for parallel gates is still under development.' 
+ errorgen_layer = dict() + + layer_errorgen_coeff_dict = circuit_layer_errorgen_cache.get(circuit_layer, None) + if layer_errorgen_coeff_dict is None: + layer_errorgen_coeff_dict = self.model.circuit_layer_operator(circuit_layer).errorgen_coefficients(label_type='local') #get the errors for the gate + circuit_layer_errorgen_cache[circuit_layer] = layer_errorgen_coeff_dict + + for errgen_coeff_lbl, rate in layer_errorgen_coeff_dict.items(): #for an error in the accompanying error dictionary + #only track this error generator if its rate is not exactly zero. #TODO: Add more flexible initial truncation logic. + if rate !=0 or fixed_rate is not None: + #if isinstance(errgen_coeff_lbl, _LocalElementaryErrorgenLabel): + initial_label = errgen_coeff_lbl + #else: + # initial_label = None + #TODO: Can probably replace this function call with `padded_basis_element_labels` method of `GlobalElementaryErrorgenLabel` + paulis = _eprop.errgen_coeff_label_to_stim_pauli_strs(errgen_coeff_lbl, num_qubits) + pauli_strs = errgen_coeff_lbl.basis_element_labels #get the original python string reps from local labels + if include_circuit_time: + #TODO: Refactor the fixed rate stuff to reduce the number of if statement evaluations. + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, circuit_time=j, + initial_label=initial_label, pauli_str_reps=pauli_strs)] = rate if fixed_rate is None else fixed_rate + else: + errorgen_layer[_LSE(errgen_coeff_lbl.errorgen_type, paulis, initial_label=initial_label, + pauli_str_reps=pauli_strs)] = rate if fixed_rate is None else fixed_rate + error_gen_dicts_by_layer.append(errorgen_layer) + return error_gen_dicts_by_layer + + def _propagate_errorgen_layers(self, errorgen_layers, propagation_layers, include_spam=True): + """ + Propagates the error generator layers through each of the corresponding propagation layers + (i.e. the clifford operations for the remainder of the circuit). 
This results in a list of + lists of dictionaries, where each sublist corresponds to an order of the BCH approximation + (when not using the BCH approximation this list will be length 1), and the dictionaries + correspond to end of circuit error generators and rates. + + Parameters + ---------- + errorgen_layers : list of lists of dicts + Each sublist corresponds to a circuit layer, with these sublists containing dictionaries + of the error generator coefficients and rates for a circuit layer. Each dictionary corresponds + to a different order of the BCH approximation (when not using the BCH approximation this list will + be length 1). The error generator coefficients are represented using LocalStimErrorgenLabel. + + propagation_layers : list of `stim.Tableau` + A list of `stim.Tableau` objects, each corresponding to a cumulative product of + ideal Clifford operations for a set of circuit layers, each corresponding to a layer + of operations which we will be propagating error generators through. + + include_spam : bool, optional (default True) + If True then include the error generators for state preparation and measurement + are included in errogen_layers, and the state preparation error generator should + be propagated through (the measurement one is simply appended at the end). + + Returns + ------- + fully_propagated_layers : list of lists of dicts + A list of list of dicts with the same structure as errorgen_layers corresponding + to the results of having propagated each of the error generator layers through + the circuit to the end. + """ + + #the stopping index in errorgen_layers will depend on whether the measurement error + #generator is included or not. 
+ if include_spam: + stopping_idx = len(errorgen_layers)-2 + else: + stopping_idx = len(errorgen_layers)-1 + + fully_propagated_layers = [] + for i in range(stopping_idx): + err_layer = errorgen_layers[i] + prop_layer = propagation_layers[i] + new_error_dict=dict() + #iterate through dictionary of error generator coefficients and propagate each one. + for errgen_coeff_lbl in err_layer: + propagated_error_gen = errgen_coeff_lbl.propagate_error_gen_tableau(prop_layer, err_layer[errgen_coeff_lbl]) + new_error_dict[propagated_error_gen[0]] = propagated_error_gen[1] + fully_propagated_layers.append(new_error_dict) + #add the final layers which didn't require actual propagation (since they were already at the end). + fully_propagated_layers.extend(errorgen_layers[stopping_idx:]) + return fully_propagated_layers + + + def errorgen_layer_dict_to_errorgen(self, errorgen_layer, mx_basis='pp'): + """ + Helper method for converting from an error generator dictionary in the format + utilized in the `errorgenpropagation` module into a numpy array. + + Parameters + ---------- + errorgen_layer : dict + A dictionary containing the error generator coefficients and rates for a circuit layer, + with the error generator coefficients labels represented using `LocalStimErrorgenLabel`. + + mx_basis : Basis or str, optional (default 'pp') + Either a `Basis` object, or a string which can be cast to a `Basis`, specifying the + basis in which to return the error generator. + + return_dense : bool, optional (default False) + If True return the error generator as a dense numpy array. + + Returns + ------- + errorgen : numpy.ndarray + Error generator corresponding to input `errorgen_layer` dictionary as a numpy array. + """ + + #Use the keys of errorgen_layer to construct a new `ExplicitErrorgenBasis` with + #the elements necessary for the construction of the error generator matrix. 
+
+        #Construct a list of new errorgen coefficients by looping through the keys of errorgen_layer
+        #and converting them to LocalElementaryErrorgenLabels.
+        local_errorgen_coeffs = [coeff_lbl.to_local_eel() for coeff_lbl in errorgen_layer.keys()]
+        eg_types = [lbl.errorgen_type for lbl in local_errorgen_coeffs]
+        eg_bels = [lbl.basis_element_labels for lbl in local_errorgen_coeffs]
+        basis_1q = _BuiltinBasis('PP', 4)
+        num_qubits = len(self.model.state_space.qubit_labels)
+        errorgen = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128)
+        #do this in blocks of 1000 to reduce memory requirements.
+        for eg_typ_batch, eg_bels_batch, eg_rates_batch in zip(_batched(eg_types, 1000), _batched(eg_bels, 1000), _batched(errorgen_layer.values(), 1000)):
+            elemgen_matrices = _ot.bulk_create_elementary_errorgen_nqudit(eg_typ_batch, eg_bels_batch, basis_1q, normalize=False,
+                                                                          sparse=False, tensorprod_basis=False)
+
+            #Stack the arrays and then use broadcasting to weight them according to the rates
+            elemgen_matrices_array = _np.stack(elemgen_matrices, axis=-1)
+            weighted_elemgen_matrices_array = _np.array(eg_rates_batch)*elemgen_matrices_array
+            weighted_elemgen_matrices_array = _np.real_if_close(weighted_elemgen_matrices_array)
+            #The error generator is then just the sum of weighted_elemgen_matrices_array along the third axis.
+            errorgen += _np.sum(weighted_elemgen_matrices_array, axis = 2)
+
+        #finally need to change from the standard basis (which is what the error generator is currently in)
+        #to the pauli basis.
+        errorgen = _bt.change_basis(errorgen, from_basis='std', to_basis=mx_basis)#, expect_real=False)
+
+        return errorgen
+
+
+# There's a factor of a half missing in here.
+#def nm_propagators(corr, Elist,qubits): +# Kms = [] +# for idm in range(len(Elist)): +# Am=zeros([4**qubits,4**qubits],dtype=complex128) +# for key in Elist[idm][0]: +# Am += key.toWeightedErrorBasisMatrix() +# # This assumes that Elist is in reverse chronological order +# partials = [] +# for idn in range(idm, len(Elist)): +# An=zeros([4**qubits,4**qubits],dtype=complex128) +# for key2 in Elist[idn][0]: +# An = key2.toWeightedErrorBasisMatrix() +# partials += [corr[idm,idn] * Am @ An] +# partials[0] = partials[0]/2 +# Kms += [sum(partials,0)] +# return Kms + +#def averaged_evolution(corr, Elist,qubits): +# Kms = nm_propagators(corr, Elist,qubits) +# return multi_dot([expm(Km) for Km in Kms]) + + +def _batched(iterable, n): + """ + Yield successive n-sized batches from an iterable. + + Parameters: + iterable (iterable): The iterable to divide into batches. + n (int): The batch size. + + Yields: + iterable: An iterable containing the next batch of items. + """ + it = iter(iterable) + while True: + batch = list(islice(it, n)) + if not batch: + break + yield batch \ No newline at end of file diff --git a/pygsti/errorgenpropagation/localstimerrorgen.py b/pygsti/errorgenpropagation/localstimerrorgen.py new file mode 100644 index 000000000..e481fe30d --- /dev/null +++ b/pygsti/errorgenpropagation/localstimerrorgen.py @@ -0,0 +1,266 @@ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** + +from pygsti.baseobjs.errorgenlabel import ElementaryErrorgenLabel as _ElementaryErrorgenLabel, GlobalElementaryErrorgenLabel as _GEEL,\ +LocalElementaryErrorgenLabel as _LEEL +try: + import stim +except ImportError: + pass +import numpy as _np +from pygsti.tools import change_basis +from pygsti.tools.lindbladtools import create_elementary_errorgen + +#TODO: Split this into a parent class and subclass for markovian and non-markovian +#propagation. There is some overhead in instantiating the NM version of these labels +#which we can avoid and make markovian applications much more efficient (label instantiation +#is like a third of runtime when using higher-order BCH, e.g.) +class LocalStimErrorgenLabel(_ElementaryErrorgenLabel): + + """ + `LocalStimErrorgenLabel` is a specialized `ElementaryErrorgenLabel` + designed to manage the propagation of error generator using Stim primitives for fast Pauli and + Clifford operations, storing propagation related metadata, and storing metadata relevant to the + evaluation of non-Markovian error propagators using cumulant expansion based techniques. + """ + + @classmethod + def cast(cls, obj, sslbls=None): + """ + Method for casting objects to instances of LocalStimErrorgenLabel. + + Parameters + ---------- + obj : `LocalStimErrorgenLabel`, ``LocalElementaryErrorgenLabel`, `GlobalElementaryErrorgenLabel`, tuple or list + + sslbls : tuple or list, optional (default None) + A complete set of state space labels. Used when casting from a GlobalElementaryErrorgenLabel + or from a tuple of length 3 (wherein the final element is interpreted as the set of ssblbs the error + generator acts upon). 
+
+        Returns
+        -------
+        `LocalStimErrorgenLabel`
+        """
+        if isinstance(obj, LocalStimErrorgenLabel):
+            return obj
+
+        if isinstance(obj, _GEEL):
+            #convert to a tuple representation
+            assert sslbls is not None, 'Must specify sslbls when casting from `GlobalElementaryErrorgenLabel`.'
+            obj = (obj.errorgen_type, obj.basis_element_labels, obj.sslbls)
+            initial_label=None
+
+        if isinstance(obj, _LEEL):
+            #convert to a tuple representation, remembering the original label so it survives propagation bookkeeping.
+            initial_label = obj
+            obj = (obj.errorgen_type, obj.basis_element_labels)
+        else:
+            initial_label = None
+
+        if isinstance(obj, (tuple, list)):
+            #first element of the tuple is a string corresponding to the error generator type.
+            errorgen_type = obj[0]
+
+            #two elements for a local label and three for a global one
+            #second element should have the basis element labels
+            assert len(obj)==2 or len(obj)==3 and isinstance(obj[1], (tuple, list))
+
+            #if a global label tuple the third element should be a tuple or list.
+            if len(obj)==3:
+                assert isinstance(obj[2], (tuple, list))
+                assert sslbls is not None, 'Must specify sslbls when casting from a tuple or list of length 3. See docstring.'
+                #convert to local-style bels.
+                indices_to_replace = [sslbls.index(sslbl) for sslbl in obj[2]]
+                local_bels = []
+                for global_lbl in obj[1]:
+                    #start by figure out which initialization to use, either stim
+                    #or a string.
+ local_bel = stim.PauliString('I'*len(sslbls)) + for kk, k in enumerate(indices_to_replace): + local_bel[k] = global_lbl[kk] + local_bels.append(local_bel) + else: + local_bels = obj[1] + + #now build the LocalStimErrorgenLabel + stim_bels = [] + for bel in local_bels: + if isinstance(bel, str): + stim_bels.append(stim.PauliString(bel)) + elif isinstance(bel, stim.PauliString): + stim_bels.append(bel) + else: + raise ValueError('Only str and `stim.PauliString` basis element labels are supported presently.') + + return cls(errorgen_type, stim_bels, initial_label=initial_label) + + + def __init__(self, errorgen_type, basis_element_labels, circuit_time=None, initial_label=None, + label=None, pauli_str_reps=None): + """ + Create a new instance of `LocalStimErrorgenLabel` + + Parameters + ---------- + errorgen_type : str + A string corresponding to the error generator sector this error generator label is + an element of. Allowed values are 'H', 'S', 'C' and 'A'. + + basis_element_labels : tuple or list + A list or tuple of stim.PauliString labeling basis elements used to label this error generator. + This is either length-1 for 'H' and 'S' type error generators, or length-2 for 'C' and 'A' + type. + + circuit_time : float, optional (default None) + An optional value which associates this error generator with a particular circuit time at + which it arose. This is primarily utilized in the context of non-Markovian simulations and + estimation where an error generator may notionally be associated with a stochastic process. + + initial_label : `ElementaryErrorgenLabel`, optional (default None) + If not None, then this `ElementaryErrorgenLabel` is stored within this label and is interpreted + as being the 'initial' value of this error generator, prior to any propagation or transformation + during the course of its use. If None, then this is initialized to a `LocalElementaryErrorgenLabel` + matching the `errorgen_type` and `basis_element_labels` of this label. 
+ + label : str, optional (default None) + An optional label string which is included when printing the string representation of this + label. + + pauli_str_reps : tuple of str, optional (default None) + Optional tuple of python strings corresponding to the stim.PauliStrings in basis_element_labels. + When specified can speed up construction of hashable label representations. + """ + self.errorgen_type = errorgen_type + self.basis_element_labels = tuple(basis_element_labels) + self.label = label + self.circuit_time = circuit_time + + if pauli_str_reps is not None: + self._hashable_basis_element_labels = pauli_str_reps + self._hashable_string_rep = self.errorgen_type.join(pauli_str_reps) + else: + self._hashable_basis_element_labels = self.bel_to_strings() + self._hashable_string_rep = self.errorgen_type.join(self._hashable_basis_element_labels) + + #additionally store a copy of the value of the original error generator label which will remain unchanged + #during the course of propagation for later bookkeeping purposes. + if initial_label is not None: + self.initial_label = initial_label + else: + self.initial_label = self.to_local_eel() + #TODO: Update various methods to account for additional metadata that has been added. + + def __hash__(self): + #return hash((self.errorgen_type, self._hashable_basis_element_labels)) + return hash(self._hashable_string_rep) + + def bel_to_strings(self): + """ + Convert the elements of `basis_element_labels` to python strings + (from stim.PauliString(s)) and return as a tuple. + """ + return tuple([str(ps)[1:].replace('_',"I") for ps in self.basis_element_labels]) + + + def __eq__(self, other): + """ + Performs equality check by seeing if the two error gen labels have the same `errorgen_type` + and `basis_element_labels`. 
+        """
+        #check the type first so comparison against a non-LocalStimErrorgenLabel returns False
+        #instead of raising AttributeError when accessing .errorgen_type on `other`.
+        return isinstance(other, LocalStimErrorgenLabel) and self.errorgen_type == other.errorgen_type \
+               and self.basis_element_labels == other.basis_element_labels
+
+
+    def __str__(self):
+        if self.label is None:
+            return self.errorgen_type + "(" + ",".join(map(str, self.basis_element_labels)) + ")"
+        else:
+            return self.errorgen_type + " " + str(self.label)+ " " + "(" \
+                   + ",".join(map(str, self.basis_element_labels)) + ")"
+
+    def __repr__(self):
+        if self.label is None:
+            if self.circuit_time is not None:
+                return f'({self.errorgen_type}, {self.basis_element_labels}, time={self.circuit_time})'
+            else:
+                return f'({self.errorgen_type}, {self.basis_element_labels})'
+        else:
+            if self.circuit_time is not None:
+                return f'({self.errorgen_type}, {self.label}, {self.basis_element_labels}, time={self.circuit_time})'
+            else:
+                return f'({self.errorgen_type}, {self.label}, {self.basis_element_labels})'
+
+
+    #TODO: Rework this to not directly modify the weights, and only return the sign modifier.
+    def propagate_error_gen_tableau(self, slayer, weight):
+        """
+        Parameters
+        ----------
+        slayer : `stim.Tableau`
+            `stim.Tableau` object corresponding to an ideal Clifford operations for
+            a circuit layer which we will be propagating this error generator through.
+
+        weight : float
+            Current weight of this error generator.
+
+        Returns
+        -------
+        tuple of consisting of an `LocalStimErrorgenLabel` and an updated error generator
+        weight, which may have changed by a sign.
+ """ + new_basis_labels = [] + weightmod = 1.0 + if self.errorgen_type == 'S': + for pauli in self.basis_element_labels: + temp = slayer(pauli) + temp = temp*temp.sign + new_basis_labels.append(temp) + else: + for pauli in self.basis_element_labels: + temp = slayer(pauli) + temp_sign = temp.sign + weightmod = temp_sign.real*weightmod + temp = temp*temp_sign + new_basis_labels.append(temp) + + return (LocalStimErrorgenLabel(self.errorgen_type, new_basis_labels, initial_label=self.initial_label, circuit_time=self.circuit_time), + weightmod*weight) + + def to_global_eel(self, sslbls = None): + """ + Returns a `GlobalElementaryErrorgenLabel` equivalent to this `LocalStimErrorgenLabel`. + + sslbls : list (optional, default None) + A list of state space labels corresponding to the qubits corresponding to each + of the paulis in the local basis element label. If None this defaults a list of integers + ranging from 0 to N where N is the number of paulis in the basis element labels. + """ + + #first get the pauli strings corresponding to the stim.PauliString object(s) that are the + #basis_element_labels. + pauli_strings = self.bel_to_strings() + if sslbls is None: + sslbls = list(range(len(pauli_strings[0]))) #The two pauli strings should be the same length, so take the first. + #GlobalElementaryErrorgenLabel should have built-in support for casting from a tuple of the error gen type + #and the paulis for the basis element labels, so long as it is given appropriate sslbls to use. + return _GEEL.cast((self.errorgen_type,) + pauli_strings, sslbls= sslbls) + + + def to_local_eel(self): + """ + Returns a `LocalElementaryErrorgenLabel` equivalent to this `LocalStimErrorgenLabel`. 
+ + Returns + ------- + `LocalElementaryErrorgenLabel` + """ + return _LEEL(self.errorgen_type, self._hashable_basis_element_labels) + + diff --git a/pygsti/extras/lfh/__init__.py b/pygsti/extras/lfh/__init__.py new file mode 100644 index 000000000..b5143ca45 --- /dev/null +++ b/pygsti/extras/lfh/__init__.py @@ -0,0 +1,9 @@ +""" Low Frequency Hamiltonian Sub-package """ +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** diff --git a/pygsti/extras/lfh/lfherrorgen.py b/pygsti/extras/lfh/lfherrorgen.py new file mode 100644 index 000000000..a00df744f --- /dev/null +++ b/pygsti/extras/lfh/lfherrorgen.py @@ -0,0 +1,213 @@ +""" +Defines the LFHLindbladErrorgen class, an extension of LindbladErrorgen with +support for fluctuating Hamiltonian parameters. +""" +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
+#*************************************************************************************************** +''' +import numpy as _np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + + +#--------- New LindbladErrorgen ------------# +#Pattern match a bit off of the parameterized lindblad error generator Jordan cooked up +class LFHLindbladErrorgen(_LindbladErrorgen): + """ + A Lindblad error generator with parameters that are combined + to get the target error generator based on some function params_to_coeffs of the parameter vector + params_to_coeffs should return a numpy array + """ + def coeff_dict_from_vector(self): + basis = _BuiltinBasis('pp', 4) + v = self.current_rates + #print(len(v)) + error_rates_dict = {} + for i in range(3): + 
error_rates_dict[('H',basis.labels[i+1])] = v[i] + labels = [('S', 'X'), ('A','X','Y'),('A','X','Z'),('C','X','Z'),('S','Y'),('A','Y','Z'),('C','X','Y'),('C','Y','Z'),('S','Z')] + for i in range(3,12): + error_rates_dict[(labels[i-3])] = v[i] + return error_rates_dict + + def __init__(self, h_means, otherlindbladparams, h_devs, lindblad_basis='auto', elementary_errorgen_basis='pp', + evotype="default", state_space=1, parameterization='CPTPLND', truncate=True, rng= None): + #Pass in a vector of standard lindblad parameters as well as a vector of standard deviations + #for each of the hamiltonian parameters + + #Store the values of the mean hamiltonian rates. + self.means= h_means + self.otherlindbladparams = otherlindbladparams + + self.paramvals = _np.array([param for param in self.means] + [param for param in self.otherlindbladparams]) #the parameters + self.current_rates = self.paramvals.copy() + + #let's make the h deviations a dictionary instead, so we can control which of the hamiltonian rates are fluctuating + #to make the marginalization more efficient (avoiding duplicated calculations when std. devs are 0. + #We'll make the keys of the dictionary the index in h_means that the deviation corresponds to. + + self.dev_dict = h_devs + self.devs= _np.fromiter(h_devs.values(), dtype = _np.double) + + #set the random number generator used for sampling from a normal distribution. 
+ if rng is not None: + if isinstance(rng, int): + self.rng= _np.random.default_rng(rng) + else: + self.rng = rng + else: + self.rng= _np.random.default_rng() + + #Get the coefficient dictionary for this parameter vector + self.coefficients = self.coeff_dict_from_vector() + #super().from_elementary_errorgens(coeff_dict, state_space = 1) + + state_space = _statespace.StateSpace.cast(state_space) + dim = state_space.dim # Store superop dimension + basis = _Basis.cast(elementary_errorgen_basis, dim) + + #convert elementary errorgen labels to *local* labels (ok to specify w/global labels) + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = state_space.tensor_product_block_labels(0) # just take first TPB labels as all labels + elementary_errorgens = _collections.OrderedDict( + [(_LocalElementaryErrorgenLabel.cast(lbl, sslbls, identity_label_1Q), val) + for lbl, val in self.coefficients.items()]) + + parameterization = LindbladParameterization.minimal_from_elementary_errorgens(elementary_errorgens) \ + if parameterization == "auto" else LindbladParameterization.cast(parameterization) + + eegs_by_typ = { + 'ham': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type == 'H'}, + 'other_diagonal': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type == 'S'}, + 'other': {eeglbl: v for eeglbl, v in elementary_errorgens.items() if eeglbl.errorgen_type != 'H'} + } + + blocks = [] + for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): + relevant_eegs = eegs_by_typ[blk_type] # KeyError => unrecognized block type! 
+ bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) + blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) + blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) + blocks.append(blk) + #print(blk) + + evotype= _Evotype.cast(evotype) + evotype.prefer_dense_reps = True + + super().__init__(blocks, evotype=evotype, state_space=1) + + @property + def num_params(self): + """ + Get the number of independent parameters which specify this operation. + + Returns + ------- + int + the number of independent parameters. + """ + return len(self.paramvals) + len(self.devs) + + def to_vector(self): + ret_vec= [param for param in self.paramvals] + [dev for dev in self.devs] + + return _np.array(ret_vec) + + def from_vector(self,v, close=False, dirty_value=True): + """ + Initialize the operation using a vector of parameters. + + Parameters + ---------- + v : numpy array + The 1D vector of operation parameters. Length + must == num_params() + + close : bool, optional + Whether `v` is close to this operation's current + set of parameters. Under some circumstances, when this + is true this call can be completed more quickly. + + dirty_value : bool, optional + The value to set this object's "dirty flag" to before exiting this + call. This is passed as an argument so it can be updated *recursively*. + Leave this set to `True` unless you know what you're doing. 
+ + Returns + ------- + None + """ + assert(len(v) == self.num_params) + + #split off the terms that go into paramvals and devs + v = _np.array(v) + new_paramvals= v[:len(self.paramvals)] + new_otherlindblad_params = v[3:len(self.paramvals)] + new_devs= v[len(self.paramvals):] + new_means= v[0:3] + + self.paramvals = new_paramvals + self.means= new_means + self.devs= new_devs + self.dev_dict = {key:val for key,val in zip(self.dev_dict.keys(), new_devs)} + self.otherlindbladparams = new_otherlindblad_params + + self.coefficients = self.coeff_dict_from_vector() + + #coefficient blocks and current rates get reset to the new mean values passed in + #resampling can cause the values of the coefficient blocks and the rates to become + #different though. + self.current_rates= self.paramvals.copy() + off = 0 + u = self.paramvals + for blk in self.coefficient_blocks: + blk.from_vector(u[off: off + blk.num_params]) + off += blk.num_params + self._update_rep() + self.dirty = dirty_value + + #Now the special ingredient we need is functionality for resampling + #What we want to be able to do is use the current hamiltonian means + #and std deviations to get a new set of hamiltonian weights. 
+
+    def sample_hamiltonian_rates(self):#, dirty_value=True):
+
+        new_h_rates = [self.rng.normal(loc=mean, scale=self.dev_dict[i]) if i in self.dev_dict else mean
+                       for i, mean in enumerate(self.means)]
+
+        #now we want to update the coefficient blocks and current rates:
+        self.current_rates = _np.array(new_h_rates + [other_lindblad for other_lindblad in self.otherlindbladparams])
+        off = 0
+        u = self.current_rates
+        for blk in self.coefficient_blocks:
+            blk.from_vector(u[off: off + blk.num_params])
+            off += blk.num_params
+        self._update_rep()
+        #self.dirty = dirty_value
+
+'''
\ No newline at end of file
diff --git a/pygsti/extras/lfh/lfhforwardsims.py b/pygsti/extras/lfh/lfhforwardsims.py
new file mode 100644
index 000000000..e9099eb3a
--- /dev/null
+++ b/pygsti/extras/lfh/lfhforwardsims.py
@@ -0,0 +1,926 @@
+"""
+Defines the various forward simulators for use with models containing operations with
+fluctuating Hamiltonian parameters.
+"""
+#***************************************************************************************************
+# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
+# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
+# in this software.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+# in compliance with the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
+#*************************************************************************************************** +''' + +import numpy as _np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype +from pygsti.extras.lfh.lfherrorgen import LFHLindbladErrorgen as _LFHLindbladErrorgen +import pygsti.tools.slicetools as _slct + + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + +#Next we need to define a new custom weak forward simulator +class LFHWeakForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + """ + + def __init__(self, shots, model=None, base_seed=None): + """ + Construct a new WeakForwardSimulator object. 
+
+        Parameters
+        ----------
+        shots: int
+            Number of times to run each circuit to obtain an approximate probability
+        model : Model
+            Optional parent Model to be stored with the Simulator
+        """
+        self.shots = shots
+        super().__init__(model)
+
+    def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None):
+        """
+        Construct a dictionary containing the probabilities for an entire list of circuits.
+
+        Parameters
+        ----------
+        circuits : list of Circuits
+            The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout`
+            object containing pre-computed quantities that make this function run faster.
+
+        clip_to : 2-tuple, optional
+            (min,max) to clip return value if not None.
+
+        resource_alloc : ResourceAllocation, optional
+            A resource allocation object describing the available resources and a strategy
+            for partitioning them.
+
+        smartc : SmartCache, optional
+            A cache object to cache & use previously cached values inside this
+            function.
+
+        Returns
+        -------
+        probs : dictionary
+            A dictionary such that `probs[circuit]` is an ordered dictionary of
+            outcome probabilities whose keys are outcome labels.
+        """
+
+        #We want to loop through each of the circuits in a "rasterization pass" collecting
+        #one shot each. At the start of each loop we want to resample the randomly fluctuating
+        #hamiltonian parameters.
+        #We should be able to farm out the probability calculation to another forward simulator
+        #though.
+        probs_for_shot = []
+        for i in range(self.shots):
+            #Have the model resample the hamiltonian rates:
+            self.model.sample_hamiltonian_rates()
+            helper_sim = _MapForwardSimulator(model=self.model)
+
+            #Now that we've sampled the hamiltonian rates calculate the probabilities for
+            #all of the circuits.
+            #import pdb
+            #pdb.set_trace()
+            probs_for_shot.append(helper_sim.bulk_probs(circuits))
+        #Now loop through and perform an averaging over the output probabilities.
+        #Initialize a dictionary for storing the final results.
+ #print(probs_for_shot) + outcome_labels= probs_for_shot[0][circuits[0]].keys() + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in probs_for_shot: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl]/self.shots + + #return the averaged probabilities: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! + #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? 
+# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs = self.bulk_probs(circuits) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits) + + #need to parse this and construct the corresponding entries of the dprobs dict. + + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec, close=True) + + return dprobs + + + #Try out a different "weak" forward simulator that doesn't use sampling to do the integration +#over the gaussian, but rather approximates the expectation values using gauss-hermite quadrature +class LFHIntegratingForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + """ + + def __init__(self, order, model=None, base_seed=None): + """ + Construct a new WeakForwardSimulator object. + + Parameters + ---------- + order: int + order of the gauss-hermite approximation for the integral. + model : Model + Optional parent Model to be stored with the Simulator + """ + self.order = order + self.helper_sim = None + super().__init__(model) + + def build_sampling_grid(self): + #build the grid of sample points and weights + #for the simulators model. 
+ + #Need to identify how many deviation parameters there are. + num_deviances= 0 + dev_values= [] + mean_values = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + dev_values.extend(subop.errorgen.devs) + mean_values.extend(subop.errorgen.means) + num_deviances += len(subop.errorgen.devs) + + #Once we know the number of deviances and their values we can start building + #out the grid of sampling points and weights. + base_one_d_points , base_one_d_weights= roots_hermite(self.order) + + #print(base_one_d_points) + #print(base_one_d_weights) + + #print(mean_values) + #print(dev_values) + + #The weights remain the same, but I need to modify the sampling points + #Now I need to get updates + gaussian_one_d_points = [[] for _ in range(len(dev_values))] + + for i,(dev, mean) in enumerate(zip(dev_values, mean_values)): + for point in base_one_d_points: + gaussian_one_d_points[i].append(mean+sqrt(2)*dev*point) + + #print(gaussian_one_d_points[0]) + + return gaussian_one_d_points, (1/sqrt(pi))*base_one_d_weights + + + def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None, return_layout= False, cached_layout= None): + """ + Construct a dictionary containing the probabilities for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + clip_to : 2-tuple, optional + (min,max) to clip return value if not None. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. 
+ + Returns + ------- + probs : dictionary + A dictionary such that `probs[circuit]` is an ordered dictionary of + outcome probabilities whose keys are outcome labels. + """ + + sample_points_lists , weights = self.build_sampling_grid() + + #The grid of points is the cartesian product of the sample point lists. + + sample_point_grid = _itertools.product(*sample_points_lists) + + #do this for convienience + weight_grid = _itertools.product(*([weights]*len(sample_points_lists))) + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. + hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + if self.helper_sim is None: + self.add_helper_sim() + + #create a circuit layout that we can reuse to speed things up + #(We'll be using the same circuit list at every evaluation point) + if cached_layout is None: + ckt_layout = self.helper_sim.create_layout(circuits) + else: + ckt_layout = cached_layout + + weighted_probs_for_point = [] + + for sample_grid_point, weight_grid_point in zip(sample_point_grid, weight_grid): + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = _np.array(sample_grid_point) + + #despite storing it as a grid, we just need the scalar product of the weights + weight_value = _np.prod(weight_grid_point) + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + #We can pass in a COPAlayout for this instead of a list of circuits, which speeds things up. 
+ probs_for_point = self.helper_sim.bulk_probs(ckt_layout) + #probs_for_point = helper_sim.bulk_probs(circuits) + + #print(probs_for_point) + + #Iterate through and add weight terms. + outcome_labels= probs_for_point[circuits[0]].keys() + weighted_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for ckt in circuits: + for lbl in outcome_labels: + weighted_probs[ckt][lbl] = probs_for_point[ckt][lbl] * weight_value + + weighted_probs_for_point.append(weighted_probs) + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #print(len(weighted_probs_for_point)) + + #Aggregate all of the probability values into a final_result + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in weighted_probs_for_point: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl] + + #return the averaged probabilities: + if return_layout: + return averaged_probs, ckt_layout + else: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! 
+ #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? +# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs, ckt_layout = self.bulk_probs(circuits, return_layout= True) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits, cached_layout= ckt_layout) + + #need to parse this and construct the corresponding entries of the dprobs dict. 
+ + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + return dprobs + + def add_helper_sim(self): + if self.model is not None: + self.helper_sim = _MatrixForwardSimulator(model=self.model) + + def create_layout(self, bulk_circuit_list, dataset=None, resource_alloc=None, + array_types=(), verbosity=1): + + if self.helper_sim is None: + self.add_helper_sim() + + return self.helper_sim.create_layout(bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=verbosity) + + #Add a bulk_fill_probs method that does something similar to bulk_probs but returns + #an array filled according to a layout instead of an outcome dictionary + def bulk_fill_probs(self, array_to_fill, layout): + + sample_points_lists , weights = self.build_sampling_grid() + + #The grid of points is the cartesian product of the sample point lists. + + sample_point_grid = list(_itertools.product(*sample_points_lists)) + + #do this for convienience + weight_grid = list(_itertools.product(*([weights]*len(sample_points_lists)))) + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. 
+ hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + #If I have a layout then I should have a helper sim by this point + #if self.helper_sim is None: + # self.add_helper_sim() + + #create copies of the array being filled + temp_arrays = [array_to_fill.copy() for _ in sample_point_grid] + + for i, (sample_grid_point, weight_grid_point) in enumerate(zip(sample_point_grid, weight_grid)): + + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = _np.array(sample_grid_point) + + #despite storing it as a grid, we just need the scalar product of the weights + weight_value = _np.prod(weight_grid_point) + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + self.helper_sim.bulk_fill_probs(temp_arrays[i], layout) + + #Iterate through and add weight terms. + temp_arrays[i] = weight_value*temp_arrays[i] + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #Aggregate all of the probability values into a final_result + averaged_array = temp_arrays[0] + for temp_array in temp_arrays[1:]: + averaged_array += temp_array + + #print('averaged: ', averaged_array) + + array_to_fill[:]= averaged_array + #return averaged_array + + + #Next I need a version of bulk_fill_dprobs: + + def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): + + eps = 1e-7 # hardcoded? 
+ + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + probs = pr_array_to_fill.copy() + + else: + probs = layout.allocate_local_array('e', 'd') + self.bulk_fill_probs(probs, layout) + + orig_vec = self.model.to_vector().copy() + + for i in range(self.model.num_params): + probs2 = probs.copy() + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_probs(probs2,layout) + + #now put this result into the array to be filled. + array_to_fill[: , i] =(probs2 - probs) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + #print('dprobs: ', array_to_fill) + #return dprobs + +class LFHSigmaForwardSimulator(_ForwardSimulator): + """ + Weak forward simulator specialized for dealing with low-frequency hamiltonian models. + This version uses sigma point methods (unscented transform) to approximate the requisite + integrals. + """ + + def __init__(self, model=None): + """ + Construct a new WeakForwardSimulator object. + + Parameters + ---------- + order: int + order of the gauss-hermite approximation for the integral. + model : Model + Optional parent Model to be stored with the Simulator + """ + self.helper_sim = None + super().__init__(model) + + def sigma_points(self): + #build the grid of sample points and weights + #for the simulators model. + + #Need to identify how many deviation parameters there are. 
+ num_deviances= 0 + dev_values= [] + mean_values = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + dev_values.extend(subop.errorgen.devs) + mean_values.extend([subop.errorgen.means[i] for i in subop.errorgen.dev_dict.keys()]) + num_deviances += len(subop.errorgen.devs) + + #Now construct the set of points and weights: + mean_vec = _np.array(mean_values).reshape((num_deviances,1)) + std_vec = _np.array(dev_values) + + #Currently only have _LFHLindbladErrorgen objects that are configured for + #diagonal covariances, so we can simplify the sigma point construction logic + #a bit. Use a heuristic from Julier and Uhlmann. + #The first sigma point is just the mean. + #columns of this matrix will become sigma vectors. + sigma_vec_array = _np.repeat(mean_vec, repeats=2*num_deviances+1, axis=1) + + #calculate a special scaling factor used in the Unscented transform. + #This scale factor is n + kappa in Julier and Uhlmann, and they claim + #a value of n+kappa =3 is a good heuristic for gaussian distributions. + scale_factor = 3 + #columns of offsets correspond to the offset vectors + offsets = _np.diag(_np.sqrt(scale_factor)*std_vec) + #Note: the application of these shifts can be done much more efficiently + #by appropriately using slicing and broadcasting, but this is easy for now. 
+ shifts = _np.concatenate([_np.zeros_like(mean_vec), offsets, -offsets], axis=1) + #Add these offsets to columns 1 to L and subtract from + #columns L+1 to 2L+1 + sigma_vec_array += shifts + + #next we need the weights + kappa = scale_factor - num_deviances + weights = _np.array([kappa/scale_factor, 1/(2*scale_factor)]) + + return sigma_vec_array, weights + + + + def bulk_probs(self, circuits, clip_to=None, resource_alloc=None, smartc=None, return_layout= False, cached_layout= None): + """ + Construct a dictionary containing the probabilities for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + clip_to : 2-tuple, optional + (min,max) to clip return value if not None. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + probs : dictionary + A dictionary such that `probs[circuit]` is an ordered dictionary of + outcome probabilities whose keys are outcome labels. + """ + + sigma_points , weights = self.sigma_points() + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. I should probably make this something that gets cached, as it usually + #won't need recomputation. 
+ hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend([op.gpindices.start+i for i in subop.errorgen.dev_dict.keys()]) + #hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + if self.helper_sim is None: + self.add_helper_sim() + + #create a circuit layout that we can reuse to speed things up + #(We'll be using the same circuit list at every evaluation point) + if cached_layout is None: + ckt_layout = self.helper_sim.create_layout(circuits) + else: + ckt_layout = cached_layout + + weighted_probs_for_point = [] + weight_iter = _itertools.chain([0] ,_itertools.repeat(1, sigma_points.shape[1]-1)) + for i, j in zip(range(sigma_points.shape[1]), weight_iter): + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = sigma_points[:,i] + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + #We can pass in a COPAlayout for this instead of a list of circuits, which speeds things up. + probs_for_point = self.helper_sim.bulk_probs(ckt_layout) + #probs_for_point = helper_sim.bulk_probs(circuits) + + #print(probs_for_point) + + #Iterate through and add weight terms. 
+ outcome_labels= probs_for_point[circuits[0]].keys() + weighted_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for ckt in circuits: + for lbl in outcome_labels: + weighted_probs[ckt][lbl] = probs_for_point[ckt][lbl] * weights[j] + + weighted_probs_for_point.append(weighted_probs) + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #print(len(weighted_probs_for_point)) + + #Aggregate all of the probability values into a final_result + averaged_probs = {ckt:{lbl:0 for lbl in outcome_labels} for ckt in circuits} + + for prob_dict in weighted_probs_for_point: + for ckt in circuits: + for lbl in outcome_labels: + averaged_probs[ckt][lbl] += prob_dict[ckt][lbl] + + #return the averaged probabilities: + if return_layout: + return averaged_probs, ckt_layout + else: + return averaged_probs + + def bulk_dprobs(self, circuits, resource_alloc=None, smartc=None): + """ + Construct a dictionary containing the probability derivatives for an entire list of circuits. + + Parameters + ---------- + circuits : list of Circuits + The list of circuits. May also be a :class:`CircuitOutcomeProbabilityArrayLayout` + object containing pre-computed quantities that make this function run faster. + + resource_alloc : ResourceAllocation, optional + A resource allocation object describing the available resources and a strategy + for partitioning them. + + smartc : SmartCache, optional + A cache object to cache & use previously cached values inside this + function. + + Returns + ------- + dprobs : dictionary + A dictionary such that `dprobs[circuit]` is an ordered dictionary of + derivative arrays (one element per differentiated parameter) whose + keys are outcome labels + """ + + #If _compute_circuit_outcome_probability_derivatives is implemented, use it! + #resource_alloc = layout.resource_alloc() + + eps = 1e-7 # hardcoded? 
+# if param_slice is None: +# param_slice = slice(0, self.model.num_params) +# param_indices = _slct.to_array(param_slice) + +# if dest_param_slice is None: +# dest_param_slice = slice(0, len(param_indices)) +# dest_param_indices = _slct.to_array(dest_param_slice) + +# iParamToFinal = {i: dest_param_indices[ii] for ii, i in enumerate(param_indices)} + + probs, ckt_layout = self.bulk_probs(circuits, return_layout= True) + orig_vec = self.model.to_vector().copy() + + #pull out the requisite outcome labels: + outcome_labels= probs[circuits[0]].keys() + + #initialize a dprobs array: + dprobs= {ckt: {lbl: _np.empty(self.model.num_params, dtype= _np.double) for lbl in outcome_labels} for ckt in circuits} + + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + probs2 = self.bulk_probs(circuits, cached_layout= ckt_layout) + + #need to parse this and construct the corresponding entries of the dprobs dict. + + for ckt in circuits: + for lbl in outcome_labels: + dprobs[ckt][lbl][i] = (probs2[ckt][lbl] - probs[ckt][lbl]) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + return dprobs + + def add_helper_sim(self): + if self.model is not None: + self.helper_sim = _MatrixForwardSimulator(model=self.model) + + def create_layout(self, bulk_circuit_list, dataset=None, resource_alloc=None, + array_types=(), verbosity=1): + + if self.helper_sim is None: + self.add_helper_sim() + + return self.helper_sim.create_layout(bulk_circuit_list, dataset, resource_alloc, + array_types, verbosity=verbosity) + + #Add a bulk_fill_probs method that does something similar to bulk_probs but returns + #an array filled according to a layout instead of an outcome dictionary + def bulk_fill_probs(self, array_to_fill, layout): + + sigma_points , weights = self.sigma_points() + + #I need to identify where in the model vector the sampled hamiltonian weights + #need to go. 
+ hamiltonian_model_indices = [] + for op in self.model.operations.values(): + if isinstance(op, _ComposedOp): + for subop in op.factorops: + if isinstance(subop, _ExpErrorgenOp): + if isinstance(subop.errorgen, _LFHLindbladErrorgen): + hamiltonian_model_indices.extend([op.gpindices.start+i for i in subop.errorgen.dev_dict.keys()]) + #hamiltonian_model_indices.extend(list(range(op.gpindices.start, op.gpindices.start+ len(subop.errorgen.means)))) + + orig_vec = self.model.to_vector() + + #If I have a layout then I should have a helper sim by this point + #if self.helper_sim is None: + # self.add_helper_sim() + + #create copies of the array being filled + temp_arrays = [array_to_fill.copy() for _ in range(sigma_points.shape[1])] + + weight_iter = _itertools.chain([0] ,_itertools.repeat(1, sigma_points.shape[1]-1)) + + for i, j in zip(range(sigma_points.shape[1]), weight_iter): + + vec = orig_vec.copy() + vec[hamiltonian_model_indices] = sigma_points[:,i] + + #set the model to this current vec value + self.model.from_vector(vec) + + #next simulate the model using the helper simulator: + self.helper_sim.bulk_fill_probs(temp_arrays[i], layout) + + #Iterate through and add weight terms. + temp_arrays[i] = weights[j]*temp_arrays[i] + + #reset the model to it's original value + self.model.from_vector(orig_vec) + + #Aggregate all of the probability values into a final_result + averaged_array = temp_arrays[0] + for temp_array in temp_arrays[1:]: + averaged_array += temp_array + + #print('averaged: ', averaged_array) + + array_to_fill[:]= averaged_array + #return averaged_array + + #Next I need a version of bulk_fill_dprobs: + def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): + + eps = 1e-7 # hardcoded? 
+ + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + probs = pr_array_to_fill.copy() + + else: + probs = layout.allocate_local_array('e', 'd') + self.bulk_fill_probs(probs, layout) + + orig_vec = self.model.to_vector().copy() + + for i in range(self.model.num_params): + probs2 = probs.copy() + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_probs(probs2,layout) + + #now put this result into the array to be filled. + array_to_fill[: , i] =(probs2 - probs) / eps + + #restore the model to it's original value + self.model.from_vector(orig_vec) + + #print('dprobs: ', array_to_fill) + #return dprobs + + #add a version of bulk_fill_hprobs + + def bulk_fill_hprobs(self, array_to_fill, layout, + pr_array_to_fill=None, deriv1_array_to_fill=None, + deriv2_array_to_fill=None): + """ + Compute the outcome probability-Hessians for an entire list of circuits. + + Similar to `bulk_fill_probs(...)`, but fills a 3D array with + the Hessians for each circuit outcome probability. + + Parameters + ---------- + array_to_fill : numpy ndarray + an already-allocated numpy array of shape `(len(layout),M1,M2)` where + `M1` and `M2` are the number of selected model parameters (by `wrt_filter1` + and `wrt_filter2`). + + layout : CircuitOutcomeProbabilityArrayLayout + A layout for `array_to_fill`, describing what circuit outcome each + element corresponds to. Usually given by a prior call to :meth:`create_layout`. + + pr_mx_to_fill : numpy array, optional + when not None, an already-allocated length-`len(layout)` numpy array that is + filled with probabilities, just as in :meth:`bulk_fill_probs`. + + deriv1_array_to_fill : numpy array, optional + when not None, an already-allocated numpy array of shape `(len(layout),M1)` + that is filled with probability derivatives, similar to + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M1`). 
+ + deriv2_array_to_fill : numpy array, optional + when not None, an already-allocated numpy array of shape `(len(layout),M2)` + that is filled with probability derivatives, similar to + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M2`). + + Returns + ------- + None + """ + + if pr_array_to_fill is not None: + self.bulk_fill_probs(pr_array_to_fill, layout) + if deriv1_array_to_fill is not None: + self.bulk_fill_dprobs(deriv1_array_to_fill, layout) + dprobs = deriv1_array_to_fill.copy() + if deriv2_array_to_fill is not None: + deriv2_array_to_fill[:, :] = deriv1_array_to_fill[:, :] + + eps = 1e-4 # hardcoded? + dprobs = _np.empty((len(layout), self.model.num_params), 'd') + self.bulk_fill_dprobs(dprobs, layout) + + dprobs2 = _np.empty((len(layout), self.model.num_params), 'd') + + orig_vec = self.model.to_vector().copy() + for i in range(self.model.num_params): + vec = orig_vec.copy() + vec[i] += eps + self.model.from_vector(vec, close=True) + self.bulk_fill_dprobs(dprobs2, layout) + array_to_fill[:, i, :] = (dprobs2 - dprobs) / eps + self.model.from_vector(orig_vec, close=True) +''' \ No newline at end of file diff --git a/pygsti/extras/lfh/lfhmodel.py b/pygsti/extras/lfh/lfhmodel.py new file mode 100644 index 000000000..5856d2c79 --- /dev/null +++ b/pygsti/extras/lfh/lfhmodel.py @@ -0,0 +1,80 @@ +""" +Defines the LFHExplicitOpModel class, an extension of ExplicitOpModel with +support for fluctuating Hamiltonian parameters. +""" +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. 
You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** +''' + +import numpy as np +import collections as _collections +import itertools as _itertools +from pygsti.modelmembers.operations import LindbladErrorgen as _LindbladErrorgen +from pygsti.forwardsims import WeakForwardSimulator as _WeakForwardsimulator +from pygsti.forwardsims import MapForwardSimulator as _MapForwardSimulator +from pygsti.forwardsims import SimpleMapForwardSimulator as _SimpleMapForwardSimulator +from pygsti.forwardsims import MatrixForwardSimulator as _MatrixForwardSimulator +from pygsti.evotypes import Evotype as _Evotype +from pygsti.extras.lfh.lfherrorgen import LFHLindbladErrorgen as _LFHLindbladErrorgen + +from pygsti.forwardsims import ForwardSimulator as _ForwardSimulator +from pygsti.models import ExplicitOpModel as _ExplicitOpModel +from pygsti.modelmembers.operations import ExpErrorgenOp as _ExpErrorgenOp +from pygsti.modelmembers.operations import ComposedOp as _ComposedOp +from pygsti.baseobjs import statespace as _statespace +from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.modelmembers.operations import LindbladParameterization +from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock + +from scipy.special import roots_hermite +from math import sqrt, pi + +#I think the last thing I need is a model which can propagate through the resampling to any +#underlying LFHLindbladErrorgen objects +class LFHExplicitOpModel(_ExplicitOpModel): + + #Use the same init as explicit op model: + def __init__(self, state_space, basis="pp", default_gate_type="full", + default_prep_type="auto", 
default_povm_type="auto", + default_instrument_type="auto", prep_prefix="rho", effect_prefix="E", + gate_prefix="G", povm_prefix="M", instrument_prefix="I", + simulator="auto", evotype="default"): + + super().__init__(state_space, basis, default_gate_type, + default_prep_type, default_povm_type, + default_instrument_type, prep_prefix, effect_prefix, + gate_prefix, povm_prefix, instrument_prefix, + simulator, evotype) + + #Add a method that resamples the hamiltonian rates when requested. + def sample_hamiltonian_rates(self): + #loop through the elements of the operations dictionary + for member in self.operations.values(): + if isinstance(member, _ComposedOp): + #next check is any of the factor ops are exponentiated error generators + for factor in member.factorops: + if isinstance(factor, _ExpErrorgenOp): + #check to see if the error generator is a LFHLindbladErrorgen + if isinstance(factor.errorgen, _LFHLindbladErrorgen): + #then propagate the resampling through. + factor.errorgen.sample_hamiltonian_rates() + #update the representation of the exponentiated error generator + factor._update_rep() + + #Once I have updated the reps of the factors I need to reinitalize the rep of + #the composed op. 
+ #print([op._rep for op in member.factorops]) + member._update_denserep() + #.reinit_factor_op_reps([op._rep for op in member.factorops]) + + #need a version of the circuit_layer_operator method which doesn't call clean_paramvec + #since I think this is what is causing the value of the + +''' \ No newline at end of file diff --git a/pygsti/modelmembers/errorgencontainer.py b/pygsti/modelmembers/errorgencontainer.py index 644b1dfdd..5c80665a4 100644 --- a/pygsti/modelmembers/errorgencontainer.py +++ b/pygsti/modelmembers/errorgencontainer.py @@ -23,7 +23,7 @@ class ErrorGeneratorContainer(object): def __init__(self, errorgen): self.errorgen = errorgen - def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): + def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -46,6 +46,12 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- lindblad_term_dict : dict @@ -61,19 +67,27 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): A Basis mapping the basis labels used in the keys of `lindblad_term_dict` to basis matrices. 
""" - return self.errorgen.coefficients(return_basis, logscale_nonham) + return self.errorgen.coefficients(return_basis, logscale_nonham, label_type) - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [,, [, 0: raise ValueError("Cannot set any error generator coefficients on an op with no error generator!") - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. @@ -479,7 +529,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ return _np.empty((0, self.num_params), 'd') - def error_rates(self): + def error_rates(self, label_type): """ Constructs a dictionary of the error rates associated with this operation. @@ -502,6 +552,14 @@ def error_rates(self): rates is not necessarily the error rate of the overall channel. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. 
+ Returns ------- lindblad_term_dict : dict diff --git a/pygsti/modelmembers/operations/composederrorgen.py b/pygsti/modelmembers/operations/composederrorgen.py index 9459e1be5..f9c321c52 100644 --- a/pygsti/modelmembers/operations/composederrorgen.py +++ b/pygsti/modelmembers/operations/composederrorgen.py @@ -21,6 +21,7 @@ from pygsti.evotypes import Evotype as _Evotype from pygsti.baseobjs import statespace as _statespace from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.tools import matrixtools as _mt @@ -95,7 +96,7 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): errgens_to_compose = [serial_memo[i] for i in mm_dict['submembers']] return cls(errgens_to_compose, mm_dict['evotype'], state_space) - def coefficients(self, return_basis=False, logscale_nonham=False): + def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this error generator. @@ -118,6 +119,12 @@ def coefficients(self, return_basis=False, logscale_nonham=False): channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. 
+ Returns ------- Ltermdict : dict @@ -139,7 +146,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): constant_basis = None # the single same Basis used for every factor with a nonempty basis for eg in self.factors: - factor_coeffs = eg.coefficients(return_basis, logscale_nonham) + factor_coeffs = eg.coefficients(return_basis, logscale_nonham, label_type) if return_basis: ltdict, factor_basis = factor_coeffs @@ -184,17 +191,25 @@ def coefficients(self, return_basis=False, logscale_nonham=False): else: return Ltermdict - def coefficient_labels(self): + def coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [,, [,)` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are integers starting at 0. Values are complex - coefficients. - basis : Basis - A Basis mapping the basis labels used in the - keys of `Ltermdict` to basis matrices. + embedded_coeffs : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. 
""" - return self.embedded_op.coefficients(return_basis, logscale_nonham) + coeffs_to_embed = self.embedded_op.coefficients(return_basis, logscale_nonham, label_type) + + if coeffs_to_embed: + embedded_labels = self.coefficient_labels(label_type=label_type, identity_label=identity_label) + embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} + else: + embedded_coeffs = dict() + + return embedded_coeffs - def coefficient_labels(self): + def coefficient_labels(self, label_type='global', identity_label='I'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- tuple A tuple of (, [,)` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are real error rates except for the 2-basis-label - case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). 
Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ - return self.coefficients(return_basis=False, logscale_nonham=True) + return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type, identity_label=identity_label) def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham=False, truncate=True): """ @@ -223,14 +276,11 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= Parameters ---------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are the coefficients of these error generators, - and should be real except for the 2-basis-label case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. Values are corresponding rates. 
action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -255,7 +305,9 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= ------- None """ - self.embedded_op.set_coefficients(lindblad_term_dict, action, logscale_nonham, truncate) + if lindblad_term_dict: + unembedded_coeffs = self._unembed_coeff_dict_labels(lindblad_term_dict) + self.embedded_op.set_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) def set_error_rates(self, lindblad_term_dict, action="update"): """ @@ -353,6 +405,6 @@ def __str__(self): """ Return string representation """ s = "Embedded error generator with full dimension %d and state space %s\n" % (self.dim, self.state_space) s += " that embeds the following %d-dimensional operation into acting on the %s space\n" \ - % (self.embedded_op.dim, str(self.targetLabels)) + % (self.embedded_op.dim, str(self.target_labels)) s += str(self.embedded_op) return s diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 0b1735eea..980038fbb 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -10,7 +10,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
#*************************************************************************************************** -import collections as _collections import itertools as _itertools import numpy as _np @@ -19,6 +18,7 @@ from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator from pygsti.modelmembers import modelmember as _modelmember from pygsti.baseobjs.statespace import StateSpace as _StateSpace +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel class EmbeddedOp(_LinearOperator): @@ -58,6 +58,10 @@ def __init__(self, state_space, target_labels, operation_to_embed, allocated_to_ evotype = operation_to_embed._evotype rep = self._create_rep_object(evotype, state_space) + self._cached_embedded_errorgen_labels_global = None + self._cached_embedded_errorgen_labels_local = None + self._cached_embedded_label_identity_label = None + _LinearOperator.__init__(self, rep, evotype) self.init_gpindices(allocated_to_parent) # initialize our gpindices based on sub-members if self._rep_type == 'dense': self._update_denserep() @@ -551,7 +555,8 @@ def transform_inplace(self, s): # s and Sinv matrices... but haven't needed it yet. raise NotImplementedError("Cannot transform an EmbeddedOp yet...") - def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): + #TODO: I don't think the return_basis flag actually works atm. Maybe remove? + def errorgen_coefficients(self, return_basis=False, logscale_nonham=False, label_type='global', identity_label='I'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this operation. @@ -574,44 +579,101 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. 
+ label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are integers starting at 0. Values are complex - coefficients. - basis : Basis - A Basis mapping the basis labels used in the - keys of `lindblad_term_dict` to basis matrices. + embedded_coeffs : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. 
""" #*** Note: this function is nearly identical to EmbeddedErrorgen.coefficients() *** - embedded_coeffs = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham) - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - embedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in embedded_coeffs.items()} + coeffs_to_embed = self.embedded_op.errorgen_coefficients(return_basis, logscale_nonham, label_type) + + if coeffs_to_embed: + embedded_labels = self.errorgen_coefficient_labels(label_type=label_type, identity_label=identity_label) + embedded_coeffs = {lbl:val for lbl, val in zip(embedded_labels, coeffs_to_embed.values())} + else: + embedded_coeffs = dict() + return embedded_coeffs - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global', identity_label='I'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. 
+ Returns ------- tuple A tuple of (, [,0: + if isinstance(labels_to_embed[0], _GlobalElementaryErrorgenLabel): + mapdict = {loc: tgt for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + embedded_labels = tuple([k.map_state_space_labels(mapdict) for k in labels_to_embed]) + self._cached_embedded_errorgen_labels_global = embedded_labels + elif isinstance(labels_to_embed[0], _LocalElementaryErrorgenLabel): + #use different embedding scheme for local labels + embedded_labels = [] + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for lbl in labels_to_embed: + new_bels = [] + for bel in lbl.basis_element_labels: + base_label = [identity_label for _ in range(self.state_space.num_qudits)] + for target, pauli in zip(self.target_labels, bel): + base_label[target] = pauli + new_bels.append(''.join(base_label)) + embedded_labels.append(_LocalElementaryErrorgenLabel(lbl.errorgen_type, tuple(new_bels))) + embedded_labels = tuple(embedded_labels) + self._cached_embedded_errorgen_labels_local = embedded_labels + self._cached_embedded_label_identity_label = identity_label + else: + raise ValueError(f'Invalid error generator label type {labels_to_embed[0]}') + else: + embedded_labels = tuple() return embedded_labels def errorgen_coefficients_array(self): @@ -643,7 +705,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ return self.embedded_op.errorgen_coefficients_array_deriv_wrt_params() - def error_rates(self): + def error_rates(self, label_type='global', identity_label='I'): """ Constructs a dictionary of the error rates associated with this operation. @@ -666,19 +728,31 @@ def error_rates(self): rates is not necessarily the error rate of the overall channel. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. 
Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + + identity_label : str, optional (default 'I') + An optional string specifying the basis element label for the + identity. Used when label_type is 'local' to allow for embedding + local basis element labels into the appropriate higher dimensional + space. Only change when using a basis for which 'I' does not denote + the identity. + Returns ------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are real error rates except for the 2-basis-label - case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ - return self.errorgen_coefficients(return_basis=False, logscale_nonham=True) + return self.errorgen_coefficients(return_basis=False, logscale_nonham=True, label_type=label_type, identity_label=identity_label) def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscale_nonham=False, truncate=True): """ @@ -690,14 +764,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal Parameters ---------- lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - or `"A"` (Affine). 
Hamiltonian and Affine terms always have a - single basis label (so key is a 2-tuple) whereas Stochastic tuples - have 1 basis label to indicate a *diagonal* term and otherwise have - 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Values are the coefficients of these error generators, - and should be real except for the 2-basis-label case. + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. Values are corresponding rates. action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -722,17 +793,47 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal ------- None """ - if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: - mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, - self.target_labels)} - unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} - else: - unembedded_coeffs = lindblad_term_dict - - self.embedded_op.set_errorgen_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) - if self._rep_type == 'dense': self._update_denserep() - self.dirty = True - + #determine is we need to unembed the error generator labels in lindblad_term_dict. 
+ if lindblad_term_dict: + unembedded_coeffs = self._unembed_coeff_dict_labels(lindblad_term_dict) + self.embedded_op.set_errorgen_coefficients(unembedded_coeffs, action, logscale_nonham, truncate) + if self._rep_type == 'dense': self._update_denserep() + self.dirty = True + + def _unembed_coeff_dict_labels(self, lindblad_term_dict): + """ + Helper function encapsulating unembedding logic for error generator labels. + Returns a new dictionary of error generator coefficient rate with unembedded labels. + """ + first_coeff_lbl = next(iter(lindblad_term_dict)) + if isinstance(first_coeff_lbl, _GlobalElementaryErrorgenLabel): + if self.target_labels != self.embedded_op.state_space.sole_tensor_product_block_labels: + mapdict = {tgt: loc for loc, tgt in zip(self.embedded_op.state_space.sole_tensor_product_block_labels, + self.target_labels)} + unembedded_coeffs = {k.map_state_space_labels(mapdict): v for k, v in lindblad_term_dict.items()} + else: + unembedded_coeffs = lindblad_term_dict + elif isinstance(first_coeff_lbl, _LocalElementaryErrorgenLabel): + #if the length of the basis element labels are the same as the length of this + #embedded op's target labels then assume those are associated. + if len(first_coeff_lbl.basis_element_labels[0]) == len(self.target_labels): + unembedded_coeffs = lindblad_term_dict + #if the length is equal to the number of qudits then we need to unembed. + elif len(first_coeff_lbl.basis_element_labels[0]) == self.state_space.num_qudits: + unembedded_labels = list(lindblad_term_dict.keys()) + for lbl in unembedded_labels: + new_bels = [] + for bel in lbl.basis_element_labels: + new_bels.append("".join(bel[target] for target in self.target_labels)) + lbl.basis_element_labels = tuple(new_bels) + unembedded_coeffs = {lbl:val for lbl, val in zip(unembedded_labels, lindblad_term_dict.values())} + else: + msg = "Could not parse error generator labels. 
Expected either length equal to this embedded op's"\ + +" target_labels or equal to the number of qudits." + raise ValueError(msg) + return unembedded_coeffs + + def set_error_rates(self, lindblad_term_dict, action="update"): """ Sets the coeffcients of terms in the error generator of this operation. diff --git a/pygsti/modelmembers/operations/lindbladcoefficients.py b/pygsti/modelmembers/operations/lindbladcoefficients.py index c19986626..f50625218 100644 --- a/pygsti/modelmembers/operations/lindbladcoefficients.py +++ b/pygsti/modelmembers/operations/lindbladcoefficients.py @@ -87,6 +87,11 @@ def __init__(self, block_type, basis, basis_element_labels=None, initial_block_d self._cache_mx = _np.zeros((len(self._bel_labels), len(self._bel_labels)), 'complex') \ if self._block_type == 'other' else None + #this would get set to True in the very next method call anyway + self._coefficients_need_update = True + self._cached_elementary_errorgens = None + self._cached_elementary_errorgen_indices = None + self._set_block_data(initial_block_data, truncate) def _set_block_data(self, block_data, truncate): @@ -115,6 +120,11 @@ def _set_block_data(self, block_data, truncate): self._truncate_block_data(truncate) + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. + self._coefficients_need_update = True + + @property def basis_element_labels(self): return self._bel_labels @@ -136,7 +146,7 @@ def num_params(self): def create_lindblad_term_superoperators(self, mx_basis='pp', sparse="auto", include_1norms=False, flat=False): """ - Compute the superoperator-generators corresponding to the Lindblad coefficiens in this block. + Compute the superoperator-generators corresponding to the Lindblad coefficients in this block. 
TODO: docstring update Returns @@ -335,7 +345,6 @@ def i_im(a, b): return pio + (b * num_bels + a) return Lterms - #TODO: could cache this and update only when needed (would need to add dirty flag logic) @property def elementary_errorgen_indices(self): """ @@ -380,6 +389,9 @@ def elementary_errorgen_indices(self): # this coefficient block's coefficients that product the given (by the key) # elementary error generator. Values are lists of (c_i, index_i) pairs, # such that the given elementary generator == sum_i c_i * coefficients_in_flattened_block[index_i] + if not self._coefficients_need_update and self._cached_elementary_errorgen_indices is not None: + return self._cached_elementary_errorgen_indices + from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LEEL elem_errgen_indices = _collections.OrderedDict() @@ -412,6 +424,8 @@ def elementary_errorgen_indices(self): else: raise ValueError("Internal error: invalid block type!") + self._cached_elementary_errorgen_indices = elem_errgen_indices + return elem_errgen_indices @property @@ -457,7 +471,6 @@ def _block_data_indices(self): return block_data_indices - #TODO: could cache this and update only when needed (would need to add dirty flag logic) @property def elementary_errorgens(self): """ @@ -479,7 +492,10 @@ def elementary_errorgens(self): Specifies `block_data` as a linear combination of elementary error generators. Keys are :class:`LocalElementaryErrorgenLabel` objects and values are floats. 
""" - elementary_errorgens = _collections.OrderedDict() + if not self._coefficients_need_update and self._cached_elementary_errorgens is not None: + return self._cached_elementary_errorgens + + elementary_errorgens = dict() eeg_indices = self.elementary_errorgen_indices flat_data = self.block_data.ravel() @@ -487,6 +503,9 @@ def elementary_errorgens(self): val = _np.sum([coeff * flat_data[index] for coeff, index in linear_combo]) elementary_errorgens[eeg_lbl] = _np.real_if_close(val).item() # item() -> scalar #set_basis_el(lbl, basis[lbl]) # REMOVE + #cache the error generator dictionary for future use + self._cached_elementary_errorgens = elementary_errorgens + self._coefficients_need_update = False return elementary_errorgens @@ -512,6 +531,10 @@ def set_elementary_errorgens(self, elementary_errorgens, on_missing='ignore', tr self.block_data[(slice(None, None),) * self.block_data.ndim] = flat_data.reshape(self.block_data.shape) self._truncate_block_data(truncate) + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. + self._coefficients_need_update = True + return unused_elementary_errorgens def set_from_errorgen_projections(self, errorgen, errorgen_basis='pp', return_projected_errorgen=False, @@ -523,6 +546,11 @@ def set_from_errorgen_projections(self, errorgen, errorgen_basis='pp', return_pr elementary_errorgens = out[0] if return_projected_errorgen else out unused = self.set_elementary_errorgens(elementary_errorgens, on_missing='raise', truncate=truncate) assert(len(unused) == 0) + + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. + self._coefficients_need_update = True + return out[1] if return_projected_errorgen else None @property @@ -780,6 +808,7 @@ def from_vector(self, v): v : numpy.ndarray A 1D array of real parameter values. 
""" + if self._param_mode == 'static': assert(len(v) == 0), "'static' paramterized blocks should have zero parameters!" return # self.block_data remains the same - no update @@ -859,6 +888,10 @@ def from_vector(self, v): % (self._param_mode, self._block_type)) else: raise ValueError("Internal error: invalid block type!") + + #set a flag to indicate that the coefficients (as returned by elementary_errorgens) + #need to be updated. + self._coefficients_need_update = True def deriv_wrt_params(self, v=None): """ diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index bc8d7f807..cd9e9e4b4 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -11,29 +11,23 @@ #*************************************************************************************************** import warnings as _warnings -import collections as _collections -import copy as _copy import itertools as _itertools import numpy as _np import scipy.linalg as _spl import scipy.sparse as _sps -import scipy.sparse.linalg as _spsl from pygsti.baseobjs.opcalc import compact_deriv as _compact_deriv, \ bulk_eval_compact_polynomials_complex as _bulk_eval_compact_polynomials_complex, \ abs_sum_bulk_eval_compact_polynomials_complex as _abs_sum_bulk_eval_compact_polynomials_complex from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock as _LindbladCoefficientBlock -from pygsti.modelmembers import term as _term from pygsti.evotypes import Evotype as _Evotype from pygsti.baseobjs import statespace as _statespace -from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis -from pygsti.baseobjs.polynomial import Polynomial as _Polynomial +from pygsti.baseobjs.basis import Basis as _Basis from pygsti.baseobjs.nicelyserializable import NicelySerializable 
as _NicelySerializable from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel -from pygsti.tools import basistools as _bt from pygsti.tools import matrixtools as _mt from pygsti.tools import optools as _ot @@ -42,53 +36,65 @@ class LindbladErrorgen(_LinearOperator): """ - An Lindblad-form error generator. - - This error generator consisting of terms that, with appropriate constraints - ensurse that the resulting (after exponentiation) operation/layer operation - is CPTP. These terms can be divided into "Hamiltonian"-type terms, which - map rho -> i[H,rho] and "non-Hamiltonian"/"other"-type terms, which map rho - -> A rho B + 0.5*(ABrho + rhoAB). + A class for representing noisy quantum operations using Lindblad error generators. """ - _generators_cache = {} # a custom cache for _init_generators method calls - @classmethod - def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks, lindblad_basis='auto', + def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks, elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): - """ - Create a Lindblad-parameterized error generator from an operation matrix and coefficient blocks. + Creates a Lindblad-parameterized error generator from an operation and a set + of `LindbladCoefficientBlock`s. + + Here "operation" means the exponentiated error generator, so this method + essentially takes the matrix log of `op_matrix` and constructs an error + generator from this by subsequently projecting this constructed error generator + onto the specified `LindbladCoefficientBlock`s. Note that since these blocks are + user specified this projection may not be complete. E.g. 
passing in a general operation consisting + of non-trivial 'H', 'S', 'C' and 'A' generators together with a single `LindbladCoefficientBlock` + for storing 'H' terms will result in an 'H'-only generator. Parameters ---------- op_matrix : numpy array or SciPy sparse matrix - A square 2D array that gives the raw operation matrix, assumed to be in the `mx_basis` basis. - The shape of this array sets the dimension of the operation. - - lindblad_coefficient_blocks : list - A list of Lindblad coefficient blocks to set from the error generator projections. - - lindblad_basis : {'auto', 'PP', 'std', 'gm', 'qt'}, optional - The basis used for Lindblad terms. Default is 'auto'. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. + a square 2D array that gives the raw operation matrix, assumed to + be in the `mx_basis` basis, to parameterize. The shape of this + array sets the dimension of the operation. + + lindblad_coefficient_blocks : list of `LindbladCoefficientBlocks` + List of `LindbladCoefficientBlocks` for storing the input error generator data + given the projections onto these blocks. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in order to meet constraints. - Default is True. (e.g. to preserve CPTP) when necessary. If False, then an error is thrown - when the Lindblad terms don't conform to the constrains. 
+ Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - evotype : {"default", "densitymx", "svterm", "cterm"}, optional - The evolution type of the error generator being constructed. Default is "default". + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. - state_space : StateSpace, optional - The state space for the error generator. Default is None. + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. 
Returns ------- - LindbladErrorgen + `LindbladErrorgen` """ sparseOp = _sps.issparse(op_matrix) @@ -110,10 +116,10 @@ def from_operation_matrix_and_blocks(cls, op_matrix, lindblad_coefficient_blocks mx_basis, "logGTi") for blk in lindblad_coefficient_blocks: blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) - return cls(lindblad_coefficient_blocks, lindblad_basis, mx_basis, evotype, state_space) + return cls(lindblad_coefficient_blocks, elementary_errorgen_basis, mx_basis, evotype, state_space) @classmethod - def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basis='PP', + def from_operation_matrix(cls, op_matrix, parameterization='CPTPLND', elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ Creates a Lindblad-parameterized error generator from an operation. @@ -125,31 +131,56 @@ def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basi Parameters ---------- op_matrix : numpy array or SciPy sparse matrix - A square 2D array that gives the raw operation matrix, assumed to be in the `mx_basis` basis. - The shape of this array sets the dimension of the operation. - - parameterization : str, optional (default 'CPTP') - Describes how the Lindblad coefficients/projections relate to the error generator's parameter values. - Default is "CPTP". Supported strings are those castable to `LindbladParameterization`. See - `LindbladParameterization` for supported options. - - lindblad_basis : {'PP', 'std', 'gm', 'qt'}, optional - The basis used for Lindblad terms. Default is 'PP'. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. Allowed - values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt) (or a custom basis object). + a square 2D array that gives the raw operation matrix, assumed to + be in the `mx_basis` basis, to parameterize. 
The shape of this + array sets the dimension of the operation. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. + See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. + - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in order to meet constraints. - Default is True. + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. 
+ If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - evotype : {"default", "densitymx", "svterm", "cterm"}, optional - The evolution type of the error generator being constructed. Default is "default". + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. - state_space : StateSpace, optional - The state space for the error generator. Default is None. + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. + + Returns + ------- + `LindbladErrorgen` """ #Compute an errorgen from the given op_matrix. Works with both @@ -172,142 +203,142 @@ def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basi else: errgenMx = _ot.error_generator(op_matrix, _np.identity(op_matrix.shape[0], 'd'), mx_basis, "logGTi") - return cls.from_error_generator(errgenMx, parameterization, lindblad_basis, + return cls.from_error_generator(errgenMx, parameterization, elementary_errorgen_basis, mx_basis, truncate, evotype, state_space=state_space) @classmethod - def from_error_generator(cls, errgen_or_dim, parameterization="CPTP", lindblad_basis='PP', mx_basis='pp', + def from_error_generator(cls, errgen_or_dim, parameterization="CPTPLND", elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ - Create a Lindblad-parameterized error generator from an error generator matrix or dimension. 
- + Construct a new `LindbladErrorgen` instance instantiated using a dense numpy array or sparse + scipy array representation. + Parameters ---------- - errgen_or_dim : numpy array, SciPy sparse matrix, or int - A square 2D array that gives the full error generator or an integer specifying the dimension - of a zero error generator. - - parameterization : str, optional (default 'CPTP') - Describes how the Lindblad coefficients/projections relate to the error generator's parameter values. - Default is "CPTP". Supported strings are those castable to `LindbladParameterization`. See - `LindbladParameterization` for supported options. - - lindblad_basis : {'PP', 'std', 'gm', 'qt'}, optional - The basis used for Lindblad terms. Default is 'PP'. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. Allowed - values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), - and Qutrit (qt) (or a custom basis object). + errgen_or_dim : numpy array or SciPy sparse matrix or int + A square 2D array that gives the full error generator, or an integer specifying the dimension + of an empty (all-zeros) 2D array to construct. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. + See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. 
+ - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in order to meet constraints. - Default is True. + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - evotype : {"default", "densitymx", "svterm", "cterm"}, optional - The evolution type of the error generator being constructed. Default is "default". + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. - state_space : StateSpace, optional - The state space for the error generator. 
Default is None. + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. Returns ------- - LindbladErrorgen + `LindbladErrorgen` """ errgen = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim - return cls._from_error_generator(errgen, parameterization, lindblad_basis, + return cls._from_error_generator(errgen, parameterization, elementary_errorgen_basis, mx_basis, truncate, evotype, state_space) @classmethod def from_error_generator_and_blocks(cls, errgen_or_dim, lindblad_coefficient_blocks, - lindblad_basis='PP', mx_basis='pp', + elementary_errorgen_basis='PP', mx_basis='pp', truncate=True, evotype="default", state_space=None): """ - Create a Lindblad-parameterized error generator from an error generator matrix or dimension and coefficient blocks. - - Parameters - ---------- - errgen_or_dim : numpy array, SciPy sparse matrix, or int - A square 2D array that gives the full error generator or an integer specifying the dimension - of a zero error generator. + Creates a Lindblad-parameterized error generator from an operation and a set + of `LindbladCoefficientBlock`s. - lindblad_coefficient_blocks : list - A list of Lindblad coefficient blocks to set from the error generator projections. - - lindblad_basis : {'PP', 'std', 'gm', 'qt'}, optional - The basis used for Lindblad terms. Default is 'PP'. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. - - truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in order to meet constraints. - Default is True. - - evotype : {"default", "densitymx", "svterm", "cterm"}, optional - The evolution type of the error generator being constructed. Default is "default". - - state_space : StateSpace, optional - The state space for the error generator. Default is None. 
- - Returns - ------- - LindbladErrorgen - """ - errgenMx = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ - if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim - for blk in lindblad_coefficient_blocks: - blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) - return cls(lindblad_coefficient_blocks, lindblad_basis, mx_basis, evotype, state_space) - - @classmethod - def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis="PP", - mx_basis="pp", truncate=True, evotype="default", state_space=None): - """ - Create a Lindblad-form error generator from an error generator matrix and a basis. - - The basis specifies how to decompose (project) the error generator. + Here "operation" means the exponentiated error generator, so this method + essentially takes the matrix log of `op_matrix` and constructs an error + generator from this by subsequently projecting this constructed error generator + onto the specified `LindbladCoefficientBlock`s. Note that since these blocks are + user specified this projection may not be complete. E.g. passing in a general operation consisting + of non-trivial 'H', 'S', 'C' and 'A' generators together with a single `LindbladCoefficientBlock` + for storing 'H' terms will result in an 'H'-only generator. Parameters ---------- - errgen : numpy array or SciPy sparse matrix - a square 2D array that gives the full error generator. The shape of - this array sets the dimension of the operator. The projections of - this quantity are closely related to the parameters of the error - generator (they may not be exactly equal if parameterization = 'CPTP'). - - lindblad_basis : {'PP', 'std', 'gm', 'qt'}, optional - The basis used for Lindblad terms. Default is 'PP'. - - parameterization : str, optional (default 'CPTP') - Describes how the Lindblad coefficients/projections relate to the error generator's parameter values. - Default is "CPTP". 
Supported strings are those castable to `LindbladParameterization`. See - `LindbladParameterization` for supported options. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. + errgen_or_dim : numpy array or SciPy sparse matrix or int + A square 2D array that gives the full error generator, or an integer specifying the dimension + of an empty (all-zeros) 2D array to construct. + + lindblad_coefficient_blocks : list of `LindbladCoefficientBlocks` + List of `LindbladCoefficientBlocks` for storing the input error generator data + given the projections onto these blocks. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in order to meet constraints (e.g. to preserve CPTP) when necessary. - If False, then an error is thrown when the given `errgen` cannot - be realized by the specified set of Lindblad projections. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - evotype : {"densitymx","svterm","cterm"} + evotype : str or `Evotype`, optional (default 'default') The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. `"densitymx"` means usual Lioville density-matrix-vector propagation via matrix-vector products. 
`"svterm"` denotes state-vector term- based evolution (action of operation is obtained by evaluating the rank-1 terms up to some order). `"cterm"` is similar but uses Clifford operation action on stabilizer states. - state_space : StateSpace, optional - The state space for the error generator. Default is None. + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. Returns ------- - LindbladErrorgen + `LindbladErrorgen` + """ + errgenMx = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \ + if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim + for blk in lindblad_coefficient_blocks: + blk.set_from_errorgen_projections(errgenMx, mx_basis, truncate=truncate) + return cls(lindblad_coefficient_blocks, elementary_errorgen_basis, mx_basis, evotype, state_space) + + @classmethod + def _from_error_generator(cls, errgen, parameterization="CPTPLND", elementary_errorgen_basis="PP", + mx_basis="pp", truncate=True, evotype="default", state_space=None): + """ + See `from_error_generator` for more details. """ dim = errgen.shape[0] @@ -318,12 +349,12 @@ def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis=" # given to us are sparse or not and make them all consistent # (maybe this is needed by lindblad_errorgen_projections call below?) 
sparse = None - if isinstance(lindblad_basis, _Basis): - sparse = lindblad_basis.sparse + if isinstance(elementary_errorgen_basis, _Basis): + sparse = elementary_errorgen_basis.sparse else: - if isinstance(lindblad_basis, str): sparse = _sps.issparse(errgen) - elif len(lindblad_basis) > 0: sparse = _sps.issparse(lindblad_basis[0]) - lindblad_basis = _Basis.cast(lindblad_basis, dim, sparse=sparse) + if isinstance(elementary_errorgen_basis, str): sparse = _sps.issparse(errgen) + elif len(elementary_errorgen_basis) > 0: sparse = _sps.issparse(elementary_errorgen_basis[0]) + elementary_errorgen_basis = _Basis.cast(elementary_errorgen_basis, dim, sparse=sparse) if sparse is None: sparse = False # the default @@ -338,66 +369,90 @@ def _from_error_generator(cls, errgen, parameterization="CPTP", lindblad_basis=" # Create blocks based on bases along - no specific errorgen labels blocks = [] for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): - blk = _LindbladCoefficientBlock(blk_type, lindblad_basis, param_mode=blk_param_mode) + blk = _LindbladCoefficientBlock(blk_type, elementary_errorgen_basis, param_mode=blk_param_mode) blk.set_from_errorgen_projections(errgen, matrix_basis, truncate=truncate) blocks.append(blk) return cls(blocks, "auto", mx_basis, evotype, state_space) + #TODO: Need to make the construction robust to empty elementary_errorgens dictionaries. @classmethod def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto', elementary_errorgen_basis='PP', - mx_basis="pp", truncate=True, evotype="default", state_space=None): + mx_basis="pp", truncate=True, evotype="default", state_space=None): """ - Create a Lindblad-parameterized error generator from elementary error generators. - + Construct a new `LindbladErrorgen` instance instantiated using a dictionary of elementary error generator + coefficients and rates. 
+ Parameters ---------- elementary_errorgens : dict - A dictionary of elementary error generators. Keys are labels specifying the type and basis - elements of the elementary error generators, and values are the corresponding coefficients. - Keys are `(termType, basisLabel1, )` tuples, where `termType` is - `"H"` (Hamiltonian), `"S"` (Stochastic), `"C"` (Correlation) or `"A"` (Active). - Hamiltonian and Stochastic terms always have a single basis label (so key is a 2-tuple) - whereas C and A tuples have 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are pauli strings. Values are coefficients. - - parameterization : str, optional (default 'CPTP') - Describes how the Lindblad coefficients/projections relate to the error generator's parameter values. - Default is "CPTP". Supported strings are those castable to `LindbladParameterization`. See - `LindbladParameterization` for supported options. - - elementary_errorgen_basis : {'PP', 'std', 'gm', 'qt'}, optional - The basis used for the elementary error generators. Default is 'PP'. - - mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object, optional - The basis for this error generator's linear mapping. Default is 'pp'. + A dictionary whose keys are `ElementaryErrogenLabel`s and whose values are corresponding error generator rates. + + parameterization: `LindbladParameterization` or str castable to `LindbladParameterization`, optional (default 'auto') + Either an instance of `LindbladParameterization` or a string castable to a + valid `LindbladParameterization`. This object specifies the internal parameterizations + and coefficient blocks required for storing the data associated with this error generator + and requisite for enforcing appropriate constraints. See documentation of `LindbladParameterization` + for more details, but common examples include: + + - 'auto': A minimal parameterization is inferred based on the contents of `elementary_errorgens`. 
+ See the `minimal_from_elementary_errorgens` method of `LindbladParameterization` for more. + - 'CPTPLND': A CPTP-constrained error generator parameterization + - 'GLND': General Lindbladian, a non-CP (but still TP) constrained parameterization + - 'H': A Hamiltonian-only parameterization (no 'S', 'C', or 'A' terms) + - 'H+S': A Hamiltonian + Stochastic only parameterization (no 'C' or 'A' terms). + + See aforementioned documentation for more details. + + elementary_errorgen_basis: str or 'Basis', optional (default 'PP') + The basis is used to construct the elementary error generator basis elements. + Should be compatible with the basis element subscripts labeling the coefficients + in `elementary_errorgens`. Most commonly 'PP', the unnormalized Pauli-product basis. + + mx_basis : str or Basis object + The basis in which to return matrix representation of the constructed error generator. + E.g. 'pp', 'gm', 'std', etc... truncate : bool, optional - Whether to truncate the projections onto the Lindblad terms in order to meet constraints. - Default is True. + Whether to truncate the projections onto the Lindblad terms in + order to meet constraints (e.g. to preserve CPTP) when necessary. + If False, then an error is thrown when the given error generator cannot + be realized by the specified parameterization. - evotype : {"default", "densitymx", "svterm", "cterm"}, optional - The evolution type of the error generator being constructed. Default is "default". + evotype : str or `Evotype`, optional (default 'default') + The evolution type of the error generator being constructed. + When specifying 'default' the evotype is automatically inferred/chosen. + `"densitymx"` means usual Lioville density-matrix-vector propagation + via matrix-vector products. `"svterm"` denotes state-vector term- + based evolution (action of operation is obtained by evaluating the rank-1 + terms up to some order). `"cterm"` is similar but uses Clifford operation + action on stabilizer states. 
- state_space : StateSpace, optional - The state space for the error generator. Default is None. + state_space : `StateSpace` or castable to `StateSpace` + The state space upon which this error generator acts. Returns ------- - LindbladErrorgen + `LindbladErrorgen` """ - + if state_space is None: + raise ValueError('Must specify a state space when using `from_elementary_errorgens`.') state_space = _statespace.StateSpace.cast(state_space) dim = state_space.dim # Store superop dimension basis = _Basis.cast(elementary_errorgen_basis, dim) - #convert elementary errorgen labels to *local* labels (ok to specify w/global labels) - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = state_space.sole_tensor_product_block_labels # first TPB labels == all labels - elementary_errorgens = _collections.OrderedDict( - [(_LocalElementaryErrorgenLabel.cast(lbl, sslbls, identity_label_1Q), val) - for lbl, val in elementary_errorgens.items()]) - + #check the first key, if local then no need to convert, otherwise convert from global. + if elementary_errorgens: + first_key = next(iter(elementary_errorgens)) + if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): + #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elementary_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v + for k, v in elementary_errorgens.items()} + else: + assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' 
+ parameterization = LindbladParameterization.minimal_from_elementary_errorgens(elementary_errorgens) \ if parameterization == "auto" else LindbladParameterization.cast(parameterization) @@ -410,14 +465,15 @@ def from_elementary_errorgens(cls, elementary_errorgens, parameterization='auto' blocks = [] for blk_type, blk_param_mode in zip(parameterization.block_types, parameterization.param_modes): relevant_eegs = eegs_by_typ[blk_type] # KeyError => unrecognized block type! - bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) - blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) - blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) - blocks.append(blk) - + #only add block type is relevant_eegs is not empty. + if relevant_eegs: + bels = sorted(set(_itertools.chain(*[lbl.basis_element_labels for lbl in relevant_eegs.keys()]))) + blk = _LindbladCoefficientBlock(blk_type, basis, bels, param_mode=blk_param_mode) + blk.set_elementary_errorgens(relevant_eegs, truncate=truncate) + blocks.append(blk) return cls(blocks, basis, mx_basis, evotype, state_space) - def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis='pp', + def __init__(self, lindblad_coefficient_blocks, elementary_errorgen_basis='auto', mx_basis='pp', evotype="default", state_space=None): """ @@ -467,15 +523,18 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= raise ValueError("Evotype doesn't support any of the representations a LindbladErrorgen requires.") sparse_bases = bool(self._rep_type == 'sparse superop') # we use sparse bases iff we have a sparse rep - if lindblad_basis == "auto": + state_space = _statespace.StateSpace.cast(state_space) + dim = state_space.dim # Store superop dimension + + if elementary_errorgen_basis == "auto": assert(all([(blk._basis is not None) for blk in lindblad_coefficient_blocks])), \ - "When `lindblad_basis == 'auto'`, the supplied 
coefficient blocks must have valid bases!" - default_lindblad_basis = None + "When `elementary_errorgen_basis == 'auto'`, the supplied coefficient blocks must have valid bases!" + default_elementary_errorgen_basis = None else: - default_lindblad_basis = _Basis.cast(lindblad_basis, dim, sparse=sparse_bases) + default_elementary_errorgen_basis = _Basis.cast(elementary_errorgen_basis, dim, sparse=sparse_bases) for blk in lindblad_coefficient_blocks: - if blk._basis is None: blk._basis = default_lindblad_basis + if blk._basis is None: blk._basis = default_elementary_errorgen_basis elif blk._basis.sparse != sparse_bases: # update block bases to desired sparsity if needed blk._basis = blk._basis.with_sparsity(sparse_bases) @@ -502,7 +561,8 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= for blk in lindblad_coefficient_blocks] #combine all of the linblad term superoperators across the blocks to a single concatenated tensor. - self.combined_lindblad_term_superops = _np.concatenate([Lterm_superops for (Lterm_superops, _) in self.lindblad_term_superops_and_1norms], axis=0) + self.combined_lindblad_term_superops = _np.concatenate([Lterm_superops for (Lterm_superops, _) in + self.lindblad_term_superops_and_1norms], axis=0) #Create a representation of the type chosen above: if self._rep_type == 'lindblad errorgen': @@ -538,8 +598,6 @@ def __init__(self, lindblad_coefficient_blocks, lindblad_basis='auto', mx_basis= [blk.param_labels for blk in self.coefficient_blocks])), dtype=object) assert(self._onenorm_upbound is not None) # _update_rep should set this - # Done with __init__(...) 
- def _init_terms(self, coefficient_blocks, max_polynomial_vars): Lterms = []; off = 0 @@ -559,21 +617,6 @@ def _init_terms(self, coefficient_blocks, max_polynomial_vars): ctape = _np.empty(0, complex) coeffs_as_compact_polys = (vtape, ctape) - #DEBUG TODO REMOVE (and make into test) - check norm of rank-1 terms - # (Note: doesn't work for Clifford terms, which have no .base): - # rho =OP=> coeff * A rho B - # want to bound | coeff * Tr(E Op rho) | = | coeff | * | | - # so A and B should be unitary so that | | <= 1 - # but typically these are unitaries / (sqrt(2)*nqubits) - #import bpdb; bpdb.set_trace() - #scale = 1.0 - #for t in Lterms: - # for op in t._rep.pre_ops: - # test = _np.dot(_np.conjugate(scale * op.base.T), scale * op.base) - # assert(_np.allclose(test, _np.identity(test.shape[0], 'd'))) - # for op in t._rep.post_ops: - # test = _np.dot(_np.conjugate(scale * op.base.T), scale * op.base) - # assert(_np.allclose(test, _np.identity(test.shape[0], 'd'))) return Lterms, coeffs_as_compact_polys def _set_params_from_matrix(self, errgen, truncate): @@ -593,7 +636,6 @@ def _set_params_from_matrix(self, errgen, truncate): off += blk.num_params self._update_rep() - #assert(_np.allclose(errgen, self.to_dense())) # DEBUG def _update_rep(self): """ @@ -796,23 +838,6 @@ def total_term_magnitude_deriv(self): assert(_np.linalg.norm(_np.imag(ret)) < 1e-8) return ret.real - #DEBUG - #ret2 = _np.empty(self.num_params,'d') - #eps = 1e-8 - #orig_vec = self.to_vector().copy() - #f0 = sum([abs(coeff) for coeff in coeff_values]) - #for i in range(self.num_params): - # v = orig_vec.copy() - # v[i] += eps - # new_coeff_values = _bulk_eval_compact_polynomials_complex(vtape, ctape, v, (len(self.Lterms),)) - # ret2[i] = ( sum([abs(coeff) for coeff in new_coeff_values]) - f0 ) / eps - - #test3 = _np.linalg.norm(ret-ret2) - #print("TEST3 = ",test3) - #if test3 > 10.0: - # import bpdb; bpdb.set_trace() - #return ret - @property def num_params(self): """ @@ -871,13 +896,13 @@ def 
from_vector(self, v, close=False, dirty_value=True): self._update_rep() self.dirty = dirty_value - def coefficients(self, return_basis=False, logscale_nonham=False): + def coefficients(self, return_basis=False, logscale_nonham=False, label_type='global'): """ Constructs a dictionary of the Lindblad-error-generator coefficients of this error generator. - Note that these are not necessarily the parameter values, as these - coefficients are generally functions of the parameters (so as to keep - the coefficients positive, for instance). + Note that these are not necessarily the parameter values as those parameter value + correspond to the internal representation utilized, which may be constructed to + enforce positivity constraints, for instance. Parameters ---------- @@ -893,34 +918,53 @@ def coefficients(self, return_basis=False, logscale_nonham=False): the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. This is the value returned by :meth:`error_rates`. + + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. Returns ------- - Ltermdict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - `"C"` (Correlation) or `"A"` (Active). Hamiltonian and Stochastic terms - always have a single basis label (so key is a 2-tuple) whereas C and A tuples - have 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are pauli strings. Values are coefficients. + elem_errorgens : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. 
+ Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. + basis : Basis A Basis mapping the basis labels used in the - keys of `Ltermdict` to basis matrices. + keys of `elem_errorgens` to basis matrices. """ - elem_errorgens = {} - bases = set() - for blk in self.coefficient_blocks: - elem_errorgens.update(blk.elementary_errorgens) - if blk._basis not in bases: - bases.add(blk._basis) - - #convert to *global* elementary errorgen labels - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - elem_errorgens = _collections.OrderedDict( - [(_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q), value) - for local_eeg_lbl, value in elem_errorgens.items()]) + assert label_type=='global' or label_type=='local', "Allowed values of label_type are 'global' and 'local'." + elem_errorgens = {} + + if return_basis: + bases = set() + for blk in self.coefficient_blocks: + elem_errorgens.update(blk.elementary_errorgens) + if blk._basis not in bases: + bases.add(blk._basis) + else: #split this off to avoid expensive basis hashing and equivalence checking if not needed. + for blk in self.coefficient_blocks: + elem_errorgens.update(blk.elementary_errorgens) + + first_key = next(iter(elem_errorgens)) + if label_type=='global' and isinstance(first_key, _LocalElementaryErrorgenLabel): + #convert to *global* elementary errorgen labels + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
+ sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q): value + for local_eeg_lbl, value in elem_errorgens.items()} + elif label_type=='local' and isinstance(first_key, _GlobalElementaryErrorgenLabel): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_LocalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q): value + for local_eeg_lbl, value in elem_errorgens.items()} + if logscale_nonham: dim = self.dim for k in elem_errorgens.keys(): @@ -935,26 +979,47 @@ def coefficients(self, return_basis=False, logscale_nonham=False): else: return elem_errorgens - def coefficient_labels(self): + def coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- - tuple - A tuple of (, [,)` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ labels = [] for blk in self.coefficient_blocks: #labels.extend(blk.coefficent_labels) labels.extend(blk.elementary_errorgens.keys()) - #convert to *global* elementary errorgen labels - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
- sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - return tuple([_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) - for local_eeg_lbl in labels]) + first_label = labels[0] if len(labels)>0 else None + + if label_type == 'global' and isinstance(first_label, _LocalElementaryErrorgenLabel): + #convert to *global* elementary errorgen labels + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + labels = [_GlobalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) + for local_eeg_lbl in labels] + elif label_type=='local' and isinstance(first_label, _GlobalElementaryErrorgenLabel): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? + sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + labels = [_LocalElementaryErrorgenLabel.cast(local_eeg_lbl, sslbls, identity_label_1Q) + for local_eeg_lbl in labels] + return tuple(labels) + def coefficients_array(self): """ @@ -971,7 +1036,6 @@ def coefficients_array(self): combination of standard error generators that is this error generator. """ # Note: ret will be complex if any block's data is - #ret = _np.concatenate([blk.block_data.flat for blk in self.coefficient_blocks]) ret = _np.concatenate([list(blk.elementary_errorgens.values()) for blk in self.coefficient_blocks]) if self._coefficient_weights is not None: ret *= self._coefficient_weights @@ -1003,7 +1067,7 @@ def coefficients_array_deriv_wrt_params(self): ret *= self._coefficient_weights[:, None] return ret - def error_rates(self): + def error_rates(self, label_type='global'): """ Constructs a dictionary of the error rates associated with this error generator. @@ -1028,35 +1092,42 @@ def error_rates(self): rates is not necessarily the error rate of the overall channel. 
+ Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- - Ltermdict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - `"C"` (Correlation) or `"A"` (Active). Hamiltonian and Stochastic terms - always have a single basis label (so key is a 2-tuple) whereas C and A tuples - have 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are pauli strings. Values are coefficients. - Values are real error rates except for the 2-basis-label case. + elem_errorgens : dict + Keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. """ - return self.coefficients(return_basis=False, logscale_nonham=True) + return self.coefficients(return_basis=False, logscale_nonham=True, label_type=label_type) def set_coefficients(self, elementary_errorgens, action="update", logscale_nonham=False, truncate=True): """ Sets the coefficients of elementary error generator terms in this error generator. - The dictionary `lindblad_term_dict` has tuple-keys describing the type - of term and the basis elements used to construct it, e.g. `('H','X')`. + The dictionary `elementary_errorgens` has keys which are `ElementaryErrorgenLabel`s + describing the type of term and the basis elements used to construct it, e.g. `('H','X')`, + together with the corresponding rates. 
Parameters ---------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - `"C"` (Correlation) or `"A"` (Active). Hamiltonian and Stochastic terms - always have a single basis label (so key is a 2-tuple) whereas C and A tuples - have 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are pauli strings. + elementary_errorgens : dict + Dictionary whose keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient, and whose + values are corresponding error generator rates for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -1081,12 +1152,16 @@ def set_coefficients(self, elementary_errorgens, action="update", logscale_nonha ------- None """ - #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): - identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? - sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels - elem_errorgens = _collections.OrderedDict( - [(_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q), v) - for k, v in elementary_errorgens.items()]) + #check the first key, if local then no need to convert, otherwise convert from global. + first_key = next(iter(elementary_errorgens)) + if isinstance(first_key, (_GlobalElementaryErrorgenLabel, tuple)): + #convert keys to local elementary errorgen labels (the same as those used by the coefficient blocks): + identity_label_1Q = 'I' # maybe we could get this from a 1Q basis somewhere? 
+ sslbls = self.state_space.sole_tensor_product_block_labels # take first TPB labels as all labels + elem_errorgens = {_LocalElementaryErrorgenLabel.cast(k, sslbls, identity_label_1Q): v + for k, v in elementary_errorgens.items()} + else: + assert isinstance(first_key, _LocalElementaryErrorgenLabel), 'Unsupported error generator label type as key.' processed = set() # keep track of which entries in elem_errorgens have been processed by a block for blk in self.coefficient_blocks: @@ -1132,18 +1207,22 @@ def set_error_rates(self, elementary_errorgens, action="update"): Sets the coeffcients of elementary error generator terms in this error generator. Coefficients are set so that the contributions of the resulting - channel's error rate are given by the values in `lindblad_term_dict`. + channel's error rate are given by the values in `elementary_errorgens`. See :meth:`error_rates` for more details. Parameters ---------- - lindblad_term_dict : dict - Keys are `(termType, basisLabel1, )` - tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), - `"C"` (Correlation) or `"A"` (Active). Hamiltonian and Stochastic terms - always have a single basis label (so key is a 2-tuple) whereas C and A tuples - have 2 basis labels to specify off-diagonal non-Hamiltonian Lindblad - terms. Basis labels are pauli strings. + elementary_errorgens : dict + Dictionary whose keys are instances of `ElementaryErrorgenLabel`, which wrap the + `(termType, basisLabel1, )` information for each coefficient, and whose + values are corresponding error generator rates for each coefficient. + Where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), + `"C"`(Correlation) or `"A"` (Affine). Hamiltonian and S terms always have a + single basis label while 'C' and 'A' terms have two. + + action : {"update","add","reset"} + How the values in `lindblad_term_dict` should be combined with existing + error-generator coefficients. 
action : {"update","add","reset"} How the values in `lindblad_term_dict` should be combined with existing @@ -1155,21 +1234,13 @@ def set_error_rates(self, elementary_errorgens, action="update"): """ self.set_coefficients(elementary_errorgens, action, logscale_nonham=True) - def coefficient_weights(self, weights): + def coefficient_weights(self): """ - Get the non-default coefficient weights. - - This method returns a dictionary of coefficient weights that are not equal to the default value of 1.0. - - Parameters - ---------- - weights : dict - A dictionary where keys are coefficient labels and values are the corresponding weights. - - Returns - ------- - dict - A dictionary where keys are coefficient labels and values are the corresponding weights that are not equal to 1.0. + Return a dictionary whose keys are error generator coefficients, as given by + :method:`coefficient_labels`, and whose value are the weights that have been specified + for those coefficients. Note that weight != rate! These weights are used in conjunction + with certain penalty factor options available in the construction of objective functions + for parameters estimation purposes, and are not generally used outside of that setting. """ coeff_labels = self.coefficient_labels() lbl_lookup = {i: lbl for i, lbl in enumerate(coeff_labels)} @@ -1185,10 +1256,12 @@ def coefficient_weights(self, weights): def set_coefficient_weights(self, weights): """ - Set the coefficient weights. - - This method sets the weights for the coefficients of the error generator. If the coefficient weights - array is not initialized, it initializes it to an array of ones. + Set the weights for the error generator coefficients in this error generator using a + dictionary whose keys are error generator coefficients, as given by + :method:`coefficient_labels`, and whose value are the weights that have been specified + for those coefficients. Note that weight != rate! 
These weights are used in conjunction + with certain penalty factor options available in the construction of objective functions + for parameters estimation purposes, and are not generally used outside of that setting. Parameters ---------- @@ -1244,6 +1317,7 @@ def transform_inplace(self, s): raise ValueError("Invalid transform for this LindbladErrorgen: type %s" % str(type(s))) + def deriv_wrt_params(self, wrt_filter=None): """ The element-wise derivative this operation. @@ -1391,6 +1465,7 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space']) coeff_blocks = [_LindbladCoefficientBlock.from_nice_serialization(blk) for blk in mm_dict['coefficient_blocks']] + ret = cls(coeff_blocks, 'auto', mx_basis, mm_dict['evotype'], state_space) #reinitialize the paramvals attribute from memoized dict. Rederiving this from the block data has #been leading to sign ambiguity on deserialization. @@ -1408,10 +1483,10 @@ def __str__(self): (self.dim, self.num_params) return s - def _oneline_contents(self): + def _oneline_contents(self, label_type='global'): """ Summarizes the contents of this object in a single line. Does not summarize submembers. 
""" MAXLEN = 60 - coeff_dict = self.coefficients(); s = "" + coeff_dict = self.coefficients(label_type=label_type); s = "" for lbl, val in coeff_dict.items(): if len(s) > MAXLEN: s += "..."; break @@ -1479,10 +1554,8 @@ def minimal_from_elementary_errorgens(cls, errs): if any([lbl.errorgen_type == 'S' for lbl in errs]): paramtypes.append('S') if any([lbl.errorgen_type == 'C' for lbl in errs]): paramtypes.append('C') if any([lbl.errorgen_type == 'A' for lbl in errs]): paramtypes.append('A') - #if any([lbl.errorgen_type == 'S' and len(lbl.basis_element_labels) == 2 for lbl in errs]): - # # parameterization must be "CPTP" if there are any ('S',b1,b2) keys if 'C' in paramtypes or 'A' in paramtypes: - parameterization = "CPTP" + parameterization = "CPTPLND" else: parameterization = '+'.join(paramtypes) return cls.cast(parameterization) diff --git a/pygsti/modelmembers/povms/composedpovm.py b/pygsti/modelmembers/povms/composedpovm.py index 0e3aec77a..345ccbfbe 100644 --- a/pygsti/modelmembers/povms/composedpovm.py +++ b/pygsti/modelmembers/povms/composedpovm.py @@ -370,17 +370,25 @@ def __str__(self): % (len(self)) return s - def errorgen_coefficient_labels(self): + def errorgen_coefficient_labels(self, label_type='global'): """ The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. + Parameters + ---------- + label_type : str, optional (default 'global') + String specifying which type of `ElementaryErrorgenLabel` to use + as the keys for the returned dictionary. Allowed options are + 'global' for `GlobalElementaryErrorgenLabel` and 'local' for + `LocalElementaryErrorgenLabel`. + Returns ------- tuple A tuple of (, [,, [, 0: diff --git a/pygsti/tools/basistools.py b/pygsti/tools/basistools.py index b8cdcfc22..0d8bdea85 100644 --- a/pygsti/tools/basistools.py +++ b/pygsti/tools/basistools.py @@ -10,7 +10,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. 
#*************************************************************************************************** -from functools import partial +from functools import partial, lru_cache import numpy as _np @@ -18,7 +18,7 @@ # from ..baseobjs.basis import Basis, BuiltinBasis, DirectSumBasis from pygsti.baseobjs import basis as _basis - +@lru_cache(maxsize=1) def basis_matrices(name_or_basis, dim, sparse=False): """ Get the elements of the specifed basis-type which spans the density-matrix space given by `dim`. @@ -118,14 +118,14 @@ def is_sparse_basis(name_or_basis): return False -def change_basis(mx, from_basis, to_basis): +def change_basis(mx, from_basis, to_basis, expect_real=True): """ Convert a operation matrix from one basis of a density matrix space to another. Parameters ---------- mx : numpy array - The operation matrix (a 2D square array) in the `from_basis` basis. + The operation matrix (a 2D square array or 1D vector) in the `from_basis` basis. from_basis: {'std', 'gm', 'pp', 'qt'} or Basis object The source basis. Allowed values are Matrix-unit (std), Gell-Mann (gm), @@ -134,6 +134,10 @@ def change_basis(mx, from_basis, to_basis): to_basis : {'std', 'gm', 'pp', 'qt'} or Basis object The destination basis. Allowed values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt) (or a custom basis object). + + expect_real : bool, optional (default True) + Optional flag specifying whether it is expected that the returned + array in the new basis is real valued. Default is True. 
Returns ------- @@ -196,7 +200,7 @@ def change_basis(mx, from_basis, to_basis): if not to_basis.real: return ret - if _mt.safe_norm(ret, 'imag') > 1e-8: + if expect_real and _mt.safe_norm(ret, 'imag') > 1e-8: raise ValueError("Array has non-zero imaginary part (%g) after basis change (%s to %s)!\n%s" % (_mt.safe_norm(ret, 'imag'), from_basis, to_basis, ret)) return ret.real diff --git a/pygsti/tools/errgenproptools.py b/pygsti/tools/errgenproptools.py new file mode 100644 index 000000000..018a78235 --- /dev/null +++ b/pygsti/tools/errgenproptools.py @@ -0,0 +1,7709 @@ +""" +Tools for the propagation of error generators through circuits. +""" +#*************************************************************************************************** +# Copyright 2015, 2019, 2025 National Technology & Engineering Solutions of Sandia, LLC (NTESS). +# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights +# in this software. +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +# in compliance with the License. You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory. +#*************************************************************************************************** + +import warnings +try: + import stim +except ImportError: + msg = "Stim is required for use of the error generator propagation tools module, " \ + "and it does not appear to be installed. If you intend to use this module please update" \ + " your environment." 
+ warnings.warn(msg) + +import numpy as _np +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GEEL, LocalElementaryErrorgenLabel as _LEEL +from pygsti.baseobjs import QubitSpace as _QubitSpace +from pygsti.baseobjs.basis import BuiltinBasis as _BuiltinBasis +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis as _CompleteElementaryErrorgenBasis, ExplicitElementaryErrorgenBasis as _ExplicitElementaryErrorgenBasis +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from pygsti.modelmembers.operations import LindbladErrorgen as _LinbladErrorgen +from pygsti.circuits import Circuit as _Circuit +from pygsti.tools.optools import create_elementary_errorgen_nqudit, state_to_dmvec +from functools import reduce +from itertools import chain, product +from math import factorial + +def errgen_coeff_label_to_stim_pauli_strs(err_gen_coeff_label, num_qubits): + """ + Converts an input `GlobalElementaryErrorgenLabel` to a tuple of stim.PauliString + objects, padded with an appropriate number of identities. + + Parameters + ---------- + err_gen_coeff_label : `GlobalElementaryErrorgenLabel` or `LocalElementaryErrorgenLabel` + The error generator coefficient label to construct the tuple of pauli + strings for. + + num_qubits : int + Number of total qubits to use for the Pauli strings. Used to determine + the number of identities added when padding. + + Returns + ------- + tuple of stim.PauliString + A tuple of either length 1 (for H and S) or length 2 (for C and A) + whose entries are stim.PauliString representations of the indices for the + input error generator label, padded with an appropriate number of identities + given the support of the error generator label. 
+ + """ + + if isinstance(err_gen_coeff_label, _LEEL): + return tuple([stim.PauliString(bel) for bel in err_gen_coeff_label.basis_element_labels]) + + elif isinstance(err_gen_coeff_label, _GEEL): + #the coefficient label is a tuple with 3 elements. + #The first element is the error generator type. + #the second element is a tuple of paulis either of length 1 or 2 depending on the error gen type. + #the third element is a tuple of subsystem labels. + errorgen_typ = err_gen_coeff_label.errorgen_type + pauli_lbls = err_gen_coeff_label.basis_element_labels + sslbls = err_gen_coeff_label.support + + #double check that the number of qubits specified is greater than or equal to the length of the + #basis element labels. + #assert len(pauli_lbls) >= num_qubits, 'Specified `num_qubits` is less than the length of the basis element labels.' + + if errorgen_typ == 'H' or errorgen_typ == 'S': + pauli_string = num_qubits*['I'] + pauli_lbl = pauli_lbls[0] + for i, sslbl in enumerate(sslbls): + pauli_string[sslbl] = pauli_lbl[i] + pauli_string = stim.PauliString(''.join(pauli_string)) + return (pauli_string,) + elif errorgen_typ == 'C' or errorgen_typ == 'A': + pauli_strings = [] + for pauli_lbl in pauli_lbls: #iterate through both pauli labels + pauli_string = num_qubits*['I'] + for i, sslbl in enumerate(sslbls): + pauli_string[sslbl] = pauli_lbl[i] + pauli_strings.append(stim.PauliString(''.join(pauli_string))) + return tuple(pauli_strings) + else: + raise ValueError(f'Unsupported error generator type {errorgen_typ}') + else: + raise ValueError('Only `GlobalElementaryErrorgenLabel and LocalElementaryErrorgenLabel is currently supported.') + +#------- Error Generator Math -------------# + +def bch_approximation(errgen_layer_1, errgen_layer_2, bch_order=1, truncation_threshold=1e-14): + """ + Apply the BCH approximation at the given order to combine the input dictionaries + of error generator rates. 
+ + Parameters + ---------- + errgen_layer_1 : dict + Dictionary of the error generator coefficients and rates for a circuit layer. + The error generator coefficients are represented using LocalStimErrorgenLabel. + + errgen_layer_2 : dict + See errgen_layer_1. + + bch_order : int, optional (default 1) + Order of the BCH approximation to use. Currently supports up to fifth order. + + truncation_threshold : float, optional (default 1e-14) + Threshold for which any error generators with magnitudes below this value + are truncated. + + Returns + ------- + combined_errgen_layer : dict + A dictionary with the same general structure as `errgen_layer_1` and `errgen_layer_2`, but with the + rates combined according to the selected order of the BCH approximation. + + """ + new_errorgen_layer=[] + for curr_order in range(0, bch_order): + #add first order terms into new layer + if curr_order == 0: + #Get a combined set of error generator coefficient labels for these two + #dictionaries. + current_combined_coeff_lbls = {key: None for key in chain(errgen_layer_1, errgen_layer_2)} + + first_order_dict = dict() + #loop through the combined set of coefficient labels and add them to the new dictionary for the current BCH + #approximation order. If present in both we sum the rates. + for coeff_lbl in current_combined_coeff_lbls: + #only add to the first order dictionary if the coefficient exceeds the truncation threshold. + first_order_rate = errgen_layer_1.get(coeff_lbl, 0) + errgen_layer_2.get(coeff_lbl, 0) + if abs(first_order_rate) > truncation_threshold: + first_order_dict[coeff_lbl] = first_order_rate + + #allow short circuiting to avoid an expensive bunch of recombination logic when only using first order BCH + #which will likely be a common use case. + if bch_order==1: + return first_order_dict + new_errorgen_layer.append(first_order_dict) + + #second order BCH terms. 
+ # (1/2)*[X,Y] + elif curr_order == 1: + #calculate the pairwise commutators between each of the error generators in current_errgen_dict_1 and + #current_errgen_dict_2. + #precompute an identity string for comparisons in commutator calculations. + if errgen_layer_1: + identity = stim.PauliString('I'*len(next(iter(errgen_layer_1)).basis_element_labels[0])) + commuted_errgen_list = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in errgen_layer_2.items(): + #get the list of error generator labels + weight = .5*error1_val*error2_val + #avoid computing commutators which will be effectively zero. + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight= weight, identity=identity) + commuted_errgen_list.extend(commuted_errgen_sublist) + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + second_order_comm_dict = {error_tuple[0]: 0 for error_tuple in commuted_errgen_list} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list: + second_order_comm_dict[error_tuple[0]] += error_tuple[1] + + #truncate any terms which are below the truncation threshold following + #aggregation. + second_order_comm_dict = {key: val for key, val in second_order_comm_dict.items() if abs(val)>truncation_threshold} + + new_errorgen_layer.append(second_order_comm_dict) + + #third order BCH terms + # (1/12)*([X,[X,Y]] - [Y,[X,Y]]) + #TODO: Can make this more efficient by using linearity of commutators + elif curr_order == 2: + #we've already calculated (1/2)*[X,Y] in the previous order, so reuse this result. + #two different lists for the two different commutators so that we can more easily reuse + #this at higher order if needed. 
+ commuted_errgen_list_1 = [] + commuted_errgen_list_2 = [] + for error1a, error1a_val in errgen_layer_1.items(): + for error2, error2_val in second_order_comm_dict.items(): + #only need a factor of 1/6 because new_errorgen_layer[1] is 1/2 the commutator + weighta = (1/6)*error1a_val*error2_val + + #avoid computing commutators which will be effectively zero. + if not abs(weighta) < truncation_threshold: + commuted_errgen_sublist = error_generator_commutator(error1a, error2, + weight=weighta, identity=identity) + commuted_errgen_list_1.extend(commuted_errgen_sublist) + + for error1b, error1b_val in errgen_layer_2.items(): + for error2, error2_val in second_order_comm_dict.items(): + #only need a factor of -1/6 because new_errorgen_layer[1] is 1/2 the commutator + weightb = -(1/6)*error1b_val*error2_val + if not abs(weightb) < truncation_threshold: + commuted_errgen_sublist = error_generator_commutator(error1b, error2, + weight=weightb, identity=identity) + commuted_errgen_list_2.extend(commuted_errgen_sublist) + + + #turn the two new commuted error generator lists into dictionaries. + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + third_order_comm_dict_1 = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_1} + third_order_comm_dict_2 = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_2} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list_1: + third_order_comm_dict_1[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_2: + third_order_comm_dict_2[error_tuple[0]] += error_tuple[1] + + #finally sum these two dictionaries, keeping only terms which are greater than the threshold. 
+ third_order_comm_dict = dict() + current_combined_coeff_lbls = {key: None for key in chain(third_order_comm_dict_1, third_order_comm_dict_2)} + for lbl in current_combined_coeff_lbls: + third_order_rate = third_order_comm_dict_1.get(lbl, 0) + third_order_comm_dict_2.get(lbl, 0) + if abs(third_order_rate) > truncation_threshold: + third_order_comm_dict[lbl] = third_order_rate + new_errorgen_layer.append(third_order_comm_dict) + + #fourth order BCH terms + # -(1/24)*[Y,[X,[X,Y]]] + elif curr_order == 3: + #we've already calculated (1/12)*[X,[X,Y]] so reuse this result. + #this is stored in third_order_comm_dict_1 + commuted_errgen_list = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in third_order_comm_dict_1.items(): + #I *think* you can pick up at most around a factor of 8 from the commutator + #itself. Someone should validate that. Set this conservatively, but also + #avoid computing commutators which will be effectively zero. + #only need a factor of -1/2 because third_order_comm_dict_1 is 1/12 the nested commutator + weight = -.5*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list.extend(commuted_errgen_sublist) + + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + fourth_order_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. 
+ for error_tuple in commuted_errgen_list: + fourth_order_comm_dict[error_tuple[0]] += error_tuple[1] + + #drop any terms below the truncation threshold after aggregation + fourth_order_comm_dict = {key: val for key, val in fourth_order_comm_dict.items() if abs(val)>truncation_threshold} + new_errorgen_layer.append(fourth_order_comm_dict) + + #Note for fifth order and beyond we can save a bunch of commutators + #by using the results of https://doi.org/10.1016/j.laa.2003.09.010 + #Revisit this if going up to high-order ever becomes a regular computation. + #fifth-order BCH terms: + #-(1/720)*([X,F] - [Y, E]) + (1/360)*([Y,F] - [X,E]) + (1/120)*([Y,G] - [X,D]) + # Where: E = [Y,C]; F = [X,B]; G=[X,C] + # B = [X,[X,Y]]; C = [Y,[X,Y]]; D = [Y,[X,[X,Y]]] + # B, C and D have all been previously calculated (up to the leading constant). + # B is proportional to third_order_comm_dict_1, C is proportional to third_order_comm_dict_2 + # D is proportional to fourth_order_comm_dict + # This gives 9 new commutators to calculate (7 if you used linearity, and even fewer would be needed + # using the result from the paper above, but we won't here atm). + elif curr_order == 4: + B = third_order_comm_dict_1 #has a factor of 1/12 folded in already. + C = third_order_comm_dict_2 #has a factor of -1/12 folded in already. + D = fourth_order_comm_dict #has a factor of -1/24 folded in already. + #Compute the new commutators E, F and G as defined above. + #Start with E: + commuted_errgen_list_E = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in C.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. 
+ weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_E.extend(commuted_errgen_sublist) + #Next F: + commuted_errgen_list_F = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in B.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_F.extend(commuted_errgen_sublist) + #Then G: + commuted_errgen_list_G = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in C.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_G.extend(commuted_errgen_sublist) + + #Turn the commutator lists into dictionaries: + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + E_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_E} + F_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_F} + G_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_G} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. 
+ for error_tuple in commuted_errgen_list_E: + E_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_F: + F_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_G: + G_comm_dict[error_tuple[0]] += error_tuple[1] + + #drop any terms below the truncation threshold after aggregation + E_comm_dict = {key: val for key, val in E_comm_dict.items() if abs(val)>truncation_threshold} + F_comm_dict = {key: val for key, val in F_comm_dict.items() if abs(val)>truncation_threshold} + G_comm_dict = {key: val for key, val in G_comm_dict.items() if abs(val)>truncation_threshold} + #-(1/720)*([X,F] - [Y, E]) + (1/360)*([Y,F] - [X,E]) + (1/120)*([Y,G] - [X,D]) + #Now do the next round of 6 commutators: [X,F], [Y,E], [Y,F], [X,E], [Y,G] and [X,D] + #We also need the following weight factors. F has a leading factor of (1/12) + #E and G have a leading factor of (-1/12). D has a leading factor of (-1/24) + #This gives the following additional weight multipliers: + #[X,F] = (-1/60); [Y,E] = (-1/60); [Y,F]= (1/30); [X,E]= (1/30); [Y,G] = (-1/10); [X,D] = (1/5) + + #[X,F]: + commuted_errgen_list_XF = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in F_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = -(1/60)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XF.extend(commuted_errgen_sublist) + #[Y,E]: + commuted_errgen_list_YE = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in E_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. 
+ weight = -(1/60)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YE.extend(commuted_errgen_sublist) + #[Y,F]: + commuted_errgen_list_YF = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in F_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = (1/30)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YF.extend(commuted_errgen_sublist) + #[X,E]: + commuted_errgen_list_XE = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in E_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = (1/30)*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XE.extend(commuted_errgen_sublist) + #[Y,G]: + commuted_errgen_list_YG = [] + for error1, error1_val in errgen_layer_2.items(): + for error2, error2_val in G_comm_dict.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. + weight = -.1*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_YG.extend(commuted_errgen_sublist) + #[X,D]: + commuted_errgen_list_XD = [] + for error1, error1_val in errgen_layer_1.items(): + for error2, error2_val in D.items(): + #Won't add any weight adjustments at this stage, will do that for next commutator. 
+ weight = .2*error1_val*error2_val + if abs(weight) < truncation_threshold: + continue + commuted_errgen_sublist = error_generator_commutator(error1, error2, + weight=weight, identity=identity) + commuted_errgen_list_XD.extend(commuted_errgen_sublist) + + #Turn the commutator lists into dictionaries: + #loop through all of the elements of commuted_errorgen_list and instantiate a dictionary with the requisite keys. + XF_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XF} + YE_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YE} + YF_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YF} + XE_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XE} + YG_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_YG} + XD_comm_dict = {error_tuple[0]:0 for error_tuple in commuted_errgen_list_XD} + + #Add all of these error generators to the working dictionary of updated error generators and weights. + #There may be duplicates, which should be summed together. + for error_tuple in commuted_errgen_list_XF: + XF_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YE: + YE_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YF: + YF_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_XE: + XE_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_YG: + YG_comm_dict[error_tuple[0]] += error_tuple[1] + for error_tuple in commuted_errgen_list_XD: + XD_comm_dict[error_tuple[0]] += error_tuple[1] + + #finally sum these six dictionaries, keeping only terms which are greater than the threshold. 
+ fifth_order_comm_dict = dict() + fifth_order_dicts = [XF_comm_dict, YE_comm_dict, YF_comm_dict, XE_comm_dict, YG_comm_dict, XD_comm_dict] + current_combined_coeff_lbls = {key: None for key in chain(*fifth_order_dicts)} + for lbl in current_combined_coeff_lbls: + fifth_order_rate = sum([comm_dict.get(lbl, 0) for comm_dict in fifth_order_dicts]) + if abs(fifth_order_rate) > truncation_threshold: + fifth_order_comm_dict[lbl] = fifth_order_rate + new_errorgen_layer.append(fifth_order_comm_dict) + + else: + raise NotImplementedError("Higher orders beyond fifth order are not implemented yet.") + + #Finally accumulate all of the dictionaries in new_errorgen_layer into a single one, summing overlapping terms. + errorgen_labels_by_order = [{key: None for key in order_dict} for order_dict in new_errorgen_layer] + complete_errorgen_labels = errorgen_labels_by_order[0] + for order_dict in errorgen_labels_by_order[1:]: + complete_errorgen_labels.update(order_dict) + + #initialize a dictionary with requisite keys + new_errorgen_layer_dict = {lbl: 0 for lbl in complete_errorgen_labels} + + for order_dict in new_errorgen_layer: + for lbl, rate in order_dict.items(): + new_errorgen_layer_dict[lbl] += rate.real + + #Future: Possibly do one last truncation pass in case any of the different order cancel out when aggregated? + + return new_errorgen_layer_dict + +def error_generator_commutator(errorgen_1, errorgen_2, flip_weight=False, weight=1.0, identity=None): + """ + Returns the commutator of two error generators. I.e. [errorgen_1, errorgen_2]. + + Parameters + ---------- + errorgen1 : `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `LocalStimErrorgenLabel` + Second error generator + + flip_weight : bool, optional (default False) + If True flip the sign of the input value of weight kwarg. + + weight : float, optional (default 1.0) + An optional weighting value to apply to the value of the commutator. 
+ + identity : stim.PauliString, optional (default None) + An optional stim.PauliString to use for comparisons to the identity. + Passing in this kwarg isn't necessary, but can allow for reduced + stim.PauliString creation when calling this function many times for + improved efficiency. + + Returns + ------- + list of `LocalStimErrorgenLabel`s corresponding to the commutator of the two input error generators, + weighted by the specified value of `weight`. + """ + + errorgens=[] + + if flip_weight: + w= -weight + else: + w = weight + + errorgen_1_type = errorgen_1.errorgen_type + errorgen_2_type = errorgen_2.errorgen_type + + #The first basis element label is always well defined, + #the second we'll define only of the error generator is C or A type. + errorgen_1_bel_0 = errorgen_1.basis_element_labels[0] + errorgen_2_bel_0 = errorgen_2.basis_element_labels[0] + + if errorgen_1_type == 'C' or errorgen_1_type == 'A': + errorgen_1_bel_1 = errorgen_1.basis_element_labels[1] + if errorgen_2_type == 'C' or errorgen_2_type == 'A': + errorgen_2_bel_1 = errorgen_2.basis_element_labels[1] + + #create the identity stim.PauliString for later comparisons. 
+ if identity is None: + identity = stim.PauliString('I'*len(errorgen_1_bel_0)) + + if errorgen_1_type=='H' and errorgen_2_type=='H': + ptup = com(errorgen_1_bel_0 , errorgen_2_bel_0) + if ptup is not None: + errorgens.append((_LSE('H', [ptup[1]]), -1j*w *ptup[0])) + + elif errorgen_1_type=='H' and errorgen_2_type=='S': + ptup = com(errorgen_2_bel_0 , errorgen_1_bel_0) + if ptup is not None: + if errorgen_2_bel_0 == ptup[1]: + errorgens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup[0])) + else: + new_bels = [errorgen_2_bel_0, ptup[1]] if stim_pauli_string_less_than(errorgen_2_bel_0, ptup[1])\ + else [ptup[1], errorgen_2_bel_0] + errorgens.append(( _LSE('C', new_bels), 1j*w*ptup[0])) + + elif errorgen_1_type=='S' and errorgen_2_type=='H': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type=='H' and errorgen_2_type=='C': + ptup1 = com(errorgen_2_bel_0 , errorgen_1_bel_0) + ptup2 = com(errorgen_2_bel_1 , errorgen_1_bel_0) + if ptup1 is not None: + if ptup1[1] == errorgen_2_bel_1: + errorgens.append((_LSE('S', [errorgen_2_bel_1]), 2*1j*w*ptup1[0])) + else: + new_bels = [ptup1[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1)\ + else [errorgen_2_bel_1, ptup1[1]] + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0])) + if ptup2 is not None: + if ptup2[1] == errorgen_2_bel_0: + errorgens.append(( _LSE('S', [errorgen_2_bel_0]), 2*1j*w*ptup2[0])) + else: + new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0)\ + else [errorgen_2_bel_0, ptup2[1]] + errorgens.append((_LSE('C', new_bels), 1j*w*ptup2[0])) + + elif errorgen_1_type=='C' and errorgen_2_type=='H': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type=='H' and errorgen_2_type=='A': + ptup1 = com(errorgen_1_bel_0 , errorgen_2_bel_0) + ptup2 = com(errorgen_1_bel_0 , errorgen_2_bel_1) + if ptup1 is 
not None: + if ptup1[1] != errorgen_2_bel_1: + if stim_pauli_string_less_than(ptup1[1], errorgen_2_bel_1): + errorgens.append((_LSE('A', [ptup1[1], errorgen_2_bel_1]), -1j*w*ptup1[0])) + else: + errorgens.append((_LSE('A', [errorgen_2_bel_1, ptup1[1]]), 1j*w*ptup1[0])) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + if stim_pauli_string_less_than(errorgen_2_bel_0, ptup2[1]): + errorgens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), -1j*w*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), 1j*w*ptup2[0])) + + elif errorgen_1_type=='A' and errorgen_2_type=='H': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type=='S' and errorgen_2_type=='S': + #Commutator of S with S is zero. + pass + + elif errorgen_1_type=='S' and errorgen_2_type=='C': + ptup1 = pauli_product(errorgen_1_bel_0 , errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1 , errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), 
 -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = pauli_product(ptup1[1], errorgen_1_bel_0) + #it shouldn't be possible for ptup2[1] to equal errorgen_1_bel_0, + #as that would imply that errorgen_1_bel_0 was the identity. + if ptup2[1] == identity: + errorgens.append((_LSE('H', [errorgen_1_bel_0]), -1j*.5*w*ptup1[0]*ptup2[0])) + else: + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]) , -1j*.5*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]) , 1j*.5*w*ptup1[0]*ptup2[0])) + + #ptup3 is just the product from ptup2 in reverse, so this can be done + #more efficiently, but I'm not going to do that at present... + ptup3 = pauli_product(errorgen_1_bel_0, ptup1[1]) + if ptup3[1] == identity: + errorgens.append((_LSE('H', [errorgen_1_bel_0]), 1j*.5*w*ptup1[0]*ptup3[0]) ) + else: + if stim_pauli_string_less_than(errorgen_1_bel_0, ptup3[1]): + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup3[1]]) , -1j*.5*w*ptup1[0]*ptup3[0])) + else: + errorgens.append((_LSE('A', [ptup3[1], errorgen_1_bel_0]) , 1j*.5*w*ptup1[0]*ptup3[0])) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'S': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type == 'S' and errorgen_2_type == 'A': + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + else: + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), 
2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + else: + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + #it shouldn't be possible for errorgen_1_bel_0 to be equal to ptup2, + #since that would imply + #com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) == errorgen_1_bel_0 + #Which I don't think is possible when these come from valid error generator indices. + #errorgen_1_bel_0 can't be the identity, + #And com(errorgen_1_bel_0,com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be by the same + #argument that it can't be errorgen_1_bel_0 + if stim_pauli_string_less_than(errorgen_1_bel_0, ptup2[1]): + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'S': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'C': + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif 
ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_1,errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1,errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + 
errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity, + #And com(errorgen_2_bel_0, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1): + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_2_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #And com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0): + errorgens.append((_LSE('A', [ptup2[1], errorgen_2_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_2_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(ptup1[1], errorgen_1_bel_0) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_0) can't be either + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(ptup1[1], errorgen_1_bel_1) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. 
+ #And com(acom(errorgen_2_bel_0, errorgen_2_bel_1), errorgen_2_bel_1) can't be either + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), -.5*1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), .5*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = acom(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup2 is not None: + ptup3 = com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #It shouldn't be possible for ptup3 to be the identity given valid error generator indices. + errorgens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'A': + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + new_bels 
= [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), 1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), 2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) + if ptup1[1] != ptup2[1]: + if ptup1[1] != identity and ptup2[1] != identity: + new_bels = [ptup1[1], ptup2[1]] if stim_pauli_string_less_than(ptup1[1], ptup2[1]) else [ptup2[1], ptup1[1]] + errorgens.append((_LSE('C', new_bels), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup[1] == ptup[2] + if ptup1[1] != identity: + errorgens.append((_LSE('S', [ptup1[1]]), -2*1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. + if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1): + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_1]), .5*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_1_bel_1, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. + #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
+ if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0): + errorgens.append((_LSE('A', [ptup2[1], errorgen_1_bel_0]), .5*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [errorgen_1_bel_0, ptup2[1]]), -.5*w*ptup1[0]*ptup2[0])) + + ptup1 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity. + #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either + new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] + errorgens.append((_LSE('C', new_bels), .5*1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_1, don't need to check that errorgen_2_bel_1 isn't identity. + errorgens.append((_LSE('S', [errorgen_2_bel_1]), 1j*w*ptup1[0]*ptup2[0])) + + + ptup1 = acom(errorgen_1_bel_0,errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #com(errorgen_2_bel_1, acom(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either + new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] + errorgens.append((_LSE('C', new_bels), -.5*1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_0, don't need to check that errorgen_2_bel_0 isn't identity. + errorgens.append((_LSE('S', [errorgen_2_bel_0]), -1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = acom(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup2 is not None: + ptup3= com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #it shouldn't be possible for ptup3 to be identity given valid error generator + #indices. 
+ errorgens.append((_LSE('H', [ptup3[1]]), -.25*w*ptup1[0]*ptup2[0]*ptup3[0])) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'C': + errorgens = error_generator_commutator(errorgen_2, errorgen_1, flip_weight=True, weight=weight) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'A': + ptup1 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_1) + ptup2 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_0) + + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_0) + ptup2 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_1, errorgen_2_bel_0) + ptup2 = pauli_product(errorgen_2_bel_1, errorgen_1_bel_0) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + 
errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = pauli_product(errorgen_1_bel_0, errorgen_2_bel_1) + ptup2 = pauli_product(errorgen_2_bel_0, errorgen_1_bel_1) + if ptup1[1] != ptup2[1]: + if (ptup1[1] != identity) and (ptup2[1] != identity): + if stim_pauli_string_less_than(ptup1[1], ptup2[1]): + errorgens.append((_LSE('A', [ptup1[1], ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: + errorgens.append((_LSE('A', [ptup2[1], ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + elif ptup1[1] == identity: + errorgens.append((_LSE('H', [ptup2[1]]), -1j*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == identity + errorgens.append((_LSE('H', [ptup1[1]]), 1j*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_0: + #errorgen_1_bel_0 can't be the identity. + #com(errorgen_1_bel_1, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. + new_bels = [ptup2[1], errorgen_1_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_0) else [errorgen_1_bel_0, ptup2[1]] + errorgens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_1_bel_0 + errorgens.append((_LSE('S', [errorgen_1_bel_0]), w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_1_bel_1: + #errorgen_1_bel_1 can't be the identity. + #com(errorgen_1_bel_0, com(errorgen_2_bel_0, errorgen_2_bel_1)) can't be either. 
+ new_bels = [ptup2[1], errorgen_1_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_1_bel_1) else [errorgen_1_bel_1, ptup2[1]] + errorgens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_1_bel_1 + errorgens.append((_LSE('S', [errorgen_1_bel_1]), -1*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_0, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_1: + #errorgen_2_bel_1 can't be the identity. + #com(errorgen_2_bel_0, com(errorgen_1_bel_0, errorgen_1_bel_1)) can't be either. + new_bels = [ptup2[1], errorgen_2_bel_1] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_1) else [errorgen_2_bel_1, ptup2[1]] + errorgens.append((_LSE('C', new_bels), .5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_1 + errorgens.append((_LSE('S', [errorgen_2_bel_1]), w*ptup1[0]*ptup2[0])) + + + ptup1 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_2_bel_1, ptup1[1]) + if ptup2 is not None: + if ptup2[1] != errorgen_2_bel_0: + #errorgen_2_bel_0 can't be the identity. + #com(errorgen_2_bel_1, com(errorgen_1_bel_0,errorgen_1_bel_1)) can't be either. + new_bels = [ptup2[1], errorgen_2_bel_0] if stim_pauli_string_less_than(ptup2[1], errorgen_2_bel_0) else [errorgen_2_bel_0, ptup2[1]] + errorgens.append((_LSE('C', new_bels), -.5*w*ptup1[0]*ptup2[0])) + else: #ptup2[1] == errorgen_2_bel_0 + errorgens.append((_LSE('S', [errorgen_2_bel_0]), -1*w*ptup1[0]*ptup2[0])) + + ptup1 = com(errorgen_2_bel_0, errorgen_2_bel_1) + if ptup1 is not None: + ptup2 = com(errorgen_1_bel_0, errorgen_1_bel_1) + if ptup2 is not None: + ptup3 = com(ptup1[1], ptup2[1]) + if ptup3 is not None: + #it shouldn't be possible for ptup3 to be identity given valid error generator + #indices. 
+ errorgens.append((_LSE('H', [ptup3[1]]), .25*1j*w*ptup1[0]*ptup2[0]*ptup3[0])) + + return errorgens + +def error_generator_composition(errorgen_1, errorgen_2, weight=1.0, identity=None): + """ + Returns the composition of two error generators. I.e. errorgen_1[errorgen_2[\cdot]]. + + Parameters + ---------- + errorgen1 : `LocalStimErrorgenLabel` + First error generator. + + errorgen2 : `LocalStimErrorgenLabel` + Second error generator + + weight : float, optional (default 1.0) + An optional weighting value to apply to the value of the composition. + + identity : stim.PauliString, optional (default None) + An optional stim.PauliString to use for comparisons to the identity. + Passing in this kwarg isn't necessary, but can allow for reduced + stim.PauliString creation when calling this function many times for + improved efficiency. + + Returns + ------- + list of tuples. The first element of each tuple is a `LocalStimErrorgenLabel`s + corresponding to a component of the composition of the two input error generators. + The second element is the weight of that term, additionally weighted by the specified + value of `weight`. + """ + + composed_errorgens = [] + + w = weight + + errorgen_1_type = errorgen_1.errorgen_type + errorgen_2_type = errorgen_2.errorgen_type + + #The first basis element label is always well defined, + #the second we'll define only of the error generator is C or A type. + errorgen_1_bel_0 = errorgen_1.basis_element_labels[0] + errorgen_2_bel_0 = errorgen_2.basis_element_labels[0] + + if errorgen_1_type == 'C' or errorgen_1_type == 'A': + errorgen_1_bel_1 = errorgen_1.basis_element_labels[1] + if errorgen_2_type == 'C' or errorgen_2_type == 'A': + errorgen_2_bel_1 = errorgen_2.basis_element_labels[1] + + #create the identity stim.PauliString for later comparisons. 
+ if identity is None: + identity = stim.PauliString('I'*len(errorgen_1_bel_0)) + + if errorgen_1_type == 'H' and errorgen_2_type == 'H': + #H_P[H_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + P_eq_Q = (P==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_factor*w)) + else: + PQ = pauli_product(P, Q) + composed_errorgens.append((_LSE('H', [PQ[1]]), -1j*w*PQ[0])) + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(P, Q, False, False, P_eq_Q) + composed_errorgens.append((_LSE(new_eg_type, new_bels), addl_factor*w)) + + elif errorgen_1_type == 'H' and errorgen_2_type == 'S': + #H_P[S_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + PQ_eq_Q = (PQ[1]==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_factor = _ordered_new_bels_A(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_factor*w)) + composed_errorgens.append((_LSE('H', [P]), -w)) + else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). + new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(PQ[1], Q, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_factor*w)) + composed_errorgens.append((_LSE('H', [P]), -w)) + + elif errorgen_1_type == 'H' and errorgen_2_type == 'C': + #H_A[C_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + A = errorgen_1_bel_0 + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + #Case 1: [P,Q]=0 + if P.commutes(Q): + #precompute some products we'll need. 
+ PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + 
if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + + elif errorgen_1_type == 'H' and errorgen_2_type == 'A': + #H_A[A_{P,Q}] A->errorgen_1_bel_0, P,Q -> errorgen_2_bel_0, errorgen_2_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + A = errorgen_1_bel_0 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #Case 1: P and Q commute. + if P.commutes(Q): + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = 
_ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = 
_ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + + #Note: This could be done by leveraging the commutator code, but that adds + #additional overhead which I am opting to avoid. + elif errorgen_1_type == 'S' and errorgen_2_type == 'H': + #S_P[H_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + PQ_eq_Q = (PQ[1]==Q) + if P.commutes(Q): + new_eg_type, new_bels, addl_factor = _ordered_new_bels_A(PQ[1], P, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -PQ[0]*addl_factor*w)) + composed_errorgens.append((_LSE('H', [Q]), -w)) + else: #if errorgen_1_bel_0 and errorgen_2_bel_0 only multiply to identity they are equal (in which case they commute). 
+ new_eg_type, new_bels, addl_factor = _ordered_new_bels_C(PQ[1], P, PQ_ident, False, PQ_eq_Q) + if new_eg_type is not None: + composed_errorgens.append((_LSE(new_eg_type, new_bels), -1j*PQ[0]*addl_factor*w)) + composed_errorgens.append((_LSE('H', [Q]), -w)) + + elif errorgen_1_type == 'S' and errorgen_2_type == 'S': + #S_P[S_Q] P->errorgen_1_bel_0, Q -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_2_bel_0 + PQ = pauli_product(P, Q) + PQ_ident = (PQ[1] == identity) + if not PQ_ident: + composed_errorgens.append((_LSE('S', [PQ[1]]), w)) + composed_errorgens.append((_LSE('S', [P]), -w)) + composed_errorgens.append((_LSE('S', [Q]),- w)) + + elif errorgen_1_type == 'S' and errorgen_2_type == 'C': + #S_A[C_P,Q] A-> errorgen_1_bel_0, P->errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q] = 0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + + elif errorgen_1_type == 'S' and errorgen_2_type == 'A': + #S_A[A_P,Q] A-> errorgen_1_bel_0, P->errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP 
and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + + elif errorgen_1_type == 'C' and errorgen_2_type == 'H': + #C_P,Q[H_A]: P -> errorgen_1_bel_0, Q-> errorgen_1_bel_1, A -> errorgen_2_bel_0 + #TODO: This only differs from H-C by a few signs, should be able to combine the two implementations to save space. + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + A = errorgen_2_bel_0 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #[P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity (PQ can't be the identity if this is a valid C term). + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), -1*APQ[0]*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = 
_ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*PQ[0]*addl_factor_2*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*addl_factor_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'S': #TODO: This differs from S-C by just a few signs. Should be able to combine and significantly compress code. + #C_P,Q[S_A] P-> errorgen_1_bel_0, Q -> errorgen_1_bel_1, A->errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + A = errorgen_2_bel_0 + #also precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q] = 0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. 
+ + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. + + elif errorgen_1_type == 'C' and errorgen_2_type == 'C': + #C_A,B[C_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + #There are 64 separate cases, so this is gonna suck... + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(P, QAB[1], False, QAB_ident, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(A, BPQ[1], False, BPQ_ident, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 
= _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, 
PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) 
+ if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 
1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, 
addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = 
_ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = 
_ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + 
if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if 
new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), 1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + + else: #[P,Q] !=0 + #precompute some products we'll need. 
+ PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + ABP = pauli_product(AB[0]*AB[1], P) + ABQ = pauli_product(AB[0]*AB[1], Q) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + ABP_ident = (ABP[1] == identity) + ABQ_ident = (ABQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + ABP_eq_Q = (ABP[1] == Q) + ABQ_eq_P = (ABQ[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, 
new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = 
_ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, 
False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not 
None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + 
if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*ABQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(ABP[1], Q, ABP_ident, False, ABP_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(ABQ[1], P, ABQ_ident, False, ABQ_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -ABP[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -ABQ[0]*addl_factor_3*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + PQB = pauli_product(PQ[0]*PQ[1], B) + PQA = pauli_product(PQ[0]*PQ[1], A) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PQB_ident = (PQB[1] == identity) + PQA_ident = (PQA[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQB_eq_A = (PQB[1] == A) + PQA_eq_B = (PQA[1] == B) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + 
if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 
-PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQB[1], A, PQB_ident, False, PQB_eq_A) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(PQA[1], B, PQA_ident, False, PQA_eq_B) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -PQA[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0),-1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is 
not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + + elif errorgen_1_type == 'C' and errorgen_2_type == 'A': + #C_A,B[A_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if 
new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), QAB[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = 
_ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + 
new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, 
addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + elif not 
com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, 
QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + 
new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), QAB[0]*addl_factor_6*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, 
addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, 
new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + 
if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), APQ[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'H': + #A_{P,Q}[H_A] P->errorgen_1_bel_0, Q->errorgen_1_bel_1 A -> errorgen_2_bel_0 + A = errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + #Case 1: P and Q commute. + if P.commutes(Q): + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = 
_ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + else: #Case 2: {P,Q}=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PQ_ident = (PQ[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_Q = (PA[1]==Q) + QA_eq_P = (QA[1]==P) + PQ_eq_A = (PQ[1]==A) + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + if not APQ_ident: + composed_errorgens.append((_LSE('H', [APQ[1]]), 1j*APQ[0]*w)) + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = 
_ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], Q, PA_ident, False, PA_eq_Q) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], P, QA_ident, False, QA_eq_P) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PQ[1], A, PQ_ident, False, PQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PQ[0]*addl_factor_2*w)) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'S': + #A_P,Q[S_A] P->errorgen_1_bel_0, Q->errorgen_1_bel_1, A -> errorgen_2_bel_0 + P = errorgen_1_bel_0 + Q = errorgen_1_bel_1 + A = errorgen_2_bel_0 + + #precompute whether pairs commute or anticommute + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + + if P.commutes(Q): #Case 1: [P,Q]=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. 
+ PA_eq_QA = (PA[1]==QA[1]) + assert not PA_eq_QA #(I'm almost positive this should be true) + + #Case 1a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #Case 1d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if 
new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1*addl_factor_1*w)) + #TODO: Cases (1a,1b) and (1c,1d) only differ by the leading sign, can compress this code a bit. + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + #also precompute whether any of these products are the identity + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + APQ_ident = (APQ[1] == identity) + #also also precompute whether certain relevant pauli pairs are equal. + PA_eq_QA = (PA[1]==QA[1]) + #APQ can't equal A since that implies P==Q, which would be an invalid C term input. + + #Case 2a: [A,P]=0, [A,Q]=0 + if com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2b: {A,P}=0, {A,Q}=0 + elif not com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + + #Case 2c: [A,P]=0, {A,Q}=0 + elif com_AP and not com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #Case 2d: {A,P}=0, [A,Q]=0 + elif not com_AP and com_AQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QA[1], PA_ident, QA_ident, PA_eq_QA) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(APQ[1], A, APQ_ident, False, False) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(P, Q, False, False, False) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -APQ[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1*addl_factor_2*w)) + #TODO: Cases (2a,2b) and (2c,2d) only differ by the leading sign, can compress this code a bit. 
+ + elif errorgen_1_type == 'A' and errorgen_2_type == 'C': + #A_A,B[C_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + 
if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, 
new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + 
elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not 
com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, 
addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], 
PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, 
PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + else: #[P,Q]!=0 + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, 
new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, 
new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
-1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + else: #[A,B] != 0 + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 
= _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, 
PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if 
new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + 
new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, 
new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, 
addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + 
if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), -PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), 1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if 
new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -1j*PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), 1j*QAB[0]*addl_factor_6*w)) + else: + #precompute some products we'll need. 
+ PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + 
new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = 
_ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, 
QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + 
if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*QAB[0]*addl_factor_3*w)) + + elif errorgen_1_type == 'A' and errorgen_2_type == 'A': + #A_A,B[A_P,Q]: A -> errorgen_1_bel_0, B -> errorgen_1_bel_1, P -> errorgen_2_bel_0, Q -> errorgen_2_bel_1 + A = errorgen_1_bel_0 + B = errorgen_1_bel_1 + P = errorgen_2_bel_0 + Q = errorgen_2_bel_1 + #precompute commutation relations we'll need. + com_PQ = P.commutes(Q) + com_AP = A.commutes(P) + com_AQ = A.commutes(Q) + com_BP = B.commutes(P) + com_BQ = B.commutes(Q) + if A.commutes(B): + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, 
new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, 
new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
-1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 
-1j*PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + #precompute whether any of these products are identities. 
+ PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, 
new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 
1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if 
new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 
1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), BPQ[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*APQ[0]*addl_factor_2*w)) + if 
new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*BPQ[0]*addl_factor_3*w)) + else: + if com_PQ: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + AB = pauli_product(A, B) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = 
_ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, 
PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, 
addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = 
_ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, 
PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, 
addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], 
P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -QAB[0]*addl_factor_3*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if 
new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), 1j*PAB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -1j*QAB[0]*addl_factor_3*w)) + else: + #precompute some products we'll need. + PA = pauli_product(P, A) + QA = pauli_product(Q, A) + PB = pauli_product(P, B) + QB = pauli_product(Q, B) + PQ = pauli_product(P, Q) + AB = pauli_product(A, B) + APQ = pauli_product(A, PQ[0]*PQ[1]) + BPQ = pauli_product(B, PQ[0]*PQ[1]) + PAB = pauli_product(P, AB[0]*AB[1]) + QAB = pauli_product(Q, AB[0]*AB[1]) + ABPQ = pauli_product(AB[0]*AB[1], PQ[0]*PQ[1]) + + #precompute whether any of these products are identities. + PA_ident = (PA[1] == identity) + QA_ident = (QA[1] == identity) + PB_ident = (PB[1] == identity) + QB_ident = (QB[1] == identity) + APQ_ident = (APQ[1] == identity) + BPQ_ident = (BPQ[1] == identity) + PAB_ident = (PAB[1] == identity) + QAB_ident = (QAB[1] == identity) + ABPQ_ident= (ABPQ[1] == identity) + #precompute which of the pairs of products might be equal + PA_eq_QB = (PA[1] == QB[1]) + QA_eq_PB = (QA[1] == PB[1]) + PQ_eq_AB = (PQ[1] == AB[1]) + APQ_eq_B = (APQ[1] == B) + BPQ_eq_A = (BPQ[1] == A) + PAB_eq_Q = (PAB[1] == Q) + QAB_eq_P = (QAB[1] == P) + + if com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(PA[1], QB[1], PA_ident, QB_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, 
APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -PA[0]*QB[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6),- 1j*QAB[0]*addl_factor_6*w)) + elif com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is 
not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(PB[1], QA[1], PB_ident, QA_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*PB[0]*QA[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: 
+ composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: 
+ composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + 
new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, 
addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), -1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, 
new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + 
composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + elif not com_AP and com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_C(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), -APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and 
com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), -1j*BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + elif not com_AP and not com_AQ and com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_A(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) 
+ new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_C(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), 1j*QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -1j*QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_A(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_C(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = 
_ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_C(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, new_bels_0), 1j*QB[0]*PA[0]*addl_factor_0*w)) + if new_eg_type_1 is not None: + composed_errorgens.append((_LSE(new_eg_type_1, new_bels_1), -QA[0]*PB[0]*addl_factor_1*w)) + if new_eg_type_2 is not None: + composed_errorgens.append((_LSE(new_eg_type_2, new_bels_2), -PQ[0]*AB[0]*addl_factor_2*w)) + if new_eg_type_3 is not None: + composed_errorgens.append((_LSE(new_eg_type_3, new_bels_3), 1j*APQ[0]*addl_factor_3*w)) + if new_eg_type_4 is not None: + composed_errorgens.append((_LSE(new_eg_type_4, new_bels_4), BPQ[0]*addl_factor_4*w)) + if new_eg_type_5 is not None: + composed_errorgens.append((_LSE(new_eg_type_5, new_bels_5), 1j*PAB[0]*addl_factor_5*w)) + if new_eg_type_6 is not None: + composed_errorgens.append((_LSE(new_eg_type_6, new_bels_6), -QAB[0]*addl_factor_6*w)) + if not ABPQ_ident: + composed_errorgens.append((_LSE('H', [ABPQ[1]]), -1j*ABPQ[0]*w)) + elif not com_AP and not com_AQ and not com_BP and not com_BQ: + new_eg_type_0, new_bels_0, addl_factor_0 = _ordered_new_bels_C(QB[1], PA[1], QB_ident, PA_ident, PA_eq_QB) + new_eg_type_1, new_bels_1, addl_factor_1 = _ordered_new_bels_C(QA[1], PB[1], QA_ident, PB_ident, QA_eq_PB) + new_eg_type_2, new_bels_2, addl_factor_2 = _ordered_new_bels_C(PQ[1], AB[1], False, False, PQ_eq_AB) + new_eg_type_3, new_bels_3, addl_factor_3 = _ordered_new_bels_A(APQ[1], B, APQ_ident, False, APQ_eq_B) + new_eg_type_4, new_bels_4, addl_factor_4 = _ordered_new_bels_A(BPQ[1], A, BPQ_ident, False, BPQ_eq_A) + new_eg_type_5, new_bels_5, addl_factor_5 = _ordered_new_bels_A(PAB[1], Q, PAB_ident, False, PAB_eq_Q) + new_eg_type_6, new_bels_6, addl_factor_6 = _ordered_new_bels_A(QAB[1], P, QAB_ident, False, QAB_eq_P) + if new_eg_type_0 is not None: + composed_errorgens.append((_LSE(new_eg_type_0, 
#helper functions for getting the new (properly ordered) basis element labels, error generator
#type (A can turn into H with certain index combinations), and additional signs. These reduce
#code repetition in the composition code.
def _ordered_new_bels_A(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq):
    """
    Helper function for managing new basis element labels, error generator types and proper
    basis element label ordering for 'A'-type terms. Returns (None, None, None) when the
    resulting error generator vanishes: when both pauli identity flags are True, or when
    `pauli_eq` is True. When exactly one pauli is the identity the term degrades to an 'H'
    error generator on the other pauli (with a sign flip when it is the first pauli that
    survives). Otherwise returns an 'A' label with the paulis in canonical order, together
    with the sign induced by any reordering.
    """
    # vanishing cases: equal paulis, or both paulis are the identity.
    if pauli_eq or (first_pauli_ident and second_pauli_ident):
        return (None, None, None)
    # exactly one identity: degrade to an 'H' generator on the surviving pauli.
    if first_pauli_ident:
        return 'H', [pauli2], 1
    if second_pauli_ident:
        return 'H', [pauli1], -1
    # generic case: canonically order the pair, tracking the antisymmetric sign.
    if stim_pauli_string_less_than(pauli1, pauli2):
        return 'A', [pauli1, pauli2], 1
    return 'A', [pauli2, pauli1], -1

def _ordered_new_bels_C(pauli1, pauli2, first_pauli_ident, second_pauli_ident, pauli_eq):
    """
    Helper function for managing new basis element labels, error generator types and proper
    basis element label ordering for 'C'-type terms. Returns (None, None, None) when either
    pauli identity flag is True (the term vanishes). Equal paulis give an 'S' generator with
    an extra factor of 2; otherwise returns a 'C' label with the paulis in canonical order
    (no sign change, since 'C' is symmetric in its basis element labels).
    """
    if first_pauli_ident or second_pauli_ident:
        return (None, None, None)
    if pauli_eq:
        return 'S', [pauli1], 2
    if stim_pauli_string_less_than(pauli1, pauli2):
        return 'C', [pauli1, pauli2], 1
    return 'C', [pauli2, pauli1], 1

def com(P1, P2):
    """
    Commutator helper for stim Pauli strings. Two paulis either commute or anticommute:
    returns None when they commute (vanishing commutator), otherwise a tuple
    (sign(P1*P2) * 2, unsigned P1*P2).
    """
    if P1.commutes(P2):
        return None
    prod = P1 * P2
    return (2 * prod.sign, prod / prod.sign)

def acom(P1, P2):
    """
    Anticommutator helper for stim Pauli strings. Returns None when P1 and P2 anticommute
    (vanishing anticommutator), otherwise a tuple (sign(P1*P2) * 2, unsigned P1*P2).
    """
    if not P1.commutes(P2):
        return None
    prod = P1 * P2
    return (2 * prod.sign, prod / prod.sign)

def pauli_product(P1, P2):
    """
    Product helper for stim Pauli strings. Returns a tuple (sign(P1*P2), unsigned P1*P2).
    """
    prod = P1 * P2
    return (prod.sign, prod / prod.sign)
+ """ + + #remove the signs. + unsigned_pauli1 = pauli1/pauli1.sign + unsigned_pauli2 = pauli2/pauli2.sign + + unsigned_pauli1_str = str(unsigned_pauli1)[1:].replace('_', 'I') + unsigned_pauli2_str = str(unsigned_pauli2)[1:].replace('_', 'I') + + return unsigned_pauli1_str < unsigned_pauli2_str + +def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=None, sslbls=None): + """ + Converts an iterable over error generator coefficients and rates into the corresponding + dense numpy array representation. + + Parameters + ---------- + errorgen_layer : list, tuple or dict + An iterable over error generator coefficient and rates. If a list or a tuple the + elements should correspond to two-element tuples, the first value being an `ElementaryErrorgenLabel` + and the second value the rate. If a dictionary the keys should be `ElementaryErrorgenLabel` and the + values the rates. + + num_qubits : int + Number of qubits for the error generator matrix being constructed. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + sslbls : list or tuple, optional (default None) + A tuple or list of state space labels corresponding to the qubits upon which the error generators + can supported. Only required when passing in a value of `errorgen_matrix_dict` with + `GlobalElementaryErrogenLabel` keys in conjunction with an `errorgen_layer` with labels + which are `LocalElementaryErrorgenLabel` (or vice-versa). + + Returns + ------- + errorgen_mat : ndarray + ndarray for the dense representation of the specified error generator in the standard basis. + """ + + #if the list is empty return all zeros + #initialize empty array for accumulation. 
def errorgen_layer_to_matrix(errorgen_layer, num_qubits, errorgen_matrix_dict=None, sslbls=None):
    """
    Converts an iterable over error generator coefficients and rates into the corresponding
    dense numpy array representation.

    Parameters
    ----------
    errorgen_layer : list, tuple or dict
        An iterable over error generator coefficient and rates. If a list or a tuple the
        elements should correspond to two-element tuples, the first value being an `ElementaryErrorgenLabel`
        and the second value the rate. If a dictionary the keys should be `ElementaryErrorgenLabel` and the
        values the rates.

    num_qubits : int
        Number of qubits for the error generator matrix being constructed.

    errorgen_matrix_dict : dict, optional (default None)
        An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation.
        If not specified this will be constructed from scratch each call, so specifying this can provide a performance
        benefit.

    sslbls : list or tuple, optional (default None)
        A tuple or list of state space labels corresponding to the qubits upon which the error generators
        can supported. Only required when passing in a value of `errorgen_matrix_dict` with
        `GlobalElementaryErrogenLabel` keys in conjunction with an `errorgen_layer` with labels
        which are `LocalElementaryErrorgenLabel` (or vice-versa).

    Returns
    -------
    errorgen_mat : ndarray
        ndarray for the dense representation of the specified error generator in the standard basis.

    Raises
    ------
    ValueError
        If `errorgen_matrix_dict` has keys of an unsupported type or is empty while `errorgen_layer`
        is not; if `errorgen_layer` is not a list, tuple or dict or contains unsupported label types;
        or if a local/global label mismatch requires `sslbls` and none was given.
    """
    #initialize empty array for accumulation; if the layer is empty return all zeros.
    mat = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128)
    if not errorgen_layer:
        return mat

    if errorgen_matrix_dict is None:
        #create an error generator basis and use it to construct a dictionary from
        #error generator labels to their matrices.
        errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local')
        errorgen_lbls = errorgen_basis.labels
        errorgen_matrix_dict = {lbl: m for lbl, m in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)}

    #infer the correct label type from the dictionary's keys.
    if errorgen_matrix_dict:
        first_label = next(iter(errorgen_matrix_dict))
        if isinstance(first_label, _LEEL):
            label_type = 'local'
        elif isinstance(first_label, _GEEL):
            label_type = 'global'
        else:
            msg = f'Label type {type(first_label)} is not supported as a key for errorgen_matrix_dict.'\
                  + 'Please use either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel.'
            #BUGFIX: previously raised a bare ValueError() which silently dropped the constructed message.
            raise ValueError(msg)
    else:
        raise ValueError('Non-empty errorgen_layer, but errorgen_matrix_dict is empty. Cannot convert.')

    #grab a representative coefficient label and a uniform (label, rate) iterator.
    if isinstance(errorgen_layer, (list, tuple)):
        first_coefficient_lbl = errorgen_layer[0][0]
        errorgen_layer_iter = errorgen_layer
    elif isinstance(errorgen_layer, dict):
        first_coefficient_lbl = next(iter(errorgen_layer))
        errorgen_layer_iter = errorgen_layer.items()
    else:
        raise ValueError(f'errorgen_layer should be either a list, tuple or dict. {type(errorgen_layer)=}')

    #casting between local and global labels requires the state space labels.
    if ((isinstance(first_coefficient_lbl, _LEEL) and label_type == 'global')
            or (isinstance(first_coefficient_lbl, _GEEL) and label_type == 'local')) and sslbls is None:
        msg = "You have passed in an `errogen_layer` with `LocalElementaryErrorgenLabel` coefficients, and "\
              + "an `errorgen_matrix_dict` with keys which are `GlobalElementaryErrorgenLabel` (or vice-versa). When using this "\
              + "combination you must also specify the state space labels with `sslbls`."
        raise ValueError(msg)

    #loop through errorgen_layer and accumulate the weighted error generators prescribed,
    #casting each coefficient label to the dictionary's key type as needed.
    if isinstance(first_coefficient_lbl, _LSE):
        if label_type == 'local':
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[lbl.to_local_eel()]
        else:
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[lbl.to_global_eel()]
    elif isinstance(first_coefficient_lbl, _LEEL):
        if label_type == 'local':
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[lbl]
        else:
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[_GEEL.cast(lbl, sslbls=sslbls)]
    elif isinstance(first_coefficient_lbl, _GEEL):
        if label_type == 'local':
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[_LEEL.cast(lbl, sslbls=sslbls)]
        else:
            for lbl, rate in errorgen_layer_iter:
                mat += rate*errorgen_matrix_dict[lbl]
    else:
        raise ValueError('The coefficient labels in `errorgen_layer` should be either `LocalStimErrorgenLabel`, `LocalElementaryErrorgenLabel` or `GlobalElementaryErrorgenLabel`.')

    return mat
+ """ + + if len(errorgen_labels) == 1: + return [(errorgen_labels[0], rates[0])] + else: + label_tuples_to_process = [errorgen_labels] + rate_tuples_to_process = [rates] + + fully_processed_label_rate_tuples = [] + while label_tuples_to_process: + new_label_tuples_to_process = [] + new_rate_tuples_to_process = [] + + for label_tup, rate_tup in zip(label_tuples_to_process, rate_tuples_to_process): + #grab the last two elements of each of these and do the composition. + new_labels_and_rates = error_generator_composition(label_tup[-2], label_tup[-1], rate_tup[-2]*rate_tup[-1]) + + #if the new labels and rates sum to zero overall then we can kill this branch of the tree. + aggregated_labels_and_rates_dict = dict() + for lbl, rate in new_labels_and_rates: + if aggregated_labels_and_rates_dict.get(lbl, None) is None: + aggregated_labels_and_rates_dict[lbl] = rate + else: + aggregated_labels_and_rates_dict[lbl] += rate + if all([abs(val)<1e-15 for val in aggregated_labels_and_rates_dict.values()]): + continue + + label_tup_remainder = label_tup[:-2] + rate_tup_remainder = rate_tup[:-2] + if label_tup_remainder: + for new_label, new_rate in aggregated_labels_and_rates_dict.items(): + new_label_tup = label_tup_remainder + (new_label,) + new_rate_tup = rate_tup_remainder + (new_rate,) + new_label_tuples_to_process.append(new_label_tup) + new_rate_tuples_to_process.append(new_rate_tup) + else: + for new_label_rate_tup in aggregated_labels_and_rates_dict.items(): + fully_processed_label_rate_tuples.append(new_label_rate_tup) + label_tuples_to_process = new_label_tuples_to_process + rate_tuples_to_process = new_rate_tuples_to_process + + return fully_processed_label_rate_tuples + +#Helper functions for doing numeric commutators, compositions and BCH. + +def error_generator_commutator_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None): + """ + Numerically compute the commutator of the two specified elementary error generators. 
def error_generator_commutator_numerical(errorgen1, errorgen2, errorgen_matrix_dict=None, num_qubits=None):
    """
    Numerically compute the commutator of the two specified elementary error generators.

    Parameters
    ----------
    errorgen1 : `LocalElementaryErrorgenLabel` or `LocalStimErrorgenLabel`
        First error generator.

    errorgen2 : `ElementaryErrorgenLabel` or `LocalStimErrorgenLabel`
        Second error generator.

    errorgen_matrix_dict : dict, optional (default None)
        An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation.
        If not specified this will be constructed from scratch each call, so specifying this can provide a performance
        benefit.

    num_qubits : int, optional (default None)
        Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None.

    Returns
    -------
    ndarray
        Numpy array corresponding to the dense representation of the commutator of the input error generators in the standard basis.
    """
    assert isinstance(errorgen1, (_LEEL, _LSE)) and isinstance(errorgen2, (_LEEL, _LSE))
    assert type(errorgen1) == type(errorgen2), "The elementary error generator labels have mismatched types."

    if errorgen_matrix_dict is None:
        #create an error generator basis and use it to construct a dictionary from
        #error generator labels to their matrices.
        errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local')
        errorgen_matrix_dict = {lbl: m for lbl, m in zip(errorgen_basis.labels, errorgen_basis.elemgen_matrices)}

    first_label = next(iter(errorgen_matrix_dict))

    #resolve the input labels onto the key type used by the matrix dictionary.
    if isinstance(first_label, _LEEL):
        key1 = errorgen1 if isinstance(errorgen1, _LEEL) else errorgen1.to_local_eel()
        key2 = errorgen2 if isinstance(errorgen2, _LEEL) else errorgen2.to_local_eel()
    else:
        key1 = errorgen1 if isinstance(errorgen1, _LSE) else _LSE.cast(errorgen1)
        key2 = errorgen2 if isinstance(errorgen2, _LSE) else _LSE.cast(errorgen2)

    mat1 = errorgen_matrix_dict[key1]
    mat2 = errorgen_matrix_dict[key2]
    return mat1@mat2 - mat2@mat1
+ + Returns + ------- + ndarray + Numpy array corresponding to the dense representation of the composition of the input error generators in the standard basis. + + """ + assert isinstance(errorgen1, (_LEEL, _LSE)) and isinstance(errorgen2, (_LEEL, _LSE)) + assert type(errorgen1) == type(errorgen2), "The elementary error generator labels have mismatched types." + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + first_label = next(iter(errorgen_matrix_dict)) + + if isinstance(first_label, _LEEL): + if isinstance(errorgen1, _LEEL): + comp = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] + else: + comp = errorgen_matrix_dict[errorgen1.to_local_eel()]@errorgen_matrix_dict[errorgen2.to_local_eel()] + else: + if isinstance(errorgen1, _LSE): + comp = errorgen_matrix_dict[errorgen1]@errorgen_matrix_dict[errorgen2] + else: + comp = errorgen_matrix_dict[_LSE.cast(errorgen1)]@errorgen_matrix_dict[_LSE.cast(errorgen2)] + return comp + +def bch_numerical(propagated_errorgen_layers, error_propagator, bch_order=1): + """ + Iteratively compute effective error generator layer produced by applying the BCH approximation + to the list of input error generator matrices. Note this is primarily intended + as part of testing and validation infrastructure. + + Parameters + ---------- + propagated_errorgen_layers : list of numpy.ndarrays + List of the error generator layers to combine using the BCH approximation (in circuit ordering) + + error_propagator : `ErrorGeneratorPropagator` + An `ErrorGeneratorPropagator` instance to use as part of the BCH calculation. 
+ + bch_order : int, optional (default 1) + Order of the BCH approximation to apply (up to 5 is supported currently). + + Returns + ------- + numpy.ndarray + A dense numpy array corresponding to the result of the iterative application of the BCH + approximation. + """ + #Need to build an appropriate basis for getting the error generator matrices. + #accumulate the error generator coefficients needed. + collected_coeffs = [] + for layer in propagated_errorgen_layers: + for coeff in layer.keys(): + collected_coeffs.append(coeff.to_local_eel()) + #only want the unique ones. + unique_coeffs = list(set(collected_coeffs)) + + num_qubits = len(error_propagator.model.state_space.qubit_labels) + + errorgen_basis = _ExplicitElementaryErrorgenBasis(_QubitSpace(num_qubits), unique_coeffs, basis_1q=_BuiltinBasis('PP', 4)) + errorgen_lbl_matrix_dict = {lbl:mat for lbl,mat in zip(errorgen_basis.labels, errorgen_basis.elemgen_matrices)} + + #iterate through each of the propagated error generator layers and turn these into dense numpy arrays + errorgen_layer_mats = [] + for layer in propagated_errorgen_layers: + errorgen_layer_mats.append(error_propagator.errorgen_layer_dict_to_errorgen(layer, mx_basis='pp')) + + #initialize a matrix for storing the result of doing BCH. + bch_result = _np.zeros((4**num_qubits, 4**num_qubits), dtype=_np.complex128) + + if len(errorgen_layer_mats)==1: + return errorgen_layer_mats[0] + + #otherwise iterate through in reverse order (the propagated layers are + #in circuit ordering and not matrix multiplication ordering at the moment) + #and combine the terms pairwise + combined_err_layer = errorgen_layer_mats[-1] + for i in range(len(errorgen_layer_mats)-2, -1, -1): + combined_err_layer = pairwise_bch_numerical(combined_err_layer, errorgen_layer_mats[i], order=bch_order) + + return combined_err_layer + +def pairwise_bch_numerical(mat1, mat2, order=1): + """ + Helper function for doing the numerical BCH in a pairwise fashion. 
Note this function is primarily intended + for numerical validations as part of testing infrastructure. + """ + bch_result = _np.zeros(mat1.shape, dtype=_np.complex128) + if order >= 1: + bch_result += mat1 + mat2 + if order >= 2: + commutator12 = _matrix_commutator(mat1, mat2) + bch_result += .5*commutator12 + if order >= 3: + commutator112 = _matrix_commutator(mat1, commutator12) + commutator212 = _matrix_commutator(mat2, commutator12) + bch_result += (1/12)*(commutator112-commutator212) + if order >= 4: + commutator2112 = _matrix_commutator(mat2, commutator112) + bch_result += (-1/24)*commutator2112 + if order >= 5: + commutator1112 = _matrix_commutator(mat1, commutator112) + commutator2212 = _matrix_commutator(mat2, commutator212) + + commutator22212 = _matrix_commutator(mat2, commutator2212) + commutator11112 = _matrix_commutator(mat1, commutator1112) + commutator12212 = _matrix_commutator(mat1, commutator2212) + commutator21112 = _matrix_commutator(mat2, commutator1112) + commutator21212 = _matrix_commutator(mat2, _matrix_commutator(mat1, commutator212)) + commutator12112 = _matrix_commutator(mat1, commutator2112) + + bch_result += (-1/720)*(commutator11112 - commutator22212) + bch_result += (1/360)*(commutator21112 - commutator12212) + bch_result += (1/120)*(commutator21212 - commutator12112) + return bch_result + +def _matrix_commutator(mat1, mat2): + return mat1@mat2 - mat2@mat1 + +def iterative_error_generator_composition_numerical(errorgen_labels, rates, errorgen_matrix_dict=None, num_qubits=None): + """ + Iteratively compute error generator compositions. The function computes a dense representation of this composition + numerically and is primarily intended as part of testing infrastructure. + + Parameters + ---------- + errorgen_labels : tuple of `LocalStimErrorgenLabel` + A tuple of the elementary error generator labels to be composed. 
+ + rates : tuple of float + A tuple of corresponding error generator rates of the same length as the tuple + of error generator labels. + + errorgen_matrix_dict : dict, optional (default None) + An optional dictionary mapping `ElementaryErrorgenLabel`s to numpy arrays for their dense representation. + If not specified this will be constructed from scratch each call, so specifying this can provide a performance + benefit. + + num_qubits : int, optional (default None) + Number of qubits for the error generator commutator being computed. Only required if `errorgen_matrix_dict` is None. + + Returns + ------- + numpy.ndarray + Dense numpy array representation of the super operator corresponding to the iterated composition written in + the standard basis. + """ + + if errorgen_matrix_dict is None: + #create an error generator basis. + errorgen_basis = _CompleteElementaryErrorgenBasis('PP', _QubitSpace(num_qubits), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + composition = errorgen_matrix_dict[errorgen_labels[0]] + for lbl in errorgen_labels[1:]: + composition = composition@errorgen_matrix_dict[lbl] + composition *= _np.prod(rates) + return composition + +#-----------First-Order Approximate Error Generator Probabilities and Expectation Values---------------# + +def random_support(tableau, return_support=False): + """ + Compute the number of bits over which the stabilizer state corresponding to this stim tableau + would have measurement outcomes which are random. + + Parameters + ---------- + tableau : stim.Tableau + stim.Tableau corresponding to the stabilizer state we want the random support + for. 
+ + return_support : bool, optional (default False) + If True also returns a list of qubit indices over which the distribution of outcome + bit strings is random. + """ + #TODO Test for correctness on support + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + num_random = 0 + support = [] + for i in range(len(tableau)): + z = sim.peek_z(i) + if z == 0: + num_random+=1 + support.append(i) + # For a phase reference, use the smallest state with non-zero amplitude. + forced_bit = z == -1 + sim.postselect_z(i, desired_value=forced_bit) + return (num_random, support) if return_support else num_random + +#Courtesy of Gidney +#https://quantumcomputing.stackexchange.com/questions/38826/how-do-i-efficiently-compute-the-fidelity-between-two-stabilizer-tableau-states +def tableau_fidelity(tableau1, tableau2): + """ + Calculate the fidelity between the stabilizer states corresponding to the given stim + tableaus. This returns a result in units of probability (so this may be squared + fidelity depending on your convention). + + Parameters + ---------- + tableau1 : stim.Tableau + Stim tableau for first stabilizer state. + tableau2 : stim.Tableau + Stim tableau for second stabilizer state. + """ + t3 = tableau2**-1 * tableau1 + sim = stim.TableauSimulator() + sim.set_inverse_tableau(t3) + p = 1 + #note to future selves: stim uses little endian convention by default, and we typically use + #big endian. That doesn't make a difference in this case, but does elsewhere to be mindful to + #save on grief. + for q in range(len(t3)): + e = sim.peek_z(q) + if e == -1: + return 0 + if e == 0: + p *= 0.5 + sim.postselect_z(q, desired_value=False) + return p + +def bitstring_to_tableau(bitstring): + """ + Map a computational basis bit string into a corresponding Tableau which maps the all zero + state into that state. + + Parameters + ---------- + bitstring : str + String of 0's and 1's corresponding to the computational basis state to prepare the Tableau for. 
+ + Returns + ------- + stim.Tableau + Tableau which maps the all zero string to this computational basis state + """ + pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in bitstring])) + #convert this to a stim.Tableau + pauli_tableau = pauli_string.to_tableau() + return pauli_tableau + + +#Modified from Gidney +#https://quantumcomputing.stackexchange.com/questions/34610/get-the-amplitude-of-a-computational-basis-in-stim +def amplitude_of_state(tableau, desired_state): + """ + Get the amplitude of a particular computational basis state for given + stabilizer state. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau corresponding to the stabilizer state we wish to extract + the amplitude from. + + desired_state : str + String of 0's and 1's corresponding to the computational basis state to extract the amplitude for. + """ + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + n = sim.num_qubits + + #convert desired state into a list of bools + desired_state = [desired_state[i] == '1' for i in range(n)] + + # Determine the magnitude of the target state. + copy = sim.copy() + num_random = 0 + for q in range(n): + desired_bit = desired_state[q] + z = copy.peek_z(q) + forced_bit = z == -1 + if z == 0: + num_random += 1 + elif desired_bit != forced_bit: #forced bit is true if the state is |1>, so this is checking whether the bits match. + return 0 + copy.postselect_z(q, desired_value=desired_bit) + magnitude = 2**-(num_random / 2) + # For a phase reference, use the smallest state with non-zero amplitude. + copy = sim.copy() + ref_state = [False]*n + for q in range(n): + z = copy.peek_z(q) + forced_bit = z == -1 + ref_state[q] = forced_bit + copy.postselect_z(q, desired_value=forced_bit) + if ref_state == desired_state: + return magnitude + + # Postselect away states that aren't the desired or reference states. + # Also move the ref state to |00..00> and the desired state to |00..01>. 
+ copy = sim.copy() + found_difference = False + for q in range(n): + desired_bit = desired_state[q] + ref_bit = ref_state[q] + if desired_bit == ref_bit: + copy.postselect_z(q, desired_value=ref_bit) + if desired_bit: + copy.x(q) + elif not found_difference: + found_difference = True + if q: + copy.swap(0, q) + if ref_bit: + copy.x(0) + else: + # Remove difference between target state and ref state at this bit. + copy.cnot(0, q) + copy.postselect_z(q, desired_value=ref_bit) + + # The phase difference between |00..00> and |00..01> is what we want. + # Since other states are gone, this is the bloch vector phase of qubit 0. + assert found_difference + s = str(copy.peek_bloch(0)) + + if s == "+X": + phase_factor = 1 + if s == "-X": + phase_factor = -1 + if s == "+Y": + phase_factor = 1j + if s == "-Y": + phase_factor = -1j + + return phase_factor*magnitude + +def pauli_phase_update(pauli, bitstring, dual=False): + """ + Takes as input a pauli and a bit string and computes the output bitstring + and the overall phase that bit string accumulates. + + Parameters + ---------- + pauli : str or stim.PauliString + Pauli to apply + + bitstring : str + String of 0's and 1's representing the bit string to apply the pauli to. + + dual : bool, optional (default False) + If True then then the pauli is acting to the left on a row vector. + Returns + ------- + Tuple whose first element is the phase accumulated, and whose second element + is a string corresponding to the updated bit string. 
+ """ + + if isinstance(pauli, str): + pauli = stim.PauliString(pauli) + + bitstring = [False if bit=='0' else True for bit in bitstring] + if not dual: + #list of phase correction for each pauli (conditional on 0) + #Read [I, X, Y, Z] + pauli_phases_0 = [1, 1, 1j, 1] + + #list of the phase correction for each pauli (conditional on 1) + #Read [I, X, Y, Z] + pauli_phases_1 = [1, 1, -1j, -1] + else: + #list of phase correction for each pauli (conditional on 0) + #Read [I, X, Y, Z] + pauli_phases_0 = [1, 1, -1j, 1] + + #list of the phase correction for each pauli (conditional on 1) + #Read [I, X, Y, Z] + pauli_phases_1 = [1, 1, 1j, -1] + + #list of bools corresponding to whether each pauli flips the target bit + pauli_flips = [False, True, True, False] + + overall_phase = 1 + indices_to_flip = [] + for i, (elem, bit) in enumerate(zip(pauli, bitstring)): + if bit: + overall_phase*=pauli_phases_1[elem] + else: + overall_phase*=pauli_phases_0[elem] + if pauli_flips[elem]: + indices_to_flip.append(i) + #if the input pauli had any overall phase associated with it add that back + #in too. + overall_phase*=pauli.sign + #apply the flips to get the output bit string. + for idx in indices_to_flip: + bitstring[idx] = not bitstring[idx] + #turn this back into a string + output_bitstring = ''.join(['1' if bit else '0' for bit in bitstring]) + + return overall_phase, output_bitstring + +#TODO: This function needs a more evocative name +def phi(tableau, desired_bitstring, P, Q): + """ + This function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. + + Parameters + ---------- + tableau : stim.Tableau + A stim Tableau corresponding to the input stabilizer state. + + desired_bitstring : str + A string of zeros and ones corresponding to the bit string being measured. + + P : str or stim.PauliString + The first pauli string index. + + Q : str or stim.PauliString + The second pauli string index. 
+ + Returns + ------- + A complex number corresponding to the value of the phi function. + """ + + #start by getting the pauli string which maps the all-zeros string to the target bitstring. + initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])) + #map P and Q to stim.PauliString if needed. + if isinstance(P, str): + P = stim.PauliString(P) + if isinstance(Q, str): + Q = stim.PauliString(Q) + + #combine this initial pauli string with the two input paulis + eff_P = initial_pauli_string*P + eff_Q = Q*initial_pauli_string + + #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get + #the corresponding phase corrections. + all_zeros = '0'*len(eff_P) + phase1, bitstring1 = pauli_phase_update(eff_P, all_zeros, dual=True) + phase2, bitstring2 = pauli_phase_update(eff_Q, all_zeros) + + #get the amplitude of these two bitstrings in the stabilizer state. + amp1 = amplitude_of_state(tableau, bitstring1) + amp2 = amplitude_of_state(tableau, bitstring2).conjugate() #The second amplitude also needs a complex conjugate applied + + #now apply the phase corrections. + amp1*=phase1 + amp2*=phase2 + + #calculate phi. + phi = amp1*amp2 + + #phi should ultimately be either 0, +/-1 or +/-i, scaling might overflow + #so avoid scaling and just identify which of these it should be. For really + #tiny phi this may still have an issue... + if abs(phi)>1e-14: + if abs(phi.real) > 1e-14: + if phi.real > 0: + return complex(1) + else: + return complex(-1) + else: + if phi.imag > 0: + return 1j + else: + return -1j + else: + return complex(0) + +#helper function for numerically computing phi, primarily used for testing. +def phi_numerical(tableau, desired_bitstring, P, Q): + """ + This function computes a quantity whose value is used in expression for the sensitivity of probabilities to error generators. 
+ (This version does this calculation numerically and is primarily intended for testing infrastructure.) + + Parameters + ---------- + tableau : stim.Tableau + A stim Tableau corresponding to the input stabilizer state. + + desired_bitstring : str + A string of zeros and ones corresponding to the bit string being measured. + + P : str or stim.PauliString + The first pauli string index. + Q : str or stim.PauliString + The second pauli string index. + + Returns + ------- + A complex number corresponding to the value of the phi function. + """ + + #start by getting the pauli string which maps the all-zeros string to the target bitstring. + initial_pauli_string = stim.PauliString(''.join(['I' if bit=='0' else 'X' for bit in desired_bitstring])).to_unitary_matrix(endian = 'big') + + + #map P and Q to stim.PauliString if needed. + if isinstance(P, str): + P = stim.PauliString(P) + if isinstance(Q, str): + Q = stim.PauliString(Q) + + stabilizer_state = tableau.to_state_vector(endian = 'big') + stabilizer_state.reshape((len(stabilizer_state),1)) + #combine this initial pauli string with the two input paulis + eff_P = initial_pauli_string@P.to_unitary_matrix(endian = 'big') + eff_Q = Q.to_unitary_matrix(endian = 'big')@initial_pauli_string + + #now get the bit strings which need their amplitudes extracted from the input stabilizer state and get + #the corresponding phase corrections. + #all_zeros = '0'*len(eff_P) + all_zeros = _np.zeros((2**len(desired_bitstring),1)) + all_zeros[0] = 1 + #calculate phi. + #The second amplitude also needs a complex conjugate applied + phi = (all_zeros.T@eff_P@stabilizer_state) * (stabilizer_state.conj().T@eff_Q@all_zeros) + + num_random = random_support(tableau) + scale = 2**(num_random) + + return phi*scale + +def alpha(errorgen, tableau, desired_bitstring): + """ + First-order error generator sensitivity function for probability. 
+ + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + desired_bitstring : str + Bit string to calculate the sensitivity for. + """ + + errgen_type = errorgen.errorgen_type + basis_element_labels = errorgen.basis_element_labels + + if not isinstance(basis_element_labels[0], stim.PauliString): + basis_element_labels = tuple([stim.PauliString(lbl) for lbl in basis_element_labels]) + + identity_pauli = stim.PauliString('I'*len(basis_element_labels[0])) + + if errgen_type == 'H': + sensitivity = 2*phi(tableau, desired_bitstring, basis_element_labels[0], identity_pauli).imag + elif errgen_type == 'S': + sensitivity = (phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[0]) \ + - phi(tableau, desired_bitstring, identity_pauli, identity_pauli)).real + elif errgen_type == 'C': + first_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0], basis_element_labels[1]) + sensitivity = first_term.real + if basis_element_labels[0].commutes(basis_element_labels[1]): + second_term = 2*phi(tableau, desired_bitstring, basis_element_labels[0]*basis_element_labels[1], identity_pauli) + sensitivity -= second_term.real + else: #A + first_term = phi(tableau, desired_bitstring, basis_element_labels[1], basis_element_labels[0]) + if not basis_element_labels[0].commutes(basis_element_labels[1]): + second_term = phi(tableau, desired_bitstring, basis_element_labels[1]*basis_element_labels[0], identity_pauli) + sensitivity = 2*((first_term + second_term).imag) + else: + sensitivity = 2*first_term.imag + return sensitivity + +def alpha_numerical(errorgen, tableau, desired_bitstring): + """ + First-order error generator sensitivity function for probability. 
This implementation calculates + this quantity numerically, and as such is primarily intended for used as parting of testing + infrastructure. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + desired_bitstring : str + Bit string to calculate the sensitivity for. + """ + + #get the stabilizer state corresponding to the tableau. + stabilizer_state = tableau.to_state_vector(endian='big') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + #also get the superoperator (in the standard basis) corresponding to the elementary error generator + if isinstance(errorgen, _LSE): + local_eel = errorgen.to_local_eel() + elif isinstance(errorgen, _GEEL): + local_eel = _LEEL.cast(errorgen) + else: + local_eel = errorgen + + errgen_type = local_eel.errorgen_type + basis_element_labels = local_eel.basis_element_labels + basis_1q = _BuiltinBasis('PP', 4) + errorgen_superop = create_elementary_errorgen_nqudit(errgen_type, basis_element_labels, basis_1q, normalize=False, sparse=False, + tensorprod_basis=False) + + #also need a superbra for the desired bitstring. + desired_bitstring_vec = _np.zeros(2**len(desired_bitstring)) + desired_bitstring_vec[_bitstring_to_int(desired_bitstring)] = 1 + desired_bitstring_dmvec = state_to_dmvec(desired_bitstring_vec) + desired_bitstring_dmvec.reshape((1, len(desired_bitstring_dmvec))) + num_random = random_support(tableau) + scale = 2**(num_random) + + #compute the needed trace inner product. + alpha = _np.real_if_close(scale*(desired_bitstring_dmvec.conj().T@errorgen_superop@stabilizer_state_dmvec)) + + return alpha + +def alpha_pauli(errorgen, tableau, pauli): + """ + First-order error generator sensitivity function for pauli expectations. 
+ + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. + + pauli : stim.PauliString + Pauli to calculate the sensitivity for. + """ + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + + errgen_type = errorgen.errorgen_type + basis_element_labels = errorgen.basis_element_labels + + if not isinstance(basis_element_labels[0], stim.PauliString): + basis_element_labels = tuple([stim.PauliString(lbl) for lbl in basis_element_labels]) + + identity_pauli = stim.PauliString('I'*len(basis_element_labels[0])) + + if errgen_type == 'H': + pauli_bel_0_comm = com(pauli, basis_element_labels[0]) + if pauli_bel_0_comm is not None: + sign = -1j*pauli_bel_0_comm[0] + expectation = sim.peek_observable_expectation(pauli_bel_0_comm[1]) + return _np.real_if_close(sign*expectation) + else: + return 0 + elif errgen_type == 'S': + if pauli.commutes(basis_element_labels[0]): + return 0 + else: + expectation = sim.peek_observable_expectation(pauli) + return _np.real_if_close(-2*expectation) + elif errgen_type == 'C': + A = basis_element_labels[0] + B = basis_element_labels[1] + com_AP = A.commutes(pauli) + com_BP = B.commutes(pauli) #TODO: can skip computing this in some cases for minor performance boost. 
+ if A.commutes(B): + if com_AP: + return 0 + else: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(-4*expectation) + else: #{A,B} = 0 + if com_AP: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(-2*expectation) + else: + if com_BP: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(2*expectation) + else: + return 0 + else: #A + A = basis_element_labels[0] + B = basis_element_labels[1] + com_AP = A.commutes(pauli) + com_BP = B.commutes(pauli) #TODO: can skip computing this in some cases for minor performance boost. + if A.commutes(B): + if com_AP: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(1j*2*expectation) + else: + if com_BP: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(-1j*2*expectation) + else: + return 0 + else: #{A,B} = 0 + if com_AP: + return 0 + else: + if com_BP: + return 0 + else: + ABP = pauli_product(A*B, pauli) + expectation = ABP[0]*sim.peek_observable_expectation(ABP[1]) + return _np.real_if_close(1j*4*expectation) + +def alpha_pauli_numerical(errorgen, tableau, pauli): + """ + First-order error generator sensitivity function for pauli expectatons. This implementation calculates + this quantity numerically, and as such is primarily intended for used as parting of testing + infrastructure. + + Parameters + ---------- + errorgen : `ElementaryErrorgenLabel` + Error generator label for which to calculate sensitivity. + + tableau : stim.Tableau + Stim Tableau corresponding to the stabilizer state to calculate the sensitivity for. 
+ + pauli : stim.PauliString + Pauli to calculate the sensitivity for. + """ + + #get the stabilizer state corresponding to the tableau. + stabilizer_state = tableau.to_state_vector(endian='big') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + #also get the superoperator (in the standard basis) corresponding to the elementary error generator + if isinstance(errorgen, _LSE): + local_eel = errorgen.to_local_eel() + elif isinstance(errorgen, _GEEL): + local_eel = _LEEL.cast(errorgen) + else: + local_eel = errorgen + + errgen_type = local_eel.errorgen_type + basis_element_labels = local_eel.basis_element_labels + basis_1q = _BuiltinBasis('PP', 4) + errorgen_superop = create_elementary_errorgen_nqudit(errgen_type, basis_element_labels, basis_1q, normalize=False, sparse=False, + tensorprod_basis=False) + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = _np.ravel(pauli_unitary) + pauli_vec.reshape((len(pauli_vec),1)) + + #compute the needed trace inner product. + alpha = _np.real_if_close(pauli_vec.conj().T@errorgen_superop@stabilizer_state_dmvec).item() + + return alpha + +def _bitstring_to_int(bitstring) -> int: + if isinstance(bitstring, str): + # If the input is a string, convert it directly + return int(bitstring, 2) + elif isinstance(bitstring, tuple): + # If the input is a tuple, join the elements to form a string + return int(''.join(bitstring), 2) + else: + raise ValueError("Input must be either a string or a tuple of '0's and '1's") + +def stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order = 1, truncation_threshold = 1e-14): + """ + Compute the kth-order correction to the probability of the specified bit string. 
+ + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + tableau : stim.Tableau + Stim tableau corresponding to a particular stabilizer state being measured. + + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + correction : float + float corresponding to the correction to the output probability for the + desired bitstring induced by the error generator (to specified order). + """ + + num_random = random_support(tableau) + scale = 1/2**(num_random) #TODO: This might overflow + + #do the first order correction separately since it doesn't require composition logic: + #now get the sum over the alphas and the error generator rate products needed. + alpha_errgen_prods = _np.zeros(len(errorgen_dict)) + + + for i, (lbl, rate) in enumerate(errorgen_dict.items()): + if abs(rate) > truncation_threshold: + alpha_errgen_prods[i] = alpha(lbl, tableau, desired_bitstring)*rate + correction = scale*_np.sum(alpha_errgen_prods) + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. (given by cartesian products of labels in errorgen_dict). 
+ labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + current_order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + alpha_errgen_prods = _np.zeros(len(composition_results_dict)) + for i, (lbl, rate) in enumerate(composition_results_dict.items()): + if current_order_scale*abs(rate) > truncation_threshold: + sensitivity = alpha(lbl, tableau, desired_bitstring) + alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) + correction += current_order_scale*scale*_np.sum(alpha_errgen_prods) + + return correction + +#TODO: The implementations for the pauli expectation value correction and probability correction +#are basically identical modulo some additional scale factors and the alpha function used. Should be able to combine +#the implementations into one function. +def stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order = 1, truncation_threshold = 1e-14): + """ + Compute the kth-order correction to the expectation value of the specified pauli. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. 
+ + tableau : stim.Tableau + Stim tableau corresponding to a particular stabilizer state being measured. + + pauli : stim.PauliString + Pauli operator to compute expectation value correction for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + correction : float + float corresponding to the correction to the expectation value for the + selected pauli operator induced by the error generator (to specified order). + """ + + #do the first order correction separately since it doesn't require composition logic: + #now get the sum over the alphas and the error generator rate products needed. + alpha_errgen_prods = _np.zeros(len(errorgen_dict)) + + for i, (lbl, rate) in enumerate(errorgen_dict.items()): + if abs(rate) > truncation_threshold: + alpha_errgen_prods[i] = alpha_pauli(lbl, tableau, pauli)*rate + correction = _np.sum(alpha_errgen_prods) + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. (given by cartesian products of labels in errorgen_dict). 
+ labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + current_order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + alpha_errgen_prods = _np.zeros(len(composition_results_dict)) + for i, (lbl, rate) in enumerate(composition_results_dict.items()): + if current_order_scale*abs(rate) > truncation_threshold: + sensitivity = alpha_pauli(lbl, tableau, pauli) + alpha_errgen_prods[i] = _np.real_if_close(sensitivity*rate) + correction += current_order_scale*_np.sum(alpha_errgen_prods) + + return correction + +def stabilizer_pauli_expectation_correction_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order = 1): + """ + Compute the kth-order correction to the expectation value of the specified pauli. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. + + circuit : `Circuit` + Circuit the expectation value is being measured against. + + pauli : stim.PauliString + Pauli operator to compute expectation value correction for. 
+ + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + Returns + ------- + correction : float + float corresponding to the correction to the expectation value for the + selected pauli operator induced by the error generator (to specified order). + """ + tableau = circuit.convert_to_stim_tableau() + + stabilizer_state = tableau.to_state_vector(endian='big') + stabilizer_state_dmvec = state_to_dmvec(stabilizer_state) + stabilizer_state_dmvec.reshape((len(stabilizer_state_dmvec),1)) + + #also get the superoperator (in the standard basis) corresponding to the taylor series + #expansion of the specified error generator dictionary. + taylor_expanded_errorgen = error_generator_taylor_expansion_numerical(errorgen_dict, errorgen_propagator, order=order, mx_basis='std') + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = _np.ravel(pauli_unitary) + pauli_vec.reshape((len(pauli_vec),1)) + + expectation_correction = _np.linalg.multi_dot([pauli_vec.conj().T, taylor_expanded_errorgen,stabilizer_state_dmvec]).item() + return expectation_correction + +def stabilizer_probability(tableau, desired_bitstring): + """ + Calculate the output probability for the specifed output bitstring. + + TODO: Should be able to do this more efficiently for many bitstrings + by looking at the structure of the random support. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau for the stabilizer state being measured. + + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + Returns + ------- + p : float + probability of desired bitstring. + """ + #compute what Gidney calls the tableau fidelity (which in this case gives the probability). 
+ return tableau_fidelity(tableau, bitstring_to_tableau(desired_bitstring)) + +def stabilizer_pauli_expectation(tableau, pauli): + """ + Calculate the output probability for the specifed output bitstring. + + Parameters + ---------- + tableau : stim.Tableau + Stim tableau for the stabilizer state being measured. + + pauli : stim.PauliString + Pauli operator to compute expectation value for. + + Returns + ------- + expected_value : float + Expectation value of specified pauli + """ + if pauli.sign != 1: + pauli_sign = pauli.sign + unsigned_pauli = pauli/pauli_sign + else: + pauli_sign = 1 + unsigned_pauli = pauli + + sim = stim.TableauSimulator() + sim.set_inverse_tableau(tableau**-1) + expectation = pauli_sign*sim.peek_observable_expectation(unsigned_pauli) + return expectation + +def approximate_stabilizer_probability(errorgen_dict, circuit, desired_bitstring, order=1, truncation_threshold=1e-14): + """ + Calculate the approximate probability of a desired bit string using an nth-order taylor series approximation. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either + case this should be a Clifford circuit and convertable to a stim.Tableau. + + desired_bitstring : str + String of 0's and 1's corresponding to the output bitstring being measured. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. (Used internally in computation of probability corrections) + + Returns + ------- + p : float + Approximate output probability for desired bitstring. 
+ """ + + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + #recast keys to local stim ones if needed. + first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_prob = stabilizer_probability(tableau, desired_bitstring) + correction = stabilizer_probability_correction(errorgen_dict, tableau, desired_bitstring, order, truncation_threshold) + return ideal_prob + correction + +def approximate_stabilizer_pauli_expectation(errorgen_dict, circuit, pauli, order=1, truncation_threshold=1e-14): + """ + Calculate the approximate probability of a desired bit string using a first-order approximation. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either + case this should be a Clifford circuit and convertable to a stim.Tableau. + + pauli : str or stim.PauliString + Pauli operator to compute expectation value for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. (Used internally in computation of probability corrections) + + Returns + ------- + expectation_value : float + Approximate expectation value for desired pauli. 
+ """ + + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + if isinstance(pauli, str): + pauli = stim.PauliString(pauli) + + #recast keys to local stim ones if needed. + first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_expectation = stabilizer_pauli_expectation(tableau, pauli) + correction = stabilizer_pauli_expectation_correction(errorgen_dict, tableau, pauli, order, truncation_threshold) + return ideal_expectation + correction + +def approximate_stabilizer_pauli_expectation_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order=1): + """ + Calculate the approximate probability of a desired bit string using a first-order approximation. + This function performs the corrections numerically and so it primarily intended for testing + infrastructure. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. + + circuit : `Circuit` + A pygsti `Circuit` or a stim.Tableau to compute the output pauli expectation value for. + + pauli : stim.PauliString + Pauli operator to compute expectation value for. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. 
(Used internally in computation of probability corrections) + + Returns + ------- + expectation_value : float + Approximate expectation value for desired pauli. + """ + + tableau = circuit.convert_to_stim_tableau() + + #recast keys to local stim ones if needed. + first_lbl = next(iter(errorgen_dict)) + if isinstance(first_lbl, (_GEEL, _LEEL)): + errorgen_dict = {_LSE.cast(lbl):val for lbl,val in errorgen_dict.items()} + + ideal_expectation = stabilizer_pauli_expectation(tableau, pauli) + correction = stabilizer_pauli_expectation_correction_numerical(errorgen_dict, errorgen_propagator, circuit, pauli, order) + return ideal_expectation + correction + +def approximate_stabilizer_probabilities(errorgen_dict, circuit, order=1, truncation_threshold=1e-14): + """ + Calculate the approximate probability distribution over all bitstrings using a first-order approximation. + Note the size of this distribution scales exponentially in the qubit count, so this is very inefficient for + any more than a few qubits. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `ElementaryErrorgenLabel` and whose values are corresponding + rates. + + circuit : `Circuit` or `stim.Tableau` + A pygsti `Circuit` or a stim.Tableau to compute the output probability for. In either + case this should be a Clifford circuit and convertible to a stim.Tableau. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding error generator rates + are below this value. (Used internally in computation of probability corrections) + + Returns + ------- + probs : numpy.ndarray + Array of approximate output probabilities for all 2^n bitstrings. 
+ """ + if isinstance(circuit, _Circuit): + tableau = circuit.convert_to_stim_tableau() + elif isinstance(circuit, stim.Tableau): + tableau = circuit + else: + raise ValueError('`circuit` should either be a pygsti `Circuit` or a stim.Tableau.') + + #get set of all bit strings + num_qubits = len(tableau) + bitstrings = ["".join(bitstring) for bitstring in product(['0','1'], repeat=num_qubits)] + + #initialize an array for the probabilities + probs = _np.zeros(2**num_qubits) + + for i, bitstring in enumerate(bitstrings): + probs[i] = approximate_stabilizer_probability(errorgen_dict, tableau, bitstring, order, truncation_threshold) + + return probs + +def error_generator_taylor_expansion(errorgen_dict, order = 1, truncation_threshold = 1e-14): + """ + Compute the nth-order taylor expansion for the exponentiation of the error generator described by the input + error generator dictionary. (Excluding the zeroth-order identity). + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + order : int, optional (default 1) + Order of the correction (i.e. order of the taylor series expansion for + the exponentiated error generator) to compute. + + truncation_threshold : float, optional (default 1e-14) + Optional threshold used to truncate corrections whose corresponding rates + are below this value. + + Returns + ------- + list of dictionaries + List of dictionaries whose keys are error generator labels and whose values are rates (including + whatever scaling comes from order of taylor expansion). Each list corresponds to an order + of the taylor expansion. + """ + + + taylor_order_terms = [dict() for _ in range(order)] + + for lbl, rate in errorgen_dict.items(): + if abs(rate) > truncation_threshold: + taylor_order_terms[0][lbl] = rate + + if order > 1: + #The order of the approximation determines the combinations of error generators + #which need to be composed. 
(given by cartesian products of labels in errorgen_dict). + labels_by_order = [list(product(errorgen_dict.keys(), repeat = i+1)) for i in range(1,order)] + #Get a similar structure for the corresponding rates + rates_by_order = [list(product(errorgen_dict.values(), repeat = i+1)) for i in range(1,order)] + for current_order, (current_order_labels, current_order_rates) in enumerate(zip(labels_by_order, rates_by_order), start=2): + order_scale = 1/factorial(current_order) + composition_results = [] + for label_tup, rate_tup in zip(current_order_labels, current_order_rates): + composition_results.extend(iterative_error_generator_composition(label_tup, rate_tup)) + #aggregate together any overlapping terms in composition_results + composition_results_dict = dict() + for lbl, rate in composition_results: + if composition_results_dict.get(lbl,None) is None: + composition_results_dict[lbl] = rate + else: + composition_results_dict[lbl] += rate + for lbl, rate in composition_results_dict.items(): + if order_scale*abs(rate) > truncation_threshold: + taylor_order_terms[current_order-1][lbl] = order_scale*rate + + return taylor_order_terms + +def error_generator_taylor_expansion_numerical(errorgen_dict, errorgen_propagator, order = 1, mx_basis = 'pp'): + """ + Compute the nth-order taylor expansion for the exponentiation of the error generator described by the input + error generator dictionary. (Excluding the zeroth-order identity). This function computes a dense representation + of this taylor expansion as a numpy array and is primarily intended for testing infrastructure. + + Parameters + ---------- + errorgen_dict : dict + Dictionary whose keys are `LocalStimErrorgenLabel` and whose values are corresponding + rates. + + errorgen_propagator : `ErrorGeneratorPropagator` + Error generator propagator used for constructing dense representation of the error generator dictionary. + + order : int, optional (default 1) + Order of the correction (i.e. 
order of the taylor series expansion for + the exponentiated error generator) to compute. + + mx_basis : `Basis` or str, optional (default 'pp') + Basis in which to return the matrix. + + Returns + ------- + numpy.ndarray + A dense numpy array corresponding to the nth order taylor expansion of the specified error generator. + """ + + errorgen_mat = errorgen_propagator.errorgen_layer_dict_to_errorgen(errorgen_dict, mx_basis) + taylor_expansion = _np.zeros(errorgen_mat.shape, dtype=_np.complex128) + for i in range(1, order+1): + taylor_expansion += 1/factorial(i)*_np.linalg.matrix_power(errorgen_mat, i) + + return taylor_expansion \ No newline at end of file diff --git a/pygsti/tools/internalgates.py b/pygsti/tools/internalgates.py index 2d5356297..d2f276fd8 100644 --- a/pygsti/tools/internalgates.py +++ b/pygsti/tools/internalgates.py @@ -370,7 +370,46 @@ def unitary_to_standard_gatename(unitary, up_to_phase = False, return_phase = Fa return std_name return None +def standard_gatenames_stim_conversions(): + """ + A dictionary converting the gates with standard names to stim tableaux for these gates. 
Currently it is only capable of converting + Clifford gates, with no capability for T gates + + TODO: Add in all standard Clifford gate names + Returns + ------- + A dict mapping gate name strings to stim.Tableau objects + """ + try: + import stim + except ImportError: + raise ImportError("Stim is required for this operation, and it does not appear to be installed.") + gate_dict = { + 'Gi' : stim.Tableau.from_named_gate('I'), + 'Gxpi' : stim.Tableau.from_named_gate('X'), + 'Gypi' : stim.Tableau.from_named_gate('Y'), + 'Gzpi' : stim.Tableau.from_named_gate('Z'), + 'Gxpi2' : stim.Tableau.from_named_gate('SQRT_X'), + 'Gypi2' : stim.Tableau.from_named_gate('SQRT_Y'), + 'Gzpi2' : stim.Tableau.from_named_gate('SQRT_Z'), + 'Gxmpi2': stim.Tableau.from_named_gate('SQRT_X_DAG'), + 'Gympi2': stim.Tableau.from_named_gate('SQRT_Y_DAG'), + 'Gzmpi2': stim.Tableau.from_named_gate('SQRT_Z_DAG'), + 'Gs' : stim.Tableau.from_named_gate('S'), + 'Gsm' : stim.Tableau.from_named_gate('S_DAG'), + 'Gh' : stim.Tableau.from_named_gate('H'), + 'Gxx' : stim.Tableau.from_named_gate('SQRT_XX'), + 'Gzz' : stim.Tableau.from_named_gate('SQRT_ZZ'), + 'Gcnot' : stim.Tableau.from_named_gate('CNOT'), + 'Gswap' : stim.Tableau.from_named_gate('SWAP'), + 'Gcphase' : stim.Tableau.from_named_gate('CZ') + } + ecr_unitary = _np.array([[0, 1, 0., 1j], [1., 0, -1j, 0.], + [0., 1j, 0, 1], [-1j, 0., 1, 0]], complex)/_np.sqrt(2) + gate_dict['Gecres'] = stim.Tableau.from_unitary_matrix(ecr_unitary, endian='big') + + return gate_dict def standard_gatenames_cirq_conversions(): """ diff --git a/pygsti/tools/jamiolkowski.py b/pygsti/tools/jamiolkowski.py index 8c659f550..abe70ca08 100644 --- a/pygsti/tools/jamiolkowski.py +++ b/pygsti/tools/jamiolkowski.py @@ -327,7 +327,7 @@ def sums_of_negative_choi_eigenvalues(model): """ ret = [] for (_, gate) in model.operations.items(): - J = fast_jamiolkowski_iso_std(gate, model.basis) # Choi mx basis doesn't matter + J = fast_jamiolkowski_iso_std(gate.to_dense(on_space='HilbertSchmidt'), model.basis) # Choi mx 
basis doesn't matter evals = _np.linalg.eigvals(J) # could use eigvalsh, but wary of this since eigh can be wrong... sumOfNeg = 0.0 for ev in evals: diff --git a/pygsti/tools/lindbladtools.py b/pygsti/tools/lindbladtools.py index f28a838b9..6a8a443d6 100644 --- a/pygsti/tools/lindbladtools.py +++ b/pygsti/tools/lindbladtools.py @@ -14,7 +14,11 @@ import scipy.sparse as _sps from pygsti.tools.basistools import basis_matrices - +import pygsti.baseobjs as _bo +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel as _GlobalElementaryErrorgenLabel, \ + LocalElementaryErrorgenLabel as _LocalElementaryErrorgenLabel +from pygsti.baseobjs.statespace import QubitSpace as _QubitSpace +import warnings as _warnings def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_factor='auto'): """ @@ -55,6 +59,14 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ sparse : bool, optional Whether to construct a sparse or dense (the default) matrix. + normalization_factor : str or float, optional (default 'auto') + String or float specifying the normalization factor to apply. If + a string the options are 'auto' and 'auto_return', which both use + the corresponding (primal) elementary error generator to calculate + this automatically and only differ in whether they return this + normalization factor. If a float, the reciprocal of the input value + is used directly. + Returns ------- ndarray or Scipy CSR matrix @@ -72,35 +84,158 @@ def create_elementary_errorgen_dual(typ, p, q=None, sparse=False, normalization_ assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ "Wrong number of basis elements provided for %s-type elementary errorgen!" 
% typ - # Loop through the standard basis as all possible input density matrices - for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx - # Only difference between H/S/C/A is how they transform input density matrices - if typ == 'H': - rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * - elif typ == 'S': - rho1 = (p @ rho0 @ pdag) # 1 / d2 * - elif typ == 'C': - rho1 = (p @ rho0 @ qdag + q @ rho0 @ pdag) # 1 / (2 * d2) * - elif typ == 'A': - rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag) # 1j / (2 * d2) - elem_errgen[:, i] = rho1.ravel() - # ^ That line used to branch depending on the value of "sparse", but it - # turns out that both codepaths produced the same result. + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * + elif typ == 'S': + rho1 = (p @ rho0 @ pdag) # 1 / d2 * + elif typ == 'C': + rho1 = (p @ rho0 @ qdag + q @ rho0 @ pdag) # 1 / (2 * d2) * + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag) # 1j / (2 * d2) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): # rho0 == input density mx + for j in range(d): + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + elif typ == 'A': + rho1 = 
1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() return_normalization = bool(normalization_factor == 'auto_return') if normalization_factor in ('auto', 'auto_return'): primal = create_elementary_errorgen(typ, p, q, sparse) if sparse: - normalization_factor = _np.vdot(elem_errgen.toarray(), primal.toarray()) + normalization_factor = _np.vdot(elem_errgen.toarray().flatten(), primal.toarray().flatten()) else: - normalization_factor = _np.vdot(elem_errgen, primal) + normalization_factor = _np.vdot(elem_errgen.flatten(), primal.flatten()) elem_errgen *= _np.real_if_close(1 / normalization_factor).item() # item() -> scalar + if sparse: elem_errgen = elem_errgen.tocsr() + return (elem_errgen, normalization_factor) if return_normalization else elem_errgen + +#TODO: Should be able to leverage the structure of the paulis as generalized permutation +#matrices to avoid explicitly doing outer products +def create_elementary_errorgen_dual_pauli(typ, p, q=None, sparse=False): + """ + Construct a "dual" elementary error generator matrix in the "standard" (matrix-unit) basis. + Specialized to p and q being elements of the (unnormalized) pauli basis. + + The elementary error generator that is dual to the one computed by calling + :func:`create_elementary_errorgen` with the same argument. This dual element + can be used to find the coefficient of the original, or "primal" elementary generator. + For example, if `A = sum(c_i * E_i)`, where `E_i` are the elementary error generators given + by :func:`create_elementary_errorgen`), then `c_i = dot(D_i.conj(), A)` where `D_i` + is the dual to `E_i`. + + There are four different types of dual elementary error generators: 'H' (Hamiltonian), + 'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928. + Each type transforms an input density matrix differently. 
The action of an elementary + error generator `L` on an input density matrix `rho` is given by: + + Hamiltonian: `L(rho) = -1j/(2d^2) * [ p, rho ]` + Stochastic: `L(rho) = 1/(d^2) p * rho * p` + Correlation: `L(rho) = 1/(2d^2) ( p * rho * q + q * rho * p)` + Active: `L(rho) = 1j/(2d^2) ( p * rho * q - q * rho * p)` + + where `d` is the dimension of the Hilbert space, e.g. 2 for a single qubit. Square + brackets denotes the commutator and curly brackets the anticommutator. + `L` is returned as a superoperator matrix that acts on vectorized density matrices. + + Parameters + ---------- + typ : {'H','S','C','A'} + The type of dual error generator to construct. + + p : numpy.ndarray + d-dimensional basis matrix. + + q : numpy.ndarray, optional + d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`. + + sparse : bool, optional + Whether to construct a sparse or dense (the default) matrix. + + Returns + ------- + ndarray or Scipy CSR matrix + """ + d = p.shape[0]; d2 = d**2 if sparse: - elem_errgen = elem_errgen.tocsr() - return (elem_errgen, normalization_factor) if return_normalization else elem_errgen + elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype) + else: + elem_errgen = _np.empty((d2, d2), dtype=p.dtype) + + assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'" + assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ + "Wrong number of basis elements provided for %s-type elementary errorgen!" 
% typ + + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # -1j / (2 * d2) * + elif typ == 'S': + rho1 = (p @ rho0 @ p) # 1 / d2 * + elif typ == 'C': + rho1 = (p @ rho0 @ q + q @ rho0 @ p) # 1 / (2 * d2) * + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ q - q @ rho0 @ p) # 1j / (2 * d2) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + if typ == 'H': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'S': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + elif typ == 'C': + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + + if typ in 'HCA': + normalization_factor = 1/(2*d2) + else: + normalization_factor = 
1/d2 + + elem_errgen *= normalization_factor + if sparse: elem_errgen = elem_errgen.tocsr() + return elem_errgen +#TODO: The construction can be made a bit more efficient if we know we will be constructing multiple +#error generators with overlapping indices by reusing intermediate results. def create_elementary_errorgen(typ, p, q=None, sparse=False): """ Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis. @@ -154,26 +289,157 @@ def create_elementary_errorgen(typ, p, q=None, sparse=False): pq_plus_qp = pdag @ q + qdag @ p pq_minus_qp = pdag @ q - qdag @ p - # Loop through the standard basis as all possible input density matrices - for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx - # Only difference between H/S/C/A is how they transform input density matrices + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + # Loop through the standard basis as all possible input density matrices + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi + elif typ == 'S': + pdag_p = (pdag @ p) + rho1 = p @ rho0 @ pdag - 0.5 * (pdag_p @ rho0 + rho0 @ pdag_p) + elif typ == 'C': + rho1 = p @ rho0 @ qdag + q @ rho0 @ pdag - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = 
_np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elif typ == 'S': + pdag_p = (pdag @ p) + rho1 = p[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pdag_p[:, i] + rho1[i, :] += -.5*pdag_p[j, :] + elif typ == 'C': + rho1 = p[:,i].reshape((d,1))@qdag[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elif typ == 'A': + rho1 = 1j*(p[:,i].reshape((d,1))@ qdag[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@pdag[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + + if sparse: elem_errgen = elem_errgen.tocsr() + + return elem_errgen + +#TODO: Should be able to leverage the structure of the paulis as generalized permutation +#matrices to avoid explicitly doing outer products +def create_elementary_errorgen_pauli(typ, p, q=None, sparse=False): + """ + Construct an elementary error generator as a matrix in the "standard" (matrix-unit) basis. + Specialized to the case where p and q are elements of the (unnormalized) pauli basis. + + There are four different types of elementary error generators: 'H' (Hamiltonian), + 'S' (stochastic), 'C' (correlation), and 'A' (active). See arxiv:2103.01928. + Each type transforms an input density matrix differently. The action of an elementary + error generator `L` on an input density matrix `rho` is given by: + + Hamiltonian: `L(rho) = -1j * [ p, rho ]` + Stochastic: `L(rho) = p * rho * p - rho` + Correlation: `L(rho) = p * rho * q + q * rho * p - 0.5 {{p,q}, rho}` + Active: `L(rho) = 1j( p * rho * q - q * rho * p + 0.5 {[p,q], rho} )` + + Square brackets denotes the commutator and curly brackets the anticommutator. + `L` is returned as a superoperator matrix that acts on vectorized density matrices. 
+ + Parameters + ---------- + typ : {'H','S','C','A'} + The type of error generator to construct. + + p : numpy.ndarray + d-dimensional basis matrix. + + q : numpy.ndarray, optional + d-dimensional basis matrix; must be non-None if and only if `typ` is `'C'` or `'A'`. + + sparse : bool, optional + Whether to construct a sparse or dense (the default) matrix. + + Returns + ------- + ndarray or Scipy CSR matrix + """ + d = p.shape[0] + d2 = d**2 + if sparse: + elem_errgen = _sps.lil_matrix((d2, d2), dtype=p.dtype) + else: + elem_errgen = _np.empty((d2, d2), dtype=p.dtype) + + assert(typ in ('H', 'S', 'C', 'A')), "`typ` must be one of 'H', 'S', 'C', or 'A'" + assert((typ in 'HS' and q is None) or (typ in 'CA' and q is not None)), \ + "Wrong number of basis elements provided for %s-type elementary errorgen!" % typ + + #should be able to get away with just doing one product here. + if typ in 'CA': + pq = p@q + qp = q@p + pq_plus_qp = pq + qp + pq_minus_qp = pq - qp + + #if p or q is a sparse matrix fall back to original implementation + if not isinstance(p, _np.ndarray) or (q is not None and not isinstance(q, _np.ndarray)): + # Loop through the standard basis as all possible input density matrices + for i, rho0 in enumerate(basis_matrices('std', d2)): # rho0 == input density mx + # Only difference between H/S/C/A is how they transform input density matrices + if typ == 'H': + rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi + elif typ == 'S': + rho1 = p @ rho0 @ p - rho0 + elif typ == 'C': + rho1 = p @ rho0 @ q + q @ rho0 @ p - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) + elif typ == 'A': + rho1 = 1j * (p @ rho0 @ q - q @ rho0 @ p + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) + elem_errgen[:, i] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: if typ == 'H': - rho1 = -1j * (p @ rho0 - rho0 @ p) # Add "/2" to have PP ham gens match previous versions of pyGSTi + # Loop through the standard basis as 
all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = _np.zeros((d,d), dtype=_np.complex128) + rho1[:, j] = -1j*p[:, i] + rho1[i, :] += 1j*p[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() elif typ == 'S': - pdag_p = (pdag @ p) - rho1 = p @ rho0 @ pdag - 0.5 * (pdag_p @ rho0 + rho0 @ pdag_p) + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[i,j] += -1 + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() elif typ == 'C': - rho1 = p @ rho0 @ qdag + q @ rho0 @ pdag - 0.5 * (pq_plus_qp @ rho0 + rho0 @ pq_plus_qp) - elif typ == 'A': - rho1 = 1j * (p @ rho0 @ qdag - q @ rho0 @ pdag + 0.5 * (pq_minus_qp @ rho0 + rho0 @ pq_minus_qp)) - elem_errgen[:, i] = rho1.ravel() - # ^ That line used to branch depending on the value of sparse, but both - # branches had the same effect. 
- - if sparse: - elem_errgen = elem_errgen.tocsr() + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = p[:,i].reshape((d,1))@q[j,:].reshape((1,d)) + q[:,i].reshape((d,1))@p[j,:].reshape((1,d)) + rho1[:, j] += -.5*pq_plus_qp[:, i] + rho1[i, :] += -.5*pq_plus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + else: + # Loop through the standard basis as all possible input density matrices + for i in range(d): + for j in range(d): + rho1 = 1j*(p[:,i].reshape((d,1))@ q[j,:].reshape((1,d))) - 1j*(q[:,i].reshape((d,1))@p[j,:].reshape((1,d))) + rho1[:, j] += 1j*.5*pq_minus_qp[:, i] + rho1[i, :] += 1j*.5*pq_minus_qp[j, :] + elem_errgen[:, d*i+j] = rho1.flatten()[:, None] if sparse else rho1.flatten() + + if sparse: elem_errgen = elem_errgen.tocsr() return elem_errgen + def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N803 """ Construct the superoperator for a term in the common Lindbladian expansion of an error generator. @@ -238,3 +504,338 @@ def create_lindbladian_term_errorgen(typ, Lm, Ln=None, sparse=False): # noqa N8 if sparse: lind_errgen = lind_errgen.tocsr() return lind_errgen + + +def random_CPTP_error_generator_rates(num_qubits, errorgen_types=('H', 'S', 'C', 'A'), max_weights=None, + H_params=(0.,.01), SCA_params=(0.,.01), error_metric=None, error_metric_value=None, + relative_HS_contribution=None, fixed_errorgen_rates=None, sslbl_overlap=None, + label_type='global', seed=None, qubit_labels=None): + """ + Function for generating a random set of CPTP error generator rates. + + Parameters + ---------- + num_qubits : int + Number of qubits the error generator acts upon. + + errorgen_types : tuple of str, optional (default('H', 'S', 'C', 'A')) + Tuple of strings designating elementary error generator types to include in this + basis. 
Note that due to the CP constraint, certain values are not allowed, +        and any tuple containing 'C' or 'A' terms must also include 'S'. +     +    max_weights : dict, optional (default None) +        An optional dictionary specifying the maximum weight +        for each of the elementary error generator types, with keys +        given by the strings 'H', 'S', 'C' and 'A'. If None then +        there is no maximum weight. If specified, any error generator +        types without entries will have no maximum weight associated +        with them. + +    H_params : tuple of floats, optional (default (0.,.01)) +        Mean and standard deviation parameters for a normal distribution +        from which the H rates will be sampled. Note that specifying a non-zero +        value for the mean with error_metric_value set to a non-trivial value +        is not supported, and will raise an error. + +    SCA_params : tuple of floats, optional (default (0.,.01)) +        Mean and standard deviation parameters for a normal distribution +        from which the entries of the matrix used in the construction of the S, C and A rates +        are sampled. Note that specifying a non-zero +        value for the mean with error_metric_value set to a non-trivial value +        is not supported, and will raise an error. + +    error_metric : str, optional (default None) +        An optional string, used in conjunction with the error_metric_value +        kwarg which specifies which metric to use in setting the sampled +        channel's overall error rate. If None, no target value for the channel's +        overall error rate is used. Currently supported options include: + +        - 'generator_infidelity' +        - 'total_generator_error' + +    error_metric_value : float, optional (default None) +        A float between 0 and 1 which gives the target value of the +        error metric specified in 'error_metric' for the channel induced by +        the randomly produced error generator. If None +        then no target value is used and the returned error generator +        will have a random generator infidelity. 
+ + relative_HS_contribution : tuple, optional (default None) + An optional tuple, used in conjunction with the `generator_infidelity` kwarg, + specifying the relative contributions of the H and S error generators to the + generator infidelity. The values in this tuple should sum to 1. The first entry + corresponds to the H sector, and the second the S sector. + + sslbl_overlap : list of sslbls, optional (default None) + A list of state space labels corresponding to qudits the support of + an error generator must overlap with (i.e. the support must include at least + one of these qudits) in order to be included in this basis. + + fixed_errorgen_rates : dict, optional (default None) + An optional dictionary whose keys are `LocalElementaryErrorgenLabel` + objects, and whose values are error generator rates. When specified, the + rates in this dictionary will override any randomly selected values in the + final returned error generator rate dictionary. The inclusion of these + rates is performed independently of any of the kwargs which otherwise + control the weight and allowed types of the error generators in this + model. If specifying fixed C and A rates it is possible for the final + error generator to be non-CP. + + label_type : str, optional (default 'global') + String which can be either 'global' or 'local', indicating whether to + return a dictionary with keys which are `GlobalElementaryErrorgenLabel` + or `LocalElementaryErrorgenLabel` objects respectively. + + seed : int, optional (default None) + An optional integer used in seeding the RNG. + + qubit_labels : list or int or str, optional (default None) + An optional list of qubit labels upon which the error generator should act. + Only utilized when returning global labels. 
+ + Returns + ------- + Dictionary of error generator coefficient labels and rates + """ + + #Add various assertions + if fixed_errorgen_rates is None: + fixed_errorgen_rates = dict() + + if error_metric is not None: + assert H_params[0] == 0. and SCA_params[0] == 0., 'Specifying non-zero HSCA means together with a target error metric is not supported.' + if error_metric not in ('generator_infidelity', 'total_generator_error'): + raise ValueError('Unsupported error metric type. Currently supported options are generator_infidelity and total_generator_error') + #Add a check that the desired error metric value is attainable given the values of fixed_errorgen_rates. + if fixed_errorgen_rates: + #verify that all of the keys are LocalElementaryErrorgenLabel objects. + msg = 'All keys of fixed_errorgen_rates must be LocalElementaryErrorgenLabel.' + assert all([isinstance(key, _LocalElementaryErrorgenLabel) for key in fixed_errorgen_rates.keys()]), msg + + #get the H and S rates from the dictionary. + fixed_H_rates = _np.array([val for key, val in fixed_errorgen_rates.items() if key.errorgen_type == 'H']) + fixed_S_rates = _np.array([val for key, val in fixed_errorgen_rates.items() if key.errorgen_type == 'S']) + fixed_S_contribution = _np.sum(fixed_S_rates) + if error_metric == 'generator_infidelity': + fixed_H_contribution = _np.sum(fixed_H_rates**2) + fixed_error_metric_value = fixed_S_contribution + fixed_H_contribution + elif error_metric == 'total_generator_error': + fixed_H_contribution = _np.sum(_np.abs(fixed_H_rates)) + fixed_error_metric_value = fixed_S_contribution + fixed_H_contribution + msg = f'Incompatible values of error_metric_value and fixed_errorgen_rates. The value of {error_metric}={error_metric_value}'\ + + f' is less than the value of {fixed_error_metric_value} corresponding to the given fixed_errorgen_rates_dict.' 
+ assert fixed_error_metric_value < error_metric_value, msg + + if relative_HS_contribution is not None: + msg_H = f'Fixed H contribution to {error_metric} of {fixed_H_contribution} exceeds overall H contribution target value of {relative_HS_contribution[0]*error_metric_value}.' + msg_S = f'Fixed S contribution to {error_metric} of {fixed_S_contribution} exceeds overall S contribution target value of {relative_HS_contribution[1]*error_metric_value}.' + assert fixed_H_contribution < relative_HS_contribution[0]*error_metric_value, msg_H + assert fixed_S_contribution < relative_HS_contribution[1]*error_metric_value, msg_S + else: + fixed_H_contribution = 0 + fixed_S_contribution = 0 + + if relative_HS_contribution is not None: + assert ('H' in errorgen_types and 'S' in errorgen_types), 'Invalid relative_HS_contribution, one of either H or S is not in errorgen_types.' + if error_metric is None: + _warnings.warn('The relative_HS_contribution kwarg is only utilized when error_metric is not None, the specified value is ignored otherwise.') + else: + assert abs(1-sum(relative_HS_contribution))<=1e-7, 'The relative_HS_contribution should sum to 1.' + + if 'C' in errorgen_types or 'A' in errorgen_types: + assert 'S' in errorgen_types, 'Must include S terms when C and A present. Cannot have a CP error generator otherwise.' + + if max_weights is not None: + assert max_weights.get('C', 0) <= max_weights.get('S', 0) and max_weights.get('A', 0) <= max_weights.get('S', 0), 'The maximum weight of the C and A terms should be less than or equal to the maximum weight of S.' + rng = _np.random.default_rng(seed) + + #create a state space with this dimension. 
+ state_space = _QubitSpace.cast(num_qubits) + + #create an error generator basis according the our weight specs + errorgen_basis = _bo.CompleteElementaryErrorgenBasis('PP', state_space, elementary_errorgen_types=errorgen_types, + max_weights=max_weights, sslbl_overlap=sslbl_overlap, default_label_type='local') + + #Get the labels, broken out by sector, of each of the error generators in this basis. + errgen_labels_H = _sort_errorgen_labels(errorgen_basis.sublabels('H')) + errgen_labels_S = _sort_errorgen_labels(errorgen_basis.sublabels('S')) + errgen_labels_C = _sort_errorgen_labels(errorgen_basis.sublabels('C')) + errgen_labels_A = _sort_errorgen_labels(errorgen_basis.sublabels('A')) + + #filter out any C or A terms which can't be present with CP constraints due to lack of correct S term. + filtered_errgen_labels_C = [] + for lbl in errgen_labels_C: + first_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],)) + second_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],)) + if first_label not in errgen_labels_S or second_label not in errgen_labels_S: + continue + else: + filtered_errgen_labels_C.append(lbl) + filtered_errgen_labels_A = [] + for lbl in errgen_labels_A: + first_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],)) + second_label = _LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],)) + if first_label not in errgen_labels_S or second_label not in errgen_labels_S: + continue + else: + filtered_errgen_labels_A.append(lbl) + errgen_labels_C = filtered_errgen_labels_C + errgen_labels_A = filtered_errgen_labels_A + + #Get the number of H and S error generators. 
These are stored in HSCA order in the labels +    num_H_rates = len(errgen_labels_H) +    num_S_rates = len(errgen_labels_S) + +    random_rates_dicts = dict() +    #Generate random H rates +    random_rates_dicts['H'] = {lbl: val for lbl,val in zip(errgen_labels_H, rng.normal(loc=H_params[0], scale=H_params[1], size = num_H_rates))} + +    #Create random matrices with Gaussian entries which will be used to generate PSD matrices for the SCA rates. +    random_SC_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) +    random_SA_gen_mat = rng.normal(loc=SCA_params[0], scale=SCA_params[1], size=(num_S_rates, num_S_rates)) +    random_SC_mat = random_SC_gen_mat @ random_SC_gen_mat.T +    random_SA_mat = random_SA_gen_mat @ random_SA_gen_mat.T +    random_S_rates = _np.real(_np.diag(random_SC_mat) + _np.diag(random_SA_mat)) + +    #The random S rates are just the sum of the diagonals of random SC and SA mats. +    random_rates_dicts['S'] = {lbl: val for lbl,val in zip(errgen_labels_S, random_S_rates)} +    #The random C rates are the upper off-diagonal entries of the SC matrix, and the A rates those of the SA matrix. 
+ random_rates_dicts['C'] = {lbl: val for lbl,val in zip(errgen_labels_C, random_SC_mat[_np.triu_indices_from(random_SC_mat, k=1)])} + random_rates_dicts['A'] = {lbl: val for lbl,val in zip(errgen_labels_A, random_SA_mat[_np.triu_indices_from(random_SA_mat, k=1)])} + #manually check conditions on C and A + for lbl, rate in random_rates_dicts['C'].items(): + first_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],))] + second_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],))] + + if not (abs(rate) <= _np.sqrt(first_S_rate*second_S_rate)): + print(f'{lbl}: {rate}') + raise ValueError('Invalid C rate') + + #manually check conditions on C and A + for lbl, rate in random_rates_dicts['A'].items(): + first_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[0],))] + second_S_rate = random_rates_dicts['S'][_LocalElementaryErrorgenLabel('S', (lbl.basis_element_labels[1],))] + + if not (abs(rate) <= _np.sqrt(first_S_rate*second_S_rate)): + print(f'{lbl}: {rate}') + raise ValueError('Invalid A rate') + + #Add in/override the fixed rates for each of the sectors. 
+ H_fixed_keys = [] + S_fixed_keys = [] + C_fixed_keys = [] + A_fixed_keys = [] + for key in fixed_errorgen_rates: + if key.errorgen_type == 'H': + H_fixed_keys.append(key) + elif key.errorgen_type == 'S': + S_fixed_keys.append(key) + elif key.errorgen_type == 'C': + C_fixed_keys.append(key) + else: + A_fixed_keys.append(key) + + random_rates_dicts['H'].update({key:fixed_errorgen_rates[key] for key in H_fixed_keys}) + random_rates_dicts['S'].update({key:fixed_errorgen_rates[key] for key in S_fixed_keys}) + random_rates_dicts['C'].update({key:fixed_errorgen_rates[key] for key in C_fixed_keys}) + random_rates_dicts['A'].update({key:fixed_errorgen_rates[key] for key in A_fixed_keys}) + + #For each sector construct a complementary structure of the free(ish) parameters error generator parameters for + #that sector. + H_free_keys = [key for key in errgen_labels_H if key not in fixed_errorgen_rates] #membership checking is (often) faster with dicts + S_free_keys = [key for key in errgen_labels_S if key not in fixed_errorgen_rates] + C_free_keys = [key for key in errgen_labels_C if key not in fixed_errorgen_rates] + A_free_keys = [key for key in errgen_labels_A if key not in fixed_errorgen_rates] + + #Now it is time to apply the various normalizations necessary to get the desired target + #generator infidelity and sector weights. + if error_metric is not None: + #Get the free parameter's For both generator infidelity we use the sum of the S rates + current_S_sum_free = _np.sum([random_rates_dicts['S'][key] for key in S_free_keys]) + if error_metric == 'generator_infidelity': + #for generator infidelity we use the sum of the squared H rates. + current_H_sum_free = _np.sum([random_rates_dicts['H'][key]**2 for key in H_free_keys]) + elif error_metric == 'total_generator_error': + #for total generator error we use the sum of the H rates directly. 
+ current_H_sum_free = _np.sum([abs(random_rates_dicts['H'][key]) for key in H_free_keys]) + + total_H_sum = current_H_sum_free + fixed_H_contribution + total_S_sum = current_S_sum_free + fixed_S_contribution + + if relative_HS_contribution is not None: + #calculate the target values of the H and S contributions to the error metric + #given the specified contributions + req_H_sum = relative_HS_contribution[0]*error_metric_value + req_S_sum = relative_HS_contribution[1]*error_metric_value + + #If we haven't specified a relative contribution for H and S then we will scale these + #to give the correct generator infidelity while preserving whatever relative contribution + #to the generator infidelity they were randomly sampled to have. + else: + #Get the current relative contributions. + current_H_contribution = total_H_sum/(total_H_sum+total_S_sum) + current_S_contribution = 1-current_H_contribution + req_H_sum = current_H_contribution*error_metric_value + req_S_sum = current_S_contribution*error_metric_value + + #this is how much we still need to be contributed by the free parameters + needed_H_free = req_H_sum - fixed_H_contribution + needed_S_free = req_S_sum - fixed_S_contribution + + if error_metric == 'generator_infidelity': + #The scale factor for the H rates is sqrt(req_squared_H_sum/current_squared_H_sum) + H_scale_factor = _np.sqrt(needed_H_free/current_H_sum_free) + elif error_metric == 'total_generator_error': + #The scale factor for the S rates is req_S_sum/current_S_sum + H_scale_factor = needed_H_free/current_H_sum_free + #The scale factor for the S rates is req_S_sum/current_S_sum + S_scale_factor = needed_S_free/current_S_sum_free + + #Rescale the free random rates, note that the free SCA terms will all be scaled by the S_scale_factor + #to preserve PSD. 
+ for key in H_free_keys: + random_rates_dicts['H'][key]*=H_scale_factor + for key in S_free_keys: + random_rates_dicts['S'][key]*=S_scale_factor + for key in C_free_keys: + random_rates_dicts['C'][key]*=S_scale_factor + for key in A_free_keys: + random_rates_dicts['A'][key]*=S_scale_factor + + #Now turn this into a rates dict + errorgen_rates_dict = dict() + for errgen_type in errorgen_types: + errorgen_rates_dict.update(random_rates_dicts[errgen_type]) + + if label_type not in ['global', 'local']: + raise ValueError('Unsupported label type {label_type}.') + + if label_type == 'global': + errorgen_rates_dict = {_GlobalElementaryErrorgenLabel.cast(lbl, sslbls=state_space.state_space_labels): val + for lbl, val in errorgen_rates_dict.items()} + if qubit_labels is not None: + mapper= {i:lbl for i,lbl in enumerate(qubit_labels)} + errorgen_rates_dict = {lbl.map_state_space_labels(mapper):val for lbl,val in errorgen_rates_dict.items()} + return errorgen_rates_dict + +def _sort_errorgen_labels(errgen_labels): + """ + This function sorts error generator coefficients in canonical way. + Helper function for random error generator rate construction. 
+ """ + if not errgen_labels: + return [] + + assert isinstance(errgen_labels[0], _LocalElementaryErrorgenLabel), 'Can only sort local labels at the moment' + + errorgen_types = [lbl.errorgen_type for lbl in errgen_labels] + assert len(set(errorgen_types))==1, 'only one error generator type at a time is supported presently' + + errorgen_type = errorgen_types[0] + if errorgen_type in ('H', 'S'): + sorted_errgen_labels = sorted(errgen_labels, key= lambda lbl:lbl.basis_element_labels[0]) + else: + sorted_errgen_labels = sorted(errgen_labels, key= lambda lbl:(lbl.basis_element_labels[0], lbl.basis_element_labels[1])) + + return sorted_errgen_labels + diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index f7320effa..663773501 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -1689,9 +1689,44 @@ def elementary_errorgens_dual(dim, typ, basis): return elem_errgens -def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_labels, elementary_errorgen_basis='pp', +def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_labels, elementary_errorgen_basis='PP', errorgen_basis='pp', return_projected_errorgen=False): - """ TODO: docstring """ + """ + Extract a dictionary of elemenary error generator coefficients and rates fromt he specified dense error generator + matrix. + + Parameters + ---------- + errorgen : numpy.ndarray + Error generator matrix + + elementary_errorgen_labels : list of `ElementaryErrorgenLabel`s + A list of `ElementaryErrorgenLabel`s corresponding to the coefficients + to extract from the input error generator. + + elementary_errorgen_basis : str or `Basis`, optional (default 'PP') + Basis used in construction of elementary error generator dual matrices. + + errorgen_basis : str or `Basis`, optional (default 'pp') + Basis of the input matrix specified in `errorgen`. 
+ + return_projected_errorgen : bool, optional (default False) + If True return a new dense error generator matrix which has been + projected onto the subspace of error generators spanned by + `elementary_errorgen_labels`. + + Returns + ------- + projections : dict + Dictionary whose keys are the coefficients specified in `elementary_errorgen_labels` + (cast to `LocalElementaryErrorgenLabel`), and values are corresponding rates. + + projected_errorgen : np.ndarray + Returned if return_projected_errorgen is True, a new dense error generator matrix which has been + projected onto the subspace of error generators spanned by + `elementary_errorgen_labels`. + + """ # the same as decompose_errorgen but given a dict/list of elementary errorgens directly instead of a basis and type if isinstance(errorgen_basis, _Basis): errorgen_std = _bt.change_basis(errorgen, errorgen_basis, errorgen_basis.create_equivalent('std')) @@ -1718,7 +1753,8 @@ def extract_elementary_errorgen_coefficients(errorgen, elementary_errorgen_label bmx0 = elementary_errorgen_basis[bel_lbls[0]] bmx1 = elementary_errorgen_basis[bel_lbls[1]] if (len(bel_lbls) > 1) else None flat_projector = _lt.create_elementary_errorgen_dual(key.errorgen_type, bmx0, bmx1, sparse=False).ravel() - projections[key] = _np.real_if_close(_np.vdot(flat_projector, flat_errorgen_std), tol=1000) + projections[key] = _np.real_if_close(_np.vdot(flat_projector, flat_errorgen_std), tol=1000).item() + if return_projected_errorgen: space_projector[:, i] = flat_projector @@ -1875,55 +1911,216 @@ def _assert_shape(ar, shape, sparse=False): def create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False): """ - TODO: docstring - labels can be, e.g. ('H', 'XX') and basis should be a 1-qubit basis w/single-char labels + Construct the elementary error generator matrix, either in a dense or sparse representation, + corresponding to the specified type and basis element subscripts. 
+ + Parameters + ---------- + typ : str + String specifying the type of error generator to be constructed. Can be either 'H', 'S', 'C' or 'A'. + + basis_element_labels : list or tuple of str + A list or tuple of strings corresponding to the basis element labels subscripting the desired elementary + error generators. If `typ` is 'H' or 'S' this should be length-1, and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the elementary error generator. + + normalize : bool, optional (default False) + If True the elementary error generator is normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the elementary error generator is returned as a sparse array. + + tensorprod_basis : bool, optional (default False) + If True, the returned array is given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + np.ndarray or Scipy CSR matrix """ - return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, + eglist = _create_elementary_errorgen_nqudit([typ], [basis_element_labels], basis_1q, normalize, sparse, tensorprod_basis, create_dual=False) - + return eglist[0] def create_elementary_errorgen_nqudit_dual(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False): """ - TODO: docstring - labels can be, e.g. ('H', 'XX') and basis should be a 1-qubit basis w/single-char labels + Construct the dual elementary error generator matrix, either in a dense or sparse representation, + corresponding to the specified type and basis element subscripts. + + Parameters + ---------- + typ : str + String specifying the type of dual error generator to be constructed. Can be either 'H', 'S', 'C' or 'A'. 
+ + basis_element_labels : list or tuple of str + A list or tuple of strings corresponding to the basis element labels subscripting the desired dual elementary + error generators. If `typ` is 'H' or 'S' this should be length-1, and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the dual elementary error generator. + + normalize : bool, optional (default False) + If True the dual elementary error generator is normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the dual elementary error generator is returned as a sparse array. + + tensorprod_basis : bool, optional (default False) + If True, the returned array is given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + np.ndarray or Scipy CSR matrix """ - return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, + eglist = _create_elementary_errorgen_nqudit([typ], [basis_element_labels], basis_1q, normalize, sparse, tensorprod_basis, create_dual=True) + return eglist[0] +def bulk_create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, + sparse=False, tensorprod_basis=False): + """ + Construct the elementary error generator matrices, either in a dense or sparse representation, + corresponding to the specified types and list of basis element subscripts. + + Parameters + ---------- + typ : list of str + List of strings specifying the types of error generator to be constructed. Entries can be 'H', 'S', 'C' or 'A'. + + basis_element_labels : list of lists or tuples of str + A list containing sublists or subtuple of strings corresponding to the basis element labels subscripting the desired elementary + error generators. 
For each sublist, if the corresponding entry of `typ` is 'H' or 'S' this should be length-1, + and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the elementary error generators. + + normalize : bool, optional (default False) + If True the elementary error generators are normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the elementary error generators are returned as a sparse array. + + tensorprod_basis : bool, optional (default False) + If True, the returned arrays are given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + list of np.ndarray or Scipy CSR matrix + """ + + return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize, + sparse, tensorprod_basis, create_dual=False) + + +def bulk_create_elementary_errorgen_nqudit_dual(typ, basis_element_labels, basis_1q, normalize=False, + sparse=False, tensorprod_basis=False): + """ + Construct the dual elementary error generator matrices, either in a dense or sparse representation, + corresponding to the specified types and list of basis element subscripts. + + Parameters + ---------- + typ : list of str + List of strings specifying the types of dual error generators to be constructed. Entries can be 'H', 'S', 'C' or 'A'. + + basis_element_labels : list of lists or tuples of str + A list containing sublists or subtuple of strings corresponding to the basis element labels subscripting the desired dual elementary + error generators. For each sublist, if the corresponding entry of `typ` is 'H' or 'S' this should be length-1, + and for 'C' and 'A' length-2. + + basis_1q : `Basis` + A one-qubit `Basis` object used in the construction of the dual elementary error generators. 
+ + normalize : bool, optional (default False) + If True the dual elementary error generators are normalized to have unit Frobenius norm. + + sparse : bool, optional (default False) + If True the dual elementary error generators are returned as a sparse array. + + tensorprod_basis : bool, optional (default False) + If True, the returned arrays are given in a basis consisting of the appropriate tensor product of + single-qubit standard bases, as opposed to the N=2^n dimensional standard basis (the values are the same + but this may result in some reordering of entries). + + Returns + ------- + list of np.ndarray or Scipy CSR matrix + """ + + return _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize, + sparse, tensorprod_basis, create_dual=True) def _create_elementary_errorgen_nqudit(typ, basis_element_labels, basis_1q, normalize=False, sparse=False, tensorprod_basis=False, create_dual=False): - create_fn = _lt.create_elementary_errorgen_dual if create_dual else _lt.create_elementary_errorgen - if typ in 'HS': - B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[0]]) - ret = create_fn(typ, B, sparse=sparse) # in std basis - elif typ in 'CA': - B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[0]]) - C = _functools.reduce(_np.kron, [basis_1q[bel] for bel in basis_element_labels[1]]) - ret = create_fn(typ, B, C, sparse=sparse) # in std basis - else: - raise ValueError("Invalid elementary error generator type: %s" % str(typ)) + #See docstrings for `bulk_create_elementary_errorgen_nqudit` and `bulk_create_elementary_errorgen_nqudit_dual`. 
- if normalize: - normfn = _spsl.norm if sparse else _np.linalg.norm - norm = normfn(ret) # same as norm(term.flat) - if not _np.isclose(norm, 0): - ret /= norm # normalize projector - assert(_np.isclose(normfn(ret), 1.0)) + #check if we're using the pauli basis + is_pauli = set(basis_1q.name.split('*')) == set(['PP']) + if create_dual: + if is_pauli: + create_fn = _lt.create_elementary_errorgen_dual_pauli + else: + create_fn = _lt.create_elementary_errorgen_dual + else: + if is_pauli: + create_fn = _lt.create_elementary_errorgen_pauli + else: + create_fn = _lt.create_elementary_errorgen + normfn = _spsl.norm if sparse else _np.linalg.norm + if tensorprod_basis: # convert from "flat" std basis to tensorprod of std bases (same elements but in # a different order). Important if want to also construct ops by kroneckering the # returned maps with, e.g., identities - nQubits = int(round(_np.log(ret.shape[0]) / _np.log(4))); assert(ret.shape[0] == 4**nQubits) - current_basis = _Basis.cast('std', ret.shape[0]) - tensorprod_basis = _Basis.cast('std', [(4,) * nQubits]) - ret = _bt.change_basis(ret, current_basis, tensorprod_basis) - - return ret + orig_bases = dict() #keys will be numbers of qubits, values basis objects. 
+ tensorprod_bases = dict() + + eglist = [] + for egtyp, bels in zip(typ, basis_element_labels): + if egtyp in 'HS': + B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[0]]) + ret = create_fn(egtyp, B, sparse=sparse) # in std basis + elif egtyp in 'CA': + B = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[0]]) + C = _functools.reduce(_np.kron, [basis_1q[bel] for bel in bels[1]]) + ret = create_fn(egtyp, B, C, sparse=sparse) # in std basis + else: + raise ValueError("Invalid elementary error generator type: %s" % str(typ)) + + if normalize: + norm = normfn(ret) # same as norm(term.flat) + if not _np.isclose(norm, 0): + ret /= norm # normalize projector + assert(_np.isclose(normfn(ret), 1.0)) + + if tensorprod_basis: + num_qudits = int(round(_np.log(ret.shape[0]) / _np.log(basis_1q.dim))); + assert(ret.shape[0] == basis_1q.dim**num_qudits) + current_basis = orig_bases.get(num_qudits, None) + tensorprod_basis = tensorprod_bases.get(num_qudits, None) + if current_basis is None: + current_basis = _Basis.cast('std', basis_1q.dim**num_qudits) + orig_bases[num_qudits] = current_basis + if tensorprod_basis is None: + tensorprod_basis = _Basis.cast('std', [(basis_1q.dim,)*num_qudits]) + tensorprod_bases[num_qudits] = tensorprod_basis + + ret = _bt.change_basis(ret, current_basis, tensorprod_basis) + eglist.append(ret) + + return eglist -#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with -# calls to this one and unitary_to_std_processmx def rotation_gate_mx(r, mx_basis="gm"): """ Construct a rotation operation matrix. @@ -2019,16 +2216,6 @@ def project_model(model, target_model, basis = model.basis proj_basis = basis # just use the same basis here (could make an arg later?) - #OLD REMOVE - ##The projection basis needs to be a basis for density matrices - ## (i.e. 2x2 mxs in 1Q case) rather than superoperators (4x4 mxs - ## in 1Q case) - whcih is what model.basis is. 
So, we just extract - ## a builtin basis name for the projection basis. - #if basis.name in ('pp', 'gm', 'std', 'qt'): - # proj_basis_name = basis.name - #else: - # proj_basis_name = 'pp' # model.basis is weird so just use paulis as projection basis - if basis.name != target_model.basis.name: raise ValueError("Basis mismatch between model (%s) and target (%s)!" % (model.basis.name, target_model.basis.name)) @@ -2069,8 +2256,6 @@ def project_model(model, target_model, otherGens = otherBlk.create_lindblad_term_superoperators(mx_basis=basis) #Note: return values *can* be None if an empty/None basis is given - #lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \ - # _np.einsum('ij,ijkl', OProj, OGens) lnd_error_gen = _np.tensordot(HBlk.block_data, HGens, (0, 0)) + \ _np.tensordot(otherBlk.block_data, otherGens, ((0, 1), (0, 1))) @@ -2101,32 +2286,13 @@ def project_model(model, target_model, pos_evals = evals.clip(0, 1e100) # clip negative eigenvalues to 0 OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U))) #OProj_cp is now a pos-def matrix - #lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \ - # _np.einsum('ij,ijkl', OProj_cp, OGens) lnd_error_gen_cp = _np.tensordot(HBlk.block_data, HGens, (0, 0)) + \ _np.tensordot(OProj_cp, otherGens, ((0, 1), (0, 1))) - #lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis) gsDict['LND'].operations[gl] = operation_from_error_generator( lnd_error_gen_cp, targetOp, basis, gen_type) NpDict['LND'] += HBlk.block_data.size + otherBlk.block_data.size - #Removed attempt to contract H+S to CPTP by removing positive stochastic projections, - # but this doesn't always return the gate to being CPTP (maybe b/c of normalization)... 
- #sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens) - # # (only negative stochastic projections OK) - #sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp) - #gsHSCP.operations[gl] = _tools.operation_from_error_generator( - # ham_error_gen, targetOp, gen_type) #+sto_error_gen_cp - - #DEBUG!!! - #print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_eigenvalues(model)) - #print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_eigenvalues(gsDict['LND'])) - - #Check for CPTP where expected - #assert(_tools.sum_of_negative_choi_eigenvalues(gsHSCP) < 1e-6) - #assert(_tools.sum_of_negative_choi_eigenvalues(gsDict['LND']) < 1e-6) - #Collect and return requrested results: ret_gs = [gsDict[p] for p in projectiontypes] ret_Nps = [NpDict[p] for p in projectiontypes] diff --git a/pyproject.toml b/pyproject.toml index 8eb61fce6..dc760b8c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ authors = [ dependencies=[ 'numpy>=1.15.0', 'scipy', + 'stim', 'plotly', 'pandas', 'networkx' diff --git a/requirements.txt b/requirements.txt index fee654528..5b3b1fe78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ scipy plotly pandas networkx +stim diff --git a/test/unit/objects/test_errorgenbasis.py b/test/unit/objects/test_errorgenbasis.py new file mode 100644 index 000000000..4d2f1da7f --- /dev/null +++ b/test/unit/objects/test_errorgenbasis.py @@ -0,0 +1,258 @@ +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis, ExplicitElementaryErrorgenBasis +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel +from pygsti.baseobjs import BuiltinBasis, QubitSpace +from ..util import BaseCase + +class CompleteElementaryErrorgenBasisTester(BaseCase): + + def setUp(self): + self.basis_1q = BuiltinBasis('PP', 4) + self.state_space_1Q = QubitSpace(1) + self.state_space_2Q = QubitSpace(2) + + #create a complete basis with default settings for reuse. 
+ self.complete_errorgen_basis_default_1Q = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q) + + def test_default_construction(self): + assert len(self.complete_errorgen_basis_default_1Q.labels) == 12 + #may as well also test the __len__ method while we're here. + assert len(self.complete_errorgen_basis_default_1Q) == 12 + + def test_sector_restrictions(self): + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + errorgen_basis_S = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('S',)) + errorgen_basis_C = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('C',)) + errorgen_basis_A = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('A',)) + + for lbl in errorgen_basis_H.labels: + assert lbl.errorgen_type == 'H' + for lbl in errorgen_basis_S.labels: + assert lbl.errorgen_type == 'S' + for lbl in errorgen_basis_C.labels: + assert lbl.errorgen_type == 'C' + for lbl in errorgen_basis_A.labels: + assert lbl.errorgen_type == 'A' + + assert len(errorgen_basis_H.labels) == 3 + assert len(errorgen_basis_S.labels) == 3 + assert len(errorgen_basis_C.labels) == 3 + assert len(errorgen_basis_A.labels) == 3 + + #confirm multiple sectors work right too. 
+ errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + for lbl in errorgen_basis_HSC.labels: + assert lbl.errorgen_type in ('H', 'S', 'C') + assert len(errorgen_basis_HSC.labels) == 9 + + def test_max_weights(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q, + max_weights = {'H':2, 'S':2, 'C':1, 'A':1}) + + for lbl in errorgen_basis.labels: + if lbl.errorgen_type in ('H', 'S'): + assert len(lbl.sslbls) in (1,2) + else: + assert len(lbl.sslbls)==1 + + def test_to_explicit_basis(self): + explicit_errorgen_basis = self.complete_errorgen_basis_default_1Q.to_explicit_basis() + + assert self.complete_errorgen_basis_default_1Q.labels == explicit_errorgen_basis.labels + + def test_global_local_labels(self): + global_labels = self.complete_errorgen_basis_default_1Q.global_labels() + local_labels = self.complete_errorgen_basis_default_1Q.local_labels() + + assert isinstance(global_labels[0], GlobalElementaryErrorgenLabel) + assert isinstance(local_labels[0], LocalElementaryErrorgenLabel) + + def test_sublabels(self): + H_labels = self.complete_errorgen_basis_default_1Q.sublabels('H') + S_labels = self.complete_errorgen_basis_default_1Q.sublabels('S') + C_labels = self.complete_errorgen_basis_default_1Q.sublabels('C') + A_labels = self.complete_errorgen_basis_default_1Q.sublabels('A') + + for lbl in H_labels: + assert lbl.errorgen_type == 'H' + for lbl in S_labels: + assert lbl.errorgen_type == 'S' + for lbl in C_labels: + assert lbl.errorgen_type == 'C' + for lbl in A_labels: + assert lbl.errorgen_type == 'A' + + def test_elemgen_supports(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q) + + #there should be 24 weight 1 and 216 weight 2 terms. 
+ elemgen_supports = errorgen_basis.elemgen_supports + num_weight_1 = 0 + num_weight_2 = 0 + for support in elemgen_supports: + if len(support) == 1: + num_weight_1+=1 + elif len(support) == 2: + num_weight_2+=1 + else: + raise ValueError('Invalid support length for two-qubit error gen basis.') + + assert num_weight_1==24 and num_weight_2==216 + + def test_elemgen_and_dual_construction(self): + #just test for running w/o failure. + elemgens = self.complete_errorgen_basis_default_1Q.elemgen_matrices + duals = self.complete_errorgen_basis_default_1Q.elemgen_dual_matrices + + def test_label_index(self): + labels = self.complete_errorgen_basis_default_1Q.labels + + test_eg = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (0,)) + test_eg_local = LocalElementaryErrorgenLabel('C', ['XI', 'YI']) + test_eg_missing = GlobalElementaryErrorgenLabel('C', ['X', 'Y'], (1,)) + + lbl_idx = self.complete_errorgen_basis_default_1Q.label_index(test_eg) + lbl_idx_1 = self.complete_errorgen_basis_default_1Q.label_index(test_eg_local) + assert lbl_idx == lbl_idx_1 + assert lbl_idx == labels.index(test_eg) + + with self.assertRaises(KeyError): + self.complete_errorgen_basis_default_1Q.label_index(test_eg_missing) + assert self.complete_errorgen_basis_default_1Q.label_index(test_eg_missing, ok_if_missing=True) is None + + def test_create_subbasis(self): + errorgen_basis = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_2Q) + subbasis = errorgen_basis.create_subbasis(sslbl_overlap=(0,)) + + #should have 12 weight-1 terms on zero and 216 weight 2, for 228 total in this subbasis. + assert len(subbasis) == 228 + + def test_union(self): + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + errorgen_basis_S = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('S',)) + + union_basis = errorgen_basis_H.union(errorgen_basis_S) + #should now have 6 items. 
+ assert len(union_basis) == 6 + for lbl in union_basis.labels: + assert lbl.errorgen_type in ('H', 'S') + + def test_intersection(self): + errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + + intersection_basis = errorgen_basis_HSC.intersection(errorgen_basis_H) + #should now have 3 items + assert len(intersection_basis) == 3 + for lbl in intersection_basis.labels: + assert lbl.errorgen_type == 'H' + + def test_difference(self): + errorgen_basis_HSC = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H','S','C')) + errorgen_basis_H = CompleteElementaryErrorgenBasis(self.basis_1q, self.state_space_1Q, elementary_errorgen_types=('H',)) + + intersection_basis = errorgen_basis_HSC.difference(errorgen_basis_H) + #should now have 6 items + assert len(intersection_basis) == 6 + for lbl in intersection_basis.labels: + assert lbl.errorgen_type in ('S', 'C') + +class ExplicitElementaryErrorgenBasisTester(BaseCase): + + def setUp(self): + self.basis_1q = BuiltinBasis('PP', 4) + self.state_space_1Q = QubitSpace(1) + self.state_space_2Q = QubitSpace(2) + + self.labels_1Q = [LocalElementaryErrorgenLabel('H', ['X']), + LocalElementaryErrorgenLabel('S', ['Y']), + LocalElementaryErrorgenLabel('C', ['X','Y']), + LocalElementaryErrorgenLabel('A', ['X','Y'])] + self.labels_2Q = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + self.labels_2Q_alt = [LocalElementaryErrorgenLabel('H', ['IX']), + LocalElementaryErrorgenLabel('S', ['ZZ']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + + + self.explicit_basis_1Q = 
ExplicitElementaryErrorgenBasis(self.state_space_1Q, self.labels_1Q, self.basis_1q) + self.explicit_basis_2Q = ExplicitElementaryErrorgenBasis(self.state_space_2Q, self.labels_2Q, self.basis_1q) + self.explicit_basis_2Q_alt = ExplicitElementaryErrorgenBasis(self.state_space_2Q, self.labels_2Q_alt, self.basis_1q) + + + + def test_elemgen_supports(self): + #there should be 1 weight 1 and 3 weight 2 terms. + elemgen_supports = self.explicit_basis_2Q.elemgen_supports + num_weight_1 = 0 + num_weight_2 = 0 + for support in elemgen_supports: + if len(support) == 1: + num_weight_1+=1 + elif len(support) == 2: + num_weight_2+=1 + else: + raise ValueError('Invalid support length for two-qubit error gen basis.') + + assert num_weight_1==1 and num_weight_2==3 + + def test_elemgen_and_dual_construction(self): + #just test for running w/o failure. + elemgens = self.explicit_basis_2Q.elemgen_matrices + duals = self.explicit_basis_2Q.elemgen_dual_matrices + + def test_label_index(self): + labels = self.explicit_basis_1Q.labels + + test_eg = LocalElementaryErrorgenLabel('C', ['X', 'Y']) + test_eg_missing = LocalElementaryErrorgenLabel('C', ['X', 'Z']) + + lbl_idx = self.explicit_basis_1Q.label_index(test_eg) + + assert lbl_idx == labels.index(test_eg) + + with self.assertRaises(KeyError): + self.explicit_basis_1Q.label_index(test_eg_missing) + assert self.explicit_basis_1Q.label_index(test_eg_missing, ok_if_missing=True) is None + + def test_create_subbasis(self): + subbasis = self.explicit_basis_2Q.create_subbasis(sslbl_overlap=(1,)) + + #should have 3 elements remaining in the subbasis. 
+ assert len(subbasis) == 3 + + def test_union(self): + union_basis = self.explicit_basis_2Q.union(self.explicit_basis_2Q_alt) + correct_union_labels = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY']), + LocalElementaryErrorgenLabel('H', ['IX']), + LocalElementaryErrorgenLabel('S', ['ZZ']), + LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + #should now have 6 items. + assert len(union_basis) == 6 + for lbl in union_basis.labels: + assert lbl in correct_union_labels + + def test_intersection(self): + intersection_basis = self.explicit_basis_2Q.intersection(self.explicit_basis_2Q_alt) + correct_intersection_labels = [LocalElementaryErrorgenLabel('C', ['XX','YY']), + LocalElementaryErrorgenLabel('A', ['XX','YY'])] + #should now have 2 items. + assert len(intersection_basis) == 2 + for lbl in intersection_basis.labels: + assert lbl in correct_intersection_labels + + def test_difference(self): + difference_basis = self.explicit_basis_2Q.difference(self.explicit_basis_2Q_alt) + correct_difference_labels = [LocalElementaryErrorgenLabel('H', ['XI']), + LocalElementaryErrorgenLabel('S', ['YY'])] + #should now have 2 items. 
+ assert len(difference_basis) == 2 + for lbl in difference_basis.labels: + assert lbl in correct_difference_labels + + + diff --git a/test/unit/objects/test_errorgenlabel.py b/test/unit/objects/test_errorgenlabel.py new file mode 100644 index 000000000..898b4d2f8 --- /dev/null +++ b/test/unit/objects/test_errorgenlabel.py @@ -0,0 +1,127 @@ +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as LEEL, GlobalElementaryErrorgenLabel as GEEL +from ..util import BaseCase + +class LocalElementaryErrorgenLabelTester(BaseCase): + + def test_cast(self): + #from local + leel_to_cast = LEEL('H', ['X']) + leel_cast = LEEL.cast(leel_to_cast) + assert leel_cast is leel_to_cast + + #from global + geel_to_cast = GEEL('H', ['X'], (0,)) + leel_cast = LEEL.cast(geel_to_cast, sslbls=(0,1)) + assert leel_cast.basis_element_labels == ('XI',) + + #from string + string_to_cast = 'H(XX)' + leel_cast = LEEL.cast(string_to_cast) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('XX',) + + #from tuple + #global style tuple + global_tup_to_cast = ('H', ('X',), (1,)) + leel_cast = LEEL.cast(global_tup_to_cast, sslbls=(0,1)) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('IX',) + + local_tup_to_cast = ('H', 'IX') + leel_cast = LEEL.cast(local_tup_to_cast) + assert leel_cast.errorgen_type == 'H' + assert leel_cast.basis_element_labels == ('IX',) + + #different identity label + geel_to_cast = GEEL('H', ['X'], (0,)) + leel_cast = LEEL.cast(geel_to_cast, sslbls=(0,1), identity_label='F') + assert leel_cast.basis_element_labels == ('XF',) + + def test_eq(self): + assert LEEL('H', ('XX',)) == LEEL('H', ('XX',)) + assert LEEL('H', ('XX',)) != LEEL('S', ('XX',)) + assert LEEL('H', ('XX',)) != LEEL('H', ('XY',)) + + def test_support_indices(self): + assert LEEL('H', ('XX',)).support_indices() == (0,1) + assert LEEL('C', ['IX', 'XI']).support_indices() == (0,1) + assert LEEL('C', ['IXI', 'XII']).support_indices() == 
(0,1) + #nonstandard identity label + assert LEEL('C', ['FXF', 'XFF']).support_indices(identity_label='F') == (0,1) + +class GlobalElementaryErrorgenLabelTester(BaseCase): + + def test_cast(self): + #from global + geel_to_cast = GEEL('H', ['X'], (0,)) + geel_cast = GEEL.cast(geel_to_cast) + assert geel_cast is geel_to_cast + + #from local + leel_to_cast = LEEL('H', ['XI']) + geel_cast = GEEL.cast(leel_to_cast, sslbls=(0,1)) + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (0,) + + #from string + string_to_cast = 'H(XX:0,1)' + geel_cast = GEEL.cast(string_to_cast) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + string_to_cast = 'SXX:0,1' + geel_cast = GEEL.cast(string_to_cast) + assert geel_cast.errorgen_type == 'S' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + string_to_cast = 'SXX' + geel_cast = GEEL.cast(string_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'S' + assert geel_cast.basis_element_labels == ('XX',) + assert geel_cast.sslbls == (0,1) + + #from tuple + #global style tuple + global_tup_to_cast = ('H', ('X',), (1,)) + geel_cast = GEEL.cast(global_tup_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (1,) + + local_tup_to_cast = ('H', 'IX') + geel_cast = GEEL.cast(local_tup_to_cast, sslbls=(0,1)) + assert geel_cast.errorgen_type == 'H' + assert geel_cast.basis_element_labels == ('X',) + assert geel_cast.sslbls == (1,) + + def test_eq(self): + assert GEEL('H', ('X',), (0,)) == GEEL('H', ('X',), (0,)) + assert GEEL('H', ('X',), (0,)) != GEEL('H', ('X',), (1,)) + assert GEEL('H', ('X',), (0,)) != GEEL('H', ('Y',), (0,)) + + def test_padded_basis_element_labels(self): + assert GEEL('H', ('X',), (0,)).padded_basis_element_labels(all_sslbls=(0,1,2)) == ('XII',) + assert GEEL('C', ('XX','YY'), 
(1,2)).padded_basis_element_labels(all_sslbls=(0,1,2)) == ('IXX','IYY') + + def test_map_state_space_labels(self): + geel_to_test = GEEL('C', ['XX', 'YY'], (0,1)) + #dictionary mapper + mapper = {0:'Q0', 1:'Q1'} + mapped_geel = geel_to_test.map_state_space_labels(mapper) + assert mapped_geel.sslbls == ('Q0', 'Q1') + + #function mapper + mapper = lambda x:x+10 + mapped_geel = geel_to_test.map_state_space_labels(mapper) + assert mapped_geel.sslbls == (10, 11) + + def test_sort_sslbls(self): + geel_to_test = GEEL('C', ['XI', 'IX'], (1,0)) + sorted_sslbl_geel = geel_to_test.sort_sslbls() + + assert sorted_sslbl_geel.sslbls == (0,1) + assert sorted_sslbl_geel.basis_element_labels[0] == 'IX' and sorted_sslbl_geel.basis_element_labels[1] == 'XI' + + \ No newline at end of file diff --git a/test/unit/objects/test_errorgenpropagation.py b/test/unit/objects/test_errorgenpropagation.py new file mode 100644 index 000000000..73a889719 --- /dev/null +++ b/test/unit/objects/test_errorgenpropagation.py @@ -0,0 +1,350 @@ +from ..util import BaseCase +from pygsti.circuits import Circuit +from pygsti.algorithms.randomcircuit import create_random_circuit, find_all_sets_of_compatible_two_q_gates +from pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator +from pygsti.processors import QubitProcessorSpec +from pygsti.models.modelconstruction import create_crosstalk_free_model, create_cloud_crosstalk_model +from pygsti.baseobjs import Label, BuiltinBasis, QubitSpace, CompleteElementaryErrorgenBasis, QubitGraph +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel +from pygsti.tools import errgenproptools as _eprop +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from pygsti.tools.matrixtools import print_mx +from itertools import product +from math import floor +from pygsti.modelpacks import smq2Q_XYCPHASE +import numpy as np +import stim + + +class ErrorgenPropTester(BaseCase): 
+ + def setUp(self): + num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + self.circuit_length_1 = create_random_circuit(pspec, 1, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + typ = 'H' + max_stochastic = {'S': .0005, 'H': 0, 'H+S': .0001} + max_hamiltonian = {'S': 0, 'H': .00005, 'H+S': .0001} + max_strengths = {1: {'S': max_stochastic[typ], 'H': max_hamiltonian[typ]}, + 2: {'S': 3*max_stochastic[typ], 'H': 3*max_hamiltonian[typ]} + } + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + + def test_exact_propagation_probabilities(self): + #This should simultaneously confirm that the propagation code runs + #and also that it is giving the correct values by directly comparing + #to the probabilities from direct forward simulation. 
+ error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + probabilities_exact_propagation = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit) + probabilities_forward_simulation = probabilities_fwdsim(self.error_model, self.circuit) + + self.assertTrue(np.linalg.norm(probabilities_exact_propagation - probabilities_forward_simulation, ord=1) < 1e-10) + + def test_approx_propagation_probabilities_BCH(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + probabilities_BCH_order_1 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=1) + probabilities_BCH_order_2 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=2) + probabilities_BCH_order_3 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=3) + probabilities_BCH_order_4 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=4) + probabilities_BCH_order_5 = probabilities_errorgen_prop(error_propagator, self.target_model, self.circuit, use_bch=True, bch_order=5) + probabilities_forward_simulation = probabilities_fwdsim(self.error_model, self.circuit) + + #use a much looser constraint on the agreement between the BCH results and forward simulation. Mostly testing to catch things exploding. + TVD_order_1 = np.linalg.norm(probabilities_BCH_order_1 - probabilities_forward_simulation, ord=1) + TVD_order_2 = np.linalg.norm(probabilities_BCH_order_2 - probabilities_forward_simulation, ord=1) + TVD_order_3 = np.linalg.norm(probabilities_BCH_order_3 - probabilities_forward_simulation, ord=1) + TVD_order_4 = np.linalg.norm(probabilities_BCH_order_4 - probabilities_forward_simulation, ord=1) + TVD_order_5 = np.linalg.norm(probabilities_BCH_order_5 - probabilities_forward_simulation, ord=1) + + #loose bound is just to make sure nothing exploded. 
+ self.assertTrue(TVD_order_1 < 1e-2) + self.assertTrue(TVD_order_2 < 1e-2) + self.assertTrue(TVD_order_3 < 1e-2) + self.assertTrue(TVD_order_4 < 1e-2) + self.assertTrue(TVD_order_5 < 1e-2) + + #also assert that the TVDs get smaller in general as you go up in order. + self.assertTrue((TVD_order_1>TVD_order_2) and (TVD_order_2>TVD_order_3) and (TVD_order_3>TVD_order_4) and (TVD_order_4>TVD_order_5)) + + def test_eoc_error_channel(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + eoc_error_channel = error_propagator.eoc_error_channel(self.circuit) + + #manually compute end-of-circuit error generator + ideal_channel = self.target_model.sim.product(self.circuit) + noisy_channel_exact = self.error_model.sim.product(self.circuit) + eoc_error_channel_exact = noisy_channel_exact@ideal_channel.conj().T + + assert np.linalg.norm(eoc_error_channel - eoc_error_channel_exact) < 1e-10 + + def test_propagation_length_zero_one(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + empty_circuit = Circuit([], line_labels=(0,1,2,3)) + error_propagator.propagate_errorgens(self.circuit_length_1) + error_propagator.propagate_errorgens(empty_circuit, include_spam=True) + error_propagator.propagate_errorgens(empty_circuit, include_spam=False) + + def test_errorgen_transform_map(self): + error_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + errorgen_input_output_map = error_propagator.errorgen_transform_map(self.circuit, include_spam=True) + + assert errorgen_input_output_map[(_LSE('H', (stim.PauliString("+___X"),)), 1)] == (_LSE('H', (stim.PauliString("+__ZY"),)), 1.0) + assert errorgen_input_output_map[(_LSE('S', (stim.PauliString("+X___"),)), 2)] == (_LSE('S', (stim.PauliString("+Z___"),)), 1.0) + assert errorgen_input_output_map[(_LSE('H', (stim.PauliString("+X___"),)), 3)] == (_LSE('H', (stim.PauliString("+Z___"),)), -1.0) + + def test_errorgen_gate_contributors(self): + error_propagator = 
ErrorGeneratorPropagator(self.error_model.copy()) + test_1 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['XIII']), self.circuit, 1, include_spam=True) + assert test_1 == [Label(('Gypi2', 0))] + + test_2 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IYII']), self.circuit, 2, include_spam=False) + assert test_2 == [Label(('Gypi2', 1))] + + test_3 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IIIX']), self.circuit, 3, include_spam=True) + assert test_3 == [Label(('Gxpi2', 3))] + + test_4 = error_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IIYX']), self.circuit, 4, include_spam=True) + assert test_4 == [Label(('Gcphase', 2, 3))] + + def test_explicit_model(self): + + target_model = smq2Q_XYCPHASE.target_model('full TP') + noisy_model = target_model.copy() + noisy_model = noisy_model.rotate(max_rotate = .01) + noisy_model.set_all_parameterizations('GLND') + errorgen_propagator = ErrorGeneratorPropagator(noisy_model) + circuit_2Q = list(smq2Q_XYCPHASE.create_gst_experiment_design(4).all_circuits_needing_data)[-1] + + #make sure that the various methods don't die. 
+ propagated_errorgens = errorgen_propagator.propagate_errorgens(circuit_2Q) + gate_contributors = errorgen_propagator.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['XI']), circuit_2Q, 1, include_spam=True) + + def test_cloud_crosstalk_model(self): + oq=['Gxpi2','Gypi2','Gzpi2'] + qbts=4 + gate_names=oq+['Gcphase'] + max_strengths = {1: {'S': 10**(-3), 'H': 10**(-2)}, + 2: {'S': (1/6)*10**(-2), 'H': 2*10**(-3)} + } + + #Build circuit models + qubit_labels =range(qbts) + gate_names = ['Gxpi2','Gzpi2','Gcphase','Gypi2'] + ps = QubitProcessorSpec(qbts, gate_names,availability= {'Gcphase':[(i,(i+1)%qbts) for i in range(qbts)]} , qubit_labels=qubit_labels) + lindblad_error_coeffs=sample_error_rates_cloud_crosstalk(max_strengths,4,gate_names) + mdl_cloudnoise = create_cloud_crosstalk_model(ps, lindblad_error_coeffs=lindblad_error_coeffs, errcomp_type="errorgens") + errorgen_prop=ErrorGeneratorPropagator(mdl_cloudnoise) + propagated_errorgens = errorgen_prop.propagate_errorgens(self.circuit) + gate_contributors = errorgen_prop.errorgen_gate_contributors(LocalElementaryErrorgenLabel('H', ['IZZI']), self.circuit, 1, include_spam=True) + +class LocalStimErrorgenLabelTester(BaseCase): + def setUp(self): + self.local_eel = LocalElementaryErrorgenLabel('C', ['XX', 'YY']) + self.global_eel = GlobalElementaryErrorgenLabel('C', ['XX', 'YY'], (0,1)) + self.sslbls = [0,1] + self.tableau = stim.PauliString('XI').to_tableau() + + def test_cast(self): + correct_lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) + + self.assertEqual(correct_lse, _LSE.cast(self.local_eel)) + self.assertEqual(correct_lse, _LSE.cast(self.global_eel, self.sslbls)) + + def test_to_local_global_eel(self): + lse = _LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]) + + self.assertEqual(lse.to_local_eel(), self.local_eel) + self.assertEqual(lse.to_global_eel(), self.global_eel) + + def test_propagate_error_gen_tableau(self): + lse = _LSE('C', [stim.PauliString('XX'), 
stim.PauliString('YY')]) + propagated_lse = lse.propagate_error_gen_tableau(self.tableau, 1) + self.assertEqual(propagated_lse, (_LSE('C', [stim.PauliString('XX'), stim.PauliString('YY')]), -1)) + + lse = _LSE('S', [stim.PauliString('ZI')]) + propagated_lse = lse.propagate_error_gen_tableau(self.tableau, 1) + self.assertEqual(propagated_lse, (_LSE('S', [stim.PauliString('ZI')]), 1)) + +#Helper Functions: +def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + ideal_meas = target_model.circuit_layer_operator(Label('Mdefault'), typ='povm').copy() + #calculate the probabilities. 
+ prob_vec = np.zeros(len(ideal_meas)) + for i, effect in enumerate(ideal_meas.values()): + dense_effect = effect.to_dense().copy() + dense_prep = ideal_prep.to_dense().copy() + prob_vec[i] = np.linalg.multi_dot([dense_effect.reshape((1,len(dense_effect))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]) + return prob_vec + +def probabilities_fwdsim(noise_model, circuit): + prob_dict = noise_model.sim.probs(circuit) + prob_vec = np.fromiter(prob_dict.values(), dtype=np.double) + return prob_vec + +def sample_error_rates_dict(pspec, strengths, seed=None): + """ + For example: + strengths = {1: {'S':0.001, 'H':0.01}, + 2: {'S':0.01,'H':0.1}} + + The 'S' and 'H' entries in the strengths dictionary give + the maximum possible contribution to the infidelity from a given gate. + """ + qubits = pspec.qubit_labels + errors_rates_dict = {} + for gate, availability in pspec.availability.items(): + n = pspec.gate_num_qubits(gate) + if availability == 'all-edges': + assert(n == 1), "Currently require all 2-qubit gates have a specified availability!" + qubits_for_gate = qubits + else: + qubits_for_gate = availability + for qs in qubits_for_gate: + label = Label(gate, qs) + # First, check if there's a strength specified for this specific gate. + max_stength = strengths.get(label, None) # to get highly biased errors can set generic error rates to be low, then set it to be high for one or two particular gates. + # Next, check if there's a strength specified for all gates with this name + if max_stength is None: + max_stength = strengths.get(gate, None) + # Finally, get error rate for all gates on this number of qubits. + if max_stength is None: + max_stength = strengths[n] + # Sample error rates. + errors_rates_dict[label] = sample_error_rates(max_stength, n, seed) + return errors_rates_dict + +def sample_error_rates(strengths, n, seed = None): + ''' + Samples an error rates dictionary for dependent gates. 
''' + error_rates_dict = {} + + #create a basis to get the basis element labels. + basis = BuiltinBasis('pp', 4**n) + + #set the rng + rng = np.random.default_rng(seed) + + # Sample stochastic error rates. First we sample the overall stochastic error rate. + # Then we sample (and normalize) the individual stochastic error rates + stochastic_strength = strengths['S'] * rng.random() + s_error_rates = rng.random(4 ** n - 1) + s_error_rates = s_error_rates / np.sum(s_error_rates) * stochastic_strength + + hamiltonian_strength = strengths['H'] * rng.random() + h_error_rates = rng.random(4 ** n - 1) + h_error_rates = h_error_rates * np.sqrt(hamiltonian_strength) / np.sqrt(np.sum(h_error_rates**2)) + + error_rates_dict.update({('S', basis.labels[i + 1]): s_error_rates[i] for i in range(4 ** n - 1)}) + error_rates_dict.update({('H', basis.labels[i + 1]): h_error_rates[i] for i in range(4 ** n - 1)}) + + return error_rates_dict + +def comm_list_to_matrix(comm_list, errorgen_matrix_dict, num_qubits): + #if the list is empty return all zeros + #initialize empty array for accumulation. + mat = np.zeros((4**num_qubits, 4**num_qubits), dtype=np.complex128) + if not comm_list: + return mat + + #infer the correct label type. + if errorgen_matrix_dict: + first_label = next(iter(errorgen_matrix_dict)) + if isinstance(first_label, LocalElementaryErrorgenLabel): + label_type = 'local' + elif isinstance(first_label, GlobalElementaryErrorgenLabel): + label_type = 'global' + else: + msg = f'Label type {type(first_label)} is not supported as a key for errorgen_matrix_dict.'\ + + 'Please use either LocalElementaryErrorgenLabel or GlobalElementaryErrorgenLabel.' + raise ValueError(msg) + else: + raise ValueError('Non-empty commutator result list, but the dictionary is empty. Cannot convert.') + + #loop through comm_list and accumulate the weighted error generators prescribed. 
+ if label_type == 'local': + for comm_tup in comm_list: + mat += comm_tup[1]*errorgen_matrix_dict[comm_tup[0].to_local_eel()] + else: + for comm_tup in comm_list: + mat += comm_tup[1]*errorgen_matrix_dict[comm_tup[0].to_global_eel()] + + return mat + +def error_generator_commutator_numerical(errorgen_1, errorgen_2, errorgen_matrix_dict): + return errorgen_matrix_dict[errorgen_1]@errorgen_matrix_dict[errorgen_2] - errorgen_matrix_dict[errorgen_2]@errorgen_matrix_dict[errorgen_1] + + +#--------- Cloud crosstalk helper functions---------------------# +def sample_error_rates_cloud_crosstalk(strengths,qbts, gates): + error_rates_dict = {} + for gate in gates: + if not gate =='Gcphase': + for el in range(qbts): + stochastic_strength = strengths[1]['S']*np.random.random() + hamiltonian_strength = 2*strengths[1]['H']*np.random.random()-strengths[1]['H'] + paulis=['X','Y','Z'] + error_rates_dict[(gate,el)]=dict() + for pauli_label in paulis: + if (gate=='Gxpi2' and pauli_label=='X') or (gate=='Gypi2' and pauli_label=='Y') or (gate=='Gzpi2' and pauli_label=='Z'): + error_rates_dict[(gate,el)].update({('H', pauli_label+':'+str(el)):hamiltonian_strength}) + error_rates_dict[(gate,el)].update({('S', pauli_label+':'+str(el)): stochastic_strength}) + else: + error_rates_dict[(gate,el)].update({('H', pauli_label+':'+str(el)):0.0}) + error_rates_dict[(gate,el)].update({('S', pauli_label+':'+str(el)): 0.0}) + else: + for qbt in range(qbts): + + gate_lbl=('Gcphase',qbt,(qbt+1)%4) + error_rates_dict[gate_lbl]=dict() + for qbt1 in range(qbts): + for qbt2 in range(qbts): + if qbt1 < qbt2: + hamiltonian_strength = 2*strengths[2]['H']*np.random.random()-strengths[2]['H'] + for pauli in two_qbt_pauli_str(): + if pauli =='ZZ': + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)+','+str(qbt2)):hamiltonian_strength}) + else: + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)+','+str(qbt2)):0.0}) + + for qbt1 in range(qbts): + hamiltonian_strength = 
2*strengths[2]['H']*np.random.random()-strengths[2]['H'] + for pauli in ['X','Y','Z']: + if pauli=='Z': + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)):hamiltonian_strength}) + else: + error_rates_dict[gate_lbl].update({('H',pauli+':'+str(qbt1)):0.0}) + + + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'ZZ:'+str(gate_lbl[1])+','+str(gate_lbl[2])): stochastic_strength}) + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'Z:'+str(gate_lbl[1])): stochastic_strength}) + stochastic_strength = strengths[2]['S']*np.random.random() + error_rates_dict[gate_lbl].update({('S', 'Z:'+str(gate_lbl[2])): stochastic_strength}) + + return error_rates_dict + +def two_qbt_pauli_str(): + paulis=['I','X','Y','Z'] + pauli_strs=[] + for p1 in paulis: + for p2 in paulis: + pauli_strs.append(p1+p2) + pauli_strs.remove('II') + return pauli_strs diff --git a/test/unit/tools/test_errgenproptools.py b/test/unit/tools/test_errgenproptools.py new file mode 100644 index 000000000..38c33d717 --- /dev/null +++ b/test/unit/tools/test_errgenproptools.py @@ -0,0 +1,625 @@ +import numpy as np +from scipy.linalg import logm +from pygsti.baseobjs import Label, QubitSpace, BuiltinBasis +from pygsti.baseobjs.errorgenbasis import CompleteElementaryErrorgenBasis +from pygsti.algorithms.randomcircuit import create_random_circuit +from pygsti.models.modelconstruction import create_crosstalk_free_model +from pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as LEEL +from pygsti.errorgenpropagation.localstimerrorgen import LocalStimErrorgenLabel as _LSE +from pygsti.tools import errgenproptools as _eprop +from pygsti.tools.matrixtools import print_mx +from pygsti.tools.basistools import change_basis +from ..util import BaseCase +from itertools import product +import random +import stim +from pygsti.processors import QubitProcessorSpec +from 
pygsti.errorgenpropagation.errorpropagator import ErrorGeneratorPropagator + +#TODO: errorgen_layer_to_matrix, stim_pauli_string_less_than + +class ErrgenCompositionCommutationTester(BaseCase): + + def setUp(self): + num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + max_strengths = {1: {'S': 0, 'H': .0001}, + 2: {'S': 0, 'H': .0001}} + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + self.errorgen_propagator = ErrorGeneratorPropagator(self.error_model.copy()) + self.propagated_errorgen_layers = self.errorgen_propagator.propagate_errorgens(self.circuit) + + def test_errorgen_commutators(self): + #confirm we get the correct analytic commutators by comparing to numerics. + + #create an error generator basis. + errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls = errorgen_basis.labels + errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(errorgen_lbls, errorgen_basis.elemgen_matrices)} + + #loop through all of the pairs of indices. + errorgen_label_pairs = list(product(errorgen_lbls, repeat=2)) + + #also get a version of this list where the labels are local stim ones + local_stim_errorgen_lbls = [_LSE.cast(lbl) for lbl in errorgen_lbls] + stim_errorgen_label_pairs = list(product(local_stim_errorgen_lbls, repeat=2)) + + #for each pair compute the commutator directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. 
+ for pair1, pair2 in zip(errorgen_label_pairs, stim_errorgen_label_pairs): + numeric_commutator = _eprop.error_generator_commutator_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict) + analytic_commutator = _eprop.error_generator_commutator(pair2[0], pair2[1]) + analytic_commutator_mat = _eprop.errorgen_layer_to_matrix(analytic_commutator, 2, errorgen_lbl_matrix_dict) + + norm_diff = np.linalg.norm(numeric_commutator-analytic_commutator_mat) + if norm_diff > 1e-10: + print(f'Difference in commutators for pair {pair1} is greater than 1e-10.') + print(f'{np.linalg.norm(numeric_commutator-analytic_commutator_mat)=}') + print('numeric_commutator=') + print_mx(numeric_commutator) + + #Decompose the numerical commutator into rates. + for lbl, dual in zip(errorgen_lbls, errorgen_basis.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_commutator) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_commutator=}') + print('analytic_commutator_mat=') + print_mx(analytic_commutator_mat) + raise ValueError() + + def test_errorgen_composition(self): + + #create an error generator basis. + complete_errorgen_basis_2Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(2), default_label_type='local') + complete_errorgen_basis_3Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(3), default_label_type='local') + + #use this basis to construct a dictionary from error generator labels to their + #matrices. + errorgen_lbls_2Q = complete_errorgen_basis_2Q.labels + errorgen_lbl_matrix_dict_2Q = {lbl: mat for lbl, mat in zip(errorgen_lbls_2Q, complete_errorgen_basis_2Q.elemgen_matrices)} + + #augment testing with random selection of 3Q labels (some commutation relations for C and A terms require a minimum of 3 qubits). 
+ errorgen_lbls_3Q, errorgen_mats_3Q = select_random_items_from_multiple_lists([complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_matrices], 1000, seed= 1234) + errorgen_lbl_matrix_dict_3Q = {lbl: mat for lbl, mat in zip(errorgen_lbls_3Q, errorgen_mats_3Q)} + + complete_errorgen_lbl_matrix_dict_3Q = {lbl: mat for lbl, mat in zip(complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_matrices)} + + #loop through all of the pairs of indices. + errorgen_label_pairs_2Q = list(product(errorgen_lbls_2Q, repeat=2)) + errorgen_label_pairs_3Q = list(product(errorgen_lbls_3Q, repeat=2)) + + #also get a version of this list where the labels are local stim ones + local_stim_errorgen_lbls_2Q = [_LSE.cast(lbl) for lbl in errorgen_lbls_2Q] + local_stim_errorgen_lbls_3Q = [_LSE.cast(lbl) for lbl in errorgen_lbls_3Q] + + stim_errorgen_label_pairs_2Q = list(product(local_stim_errorgen_lbls_2Q, repeat=2)) + stim_errorgen_label_pairs_3Q = list(product(local_stim_errorgen_lbls_3Q, repeat=2)) + + #for each pair compute the composition directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. 
+ for pair1, pair2 in zip(errorgen_label_pairs_2Q, stim_errorgen_label_pairs_2Q): + numeric_composition = _eprop.error_generator_composition_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict_2Q) + analytic_composition = _eprop.error_generator_composition(pair2[0], pair2[1]) + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, 2, errorgen_matrix_dict = errorgen_lbl_matrix_dict_2Q) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for pair {pair1} is greater than 1e-10.') + print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis_2Q.labels, complete_errorgen_basis_2Q.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') + + for pair1, pair2 in zip(errorgen_label_pairs_3Q, stim_errorgen_label_pairs_3Q): + numeric_composition = _eprop.error_generator_composition_numerical(pair1[0], pair1[1], errorgen_lbl_matrix_dict_3Q) + analytic_composition = _eprop.error_generator_composition(pair2[0], pair2[1]) + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, 3, errorgen_matrix_dict = complete_errorgen_lbl_matrix_dict_3Q) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for pair {pair1} is greater than 1e-10.') + 
print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis_3Q.labels, complete_errorgen_basis_3Q.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') + + def test_iterative_error_generator_composition(self): + test_labels = [(_LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')]), _LSE('H', [stim.PauliString('X')])), + (_LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')])), + (_LSE('S', [stim.PauliString('YY')]), _LSE('H', [stim.PauliString('IX')]), _LSE('H', [stim.PauliString('XI')]))] + rates = [(1,1,1), (1,1,1), (1,1,1)] + + correct_iterative_compositions = [[(_LSE('H', (stim.PauliString("+X"),)), (-4-0j))], + [(_LSE('H', (stim.PauliString("+X_"),)), (-2+0j)), (_LSE('A', (stim.PauliString("+_X"), stim.PauliString("+XX"))), (2+0j))], + [(_LSE('C', (stim.PauliString("+YZ"), stim.PauliString("+ZY"))), (1+0j)), (_LSE('C', (stim.PauliString("+YY"), stim.PauliString("+ZZ"))), (1+0j)), + (_LSE('C', (stim.PauliString("+_X"), stim.PauliString("+X_"))), -1)] + ] + + for lbls, rates, correct_lbls in zip(test_labels, rates, correct_iterative_compositions): + iterated_composition = _eprop.iterative_error_generator_composition(lbls, rates) + self.assertEqual(iterated_composition, correct_lbls) + + _compare_analytic_numeric_iterative_composition(2) + + + def test_bch_approximation(self): + first_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=1) + 
propagated_errorgen_layers_bch_order_1 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) + first_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_1,mx_basis='pp') + assert np.linalg.norm(first_order_bch_analytical-first_order_bch_numerical) < 1e-14 + + propagated_errorgen_layers_bch_order_2 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=2) + second_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=2) + second_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_2, mx_basis='pp') + assert np.linalg.norm(second_order_bch_analytical-second_order_bch_numerical) < 1e-14 + + third_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=3) + propagated_errorgen_layers_bch_order_3 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=3) + third_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_3, mx_basis='pp') + assert np.linalg.norm(third_order_bch_analytical-third_order_bch_numerical) < 1e-14 + + fourth_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=4) + propagated_errorgen_layers_bch_order_4 = self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=4) + fourth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_4, mx_basis='pp') + assert np.linalg.norm(fourth_order_bch_analytical-fourth_order_bch_numerical) < 1e-14 + + fifth_order_bch_numerical = _eprop.bch_numerical(self.propagated_errorgen_layers, self.errorgen_propagator, bch_order=5) + propagated_errorgen_layers_bch_order_5 = 
self.errorgen_propagator.propagate_errorgens_bch(self.circuit, bch_order=5, truncation_threshold=0) + fifth_order_bch_analytical = self.errorgen_propagator.errorgen_layer_dict_to_errorgen(propagated_errorgen_layers_bch_order_5, mx_basis='pp') + assert np.linalg.norm(fifth_order_bch_analytical-fifth_order_bch_numerical) < 1e-14 + + exact_errorgen = logm(self.errorgen_propagator.eoc_error_channel(self.circuit)) + exact_vs_first_order_norm = np.linalg.norm(first_order_bch_analytical-exact_errorgen) + exact_vs_second_order_norm = np.linalg.norm(second_order_bch_analytical-exact_errorgen) + exact_vs_third_order_norm = np.linalg.norm(third_order_bch_analytical-exact_errorgen) + exact_vs_fourth_order_norm = np.linalg.norm(fourth_order_bch_analytical-exact_errorgen) + exact_vs_fifth_order_norm = np.linalg.norm(fifth_order_bch_analytical-exact_errorgen) + + self.assertTrue((exact_vs_first_order_norm > exact_vs_second_order_norm) and (exact_vs_second_order_norm > exact_vs_third_order_norm) + and (exact_vs_third_order_norm > exact_vs_fourth_order_norm) and (exact_vs_fourth_order_norm > exact_vs_fifth_order_norm)) + +class ApproxStabilizerMethodTester(BaseCase): + def setUp(self): + num_qubits = 4 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2), (2,3), (3,0)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model = create_crosstalk_free_model(processor_spec = pspec) + self.circuit = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + self.circuit_alt = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + max_strengths = {1: {'S': .0005, 'H': .0001}, + 2: {'S': .0005, 'H': .0001}} + error_rates_dict = sample_error_rates_dict(pspec, max_strengths, seed=12345) + self.error_model = create_crosstalk_free_model(pspec, lindblad_error_coeffs=error_rates_dict) + self.error_propagator = 
ErrorGeneratorPropagator(self.error_model.copy()) + self.propagated_errorgen_layer = self.error_propagator.propagate_errorgens_bch(self.circuit, bch_order=1) + self.circuit_tableau = self.circuit.convert_to_stim_tableau() + self.circuit_tableau_alt = self.circuit_alt.convert_to_stim_tableau() + + #also create a 3-qubit pspec for making some tests faster. + num_qubits = 3 + gate_names = ['Gcphase', 'Gxpi2', 'Gypi2'] + availability = {'Gcphase':[(0,1), (1,2)]} + pspec = QubitProcessorSpec(num_qubits, gate_names, availability=availability) + self.target_model_3Q = create_crosstalk_free_model(processor_spec = pspec) + self.circuit_3Q = create_random_circuit(pspec, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345) + self.circuit_tableau_3Q = self.circuit_3Q.convert_to_stim_tableau() + + + def test_random_support(self): + num_random = _eprop.random_support(self.circuit_tableau) + self.assertEqual(num_random, 3) + + #This unit test for tableau fidelity is straight out of Craig Gidney's stackexchange post. 
+ def test_tableau_fidelity(self): + def _assert_correct_tableau_fidelity(u, v): + expected = abs(np.dot(u, np.conj(v)))**2 + ut = stim.Tableau.from_state_vector(u, endian='little') + vt = stim.Tableau.from_state_vector(v, endian='little') + actual = _eprop.tableau_fidelity(ut, vt) + np.testing.assert_allclose(actual, expected, atol=1e-14, rtol=1e-5) + + s = 0.5**0.5 + _assert_correct_tableau_fidelity([1, 0], [0, 1]) + _assert_correct_tableau_fidelity([1, 0], [1, 0]) + _assert_correct_tableau_fidelity([0, 1], [1, 0]) + _assert_correct_tableau_fidelity([s, s], [s, s]) + _assert_correct_tableau_fidelity([s, s], [s, -s]) + _assert_correct_tableau_fidelity([s, -s], [s, s]) + _assert_correct_tableau_fidelity([s, 1j * s], [s, s]) + _assert_correct_tableau_fidelity([s, s], [s, s]) + _assert_correct_tableau_fidelity([1, 0], [s, s]) + _assert_correct_tableau_fidelity([0, 1], [s, s]) + _assert_correct_tableau_fidelity([1, 0, 0, 0], [0, 0, s, s]) + _assert_correct_tableau_fidelity([0, 0, 1, 0], [0, 0, s, s]) + _assert_correct_tableau_fidelity([0, 0, 1, 0], [0, 0, 1j * s, s]) + for n in range(6): + for _ in range(10): + _assert_correct_tableau_fidelity( + stim.Tableau.random(n).to_state_vector(), + stim.Tableau.random(n).to_state_vector(), + ) + + def test_amplitude_of_state(self): + amp0000 = _eprop.amplitude_of_state(self.circuit_tableau, '0000') + amp1111 = _eprop.amplitude_of_state(self.circuit_tableau, '1111') + self.assertTrue(abs(amp0000)<1e-7) + self.assertTrue(abs(amp1111 -(-1j*np.sqrt(.125)))<1e-7) + + amp0000 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '0000') + amp1111 = _eprop.amplitude_of_state(self.circuit_tableau_alt, '1111') + + self.assertTrue(abs(amp0000)<1e-7) + self.assertTrue(abs(amp1111 - (-1j*np.sqrt(.125)))<1e-7) + + def test_bitstring_to_tableau(self): + tableau = _eprop.bitstring_to_tableau('1010') + self.assertEqual(tableau, stim.PauliString('XIXI').to_tableau()) + + def test_pauli_phase_update(self): + test_paulis = ['YII', 'ZII', 
stim.PauliString('XYZ'), stim.PauliString('+iIII')] + test_bitstring = '100' + + correct_phase_updates_standard = [-1j, -1, 1j, 1j] + correct_phase_updates_dual = [1j, -1, -1j, 1j] + correct_output_bitstrings = ['000', '100', '010', '100'] + + for i, test_pauli in enumerate(test_paulis): + print(i) + phase_update, output_bitstring = _eprop.pauli_phase_update(test_pauli, test_bitstring) + self.assertEqual(phase_update, correct_phase_updates_standard[i]) + self.assertEqual(output_bitstring, correct_output_bitstrings[i]) + + for i, test_pauli in enumerate(test_paulis): + phase_update, output_bitstring = _eprop.pauli_phase_update(test_pauli, test_bitstring, dual=True) + self.assertEqual(phase_update, correct_phase_updates_dual[i]) + self.assertEqual(output_bitstring, correct_output_bitstrings[i]) + + def test_phi(self): + bit_strings_3Q = list(product(['0','1'], repeat=3)) + for bit_string in bit_strings_3Q: + for pauli_1, pauli_2 in product(stim.PauliString.iter_all(3), stim.PauliString.iter_all(3)): + phi_num = _eprop.phi_numerical(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2) + phi_analytic = _eprop.phi(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2) + if abs(phi_num-phi_analytic) > 1e-4: + _eprop.phi(self.circuit_tableau_3Q, bit_string, pauli_1, pauli_2, debug=True) + raise ValueError(f'{pauli_1}, {pauli_2}, {bit_string}, {phi_num=}, {phi_analytic=}') + + def test_alpha(self): + bit_strings_3Q = list(product(['0','1'], repeat=3)) + complete_errorgen_basis_3Q = CompleteElementaryErrorgenBasis('PP', QubitSpace(3), default_label_type='local') + for bit_string in bit_strings_3Q: + for lbl in complete_errorgen_basis_3Q.labels: + alpha_num = _eprop.alpha_numerical(lbl, self.circuit_tableau_3Q, bit_string) + assert abs(alpha_num - _eprop.alpha(lbl, self.circuit_tableau_3Q, bit_string)) <1e-4 + + def test_alpha_pauli(self): + from pygsti.modelpacks import smq2Q_XYCPHASE + pspec_2Q = smq2Q_XYCPHASE.processor_spec() + random_circuits_2Q = 
[create_random_circuit(pspec_2Q, 4, sampler='edgegrab', samplerargs=[0.4,], rand_state=12345+i) for i in range(5)] + random_circuit_tableaus_2Q = [ckt.convert_to_stim_tableau() for ckt in random_circuits_2Q] + def _compare_alpha_pauli_analytic_numeric(num_qubits, tableau): + #loop through all error generators and all paulis + errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(num_qubits), default_label_type='local') + errorgen_labels = [_LSE.cast(lbl) for lbl in errorgen_basis.labels] + pauli_list = list(stim.PauliString.iter_all(num_qubits)) + for lbl in errorgen_labels: + for pauli in pauli_list: + alpha_analytic = _eprop.alpha_pauli(lbl, tableau, pauli) + alpha_numerical = _eprop.alpha_pauli_numerical(lbl, tableau, pauli) + + if abs(alpha_analytic - alpha_numerical)>1e-5: + print(f'{alpha_analytic=}') + print(f'{alpha_numerical=}') + print(f'error generator label: {lbl}') + print(f'pauli: {pauli}') + raise ValueError('Analytic and numerically computed alpha pauli values differ by more than 1e-5') + for ckt_tableau in random_circuit_tableaus_2Q: + _compare_alpha_pauli_analytic_numeric(2, ckt_tableau) + + def test_stabilizer_probability_correction(self): + #The corrections testing here will just be integration testing, we'll + #check for correctness with the probability functions instead. + bitstrings = ['0000', '1000'] + orders = [1,2,3] + for bitstring in bitstrings: + for order in orders: + _eprop.stabilizer_probability_correction(self.propagated_errorgen_layer, self.circuit_tableau, bitstring, order) + + def test_stabilizer_pauli_expectation_correction(self): + #The corrections testing here will just be integration testing, we'll + #check for correctness with the full expecation functions instead. 
+ paulis = [stim.PauliString('XXXX'), stim.PauliString('ZIII')] + orders = [1,2,3] + for pauli in paulis: + for order in orders: + _eprop.stabilizer_pauli_expectation_correction(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order) + + def test_approximate_stabilizer_probability(self): + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit, use_bch=True, bch_order=1) + first_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001') + second_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001', order=2) + third_order_diff = exact_prop_probs[1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '0001', order=3) + + assert abs(first_order_diff) > abs(second_order_diff) + assert abs(second_order_diff) > abs(third_order_diff) + + first_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111') + second_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111', order=2) + third_order_diff = exact_prop_probs[-1] - _eprop.approximate_stabilizer_probability(self.propagated_errorgen_layer, self.circuit_tableau, '1111', order=3) + + assert abs(first_order_diff) > abs(second_order_diff) + assert abs(second_order_diff) > abs(third_order_diff) + + def test_approximate_stabilizer_probabilities(self): + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit, use_bch=True, bch_order=1) + approx_stab_prob_vec_order_1 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau) + approx_stab_prob_vec_order_2 = 
_eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau, order=2) + + tvd_order_1 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_1, ord=1) + tvd_order_2 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_2, ord=1) + + assert tvd_order_1 > tvd_order_2 + + exact_prop_probs = probabilities_errorgen_prop(self.error_propagator, self.target_model, + self.circuit_alt, use_bch=True, bch_order=1) + approx_stab_prob_vec_order_1 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau_alt) + approx_stab_prob_vec_order_2 = _eprop.approximate_stabilizer_probabilities(self.propagated_errorgen_layer, self.circuit_tableau_alt, order=2) + + tvd_order_1 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_1, ord=1) + tvd_order_2 = np.linalg.norm(exact_prop_probs-approx_stab_prob_vec_order_2, ord=1) + + assert tvd_order_1 > tvd_order_2 + + def test_approximate_stabilizer_pauli_expectation(self): + rng = np.random.default_rng(seed=12345) + paulis_4Q = list(stim.PauliString.iter_all(4)) + random_4Q_pauli_indices = rng.choice(len(paulis_4Q), 5, replace=False) + random_4Q_paulis = [paulis_4Q[idx] for idx in random_4Q_pauli_indices] + + for pauli in random_4Q_paulis: + + + first_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, self.circuit, pauli, order=1) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=1) + second_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, self.circuit, pauli, order=2) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=2) + third_order_diff = _eprop.approximate_stabilizer_pauli_expectation_numerical(self.propagated_errorgen_layer, self.error_propagator, 
self.circuit, pauli, order=3) -\ + _eprop.approximate_stabilizer_pauli_expectation(self.propagated_errorgen_layer, self.circuit_tableau, pauli, order=3) + + assert abs(first_order_diff) < 1e-6, f'{pauli=}' + assert abs(second_order_diff) < 1e-8, f'{pauli=}' + assert abs(third_order_diff) < 5e-8, f'{pauli=}' + + + def test_error_generator_taylor_expansion(self): + #this is just an integration test atm. + _eprop.error_generator_taylor_expansion(self.propagated_errorgen_layer, order=2) + +class ErrorGenPropUtilsTester(BaseCase): + pass +#helper functions + +def select_random_items_from_multiple_lists(input_lists, num_items, seed=None): + """ + Select a specified number of items at random from multiple lists without replacement. + + Parameters: + input_lists (list of lists): The lists from which to select items. + num_items (int): The number of items to select. + seed (int, optional): The seed for the random number generator. Defaults to None. + + Returns: + list of lists: A list of lists containing the randomly selected items from each input list. + """ + if not input_lists: + raise ValueError("input_lists cannot be empty") + + list_length = len(input_lists[0]) + for lst in input_lists: + if len(lst) != list_length: + raise ValueError("All input lists must have the same length") + + if num_items > list_length: + raise ValueError("num_items cannot be greater than the length of the input lists") + + if seed is not None: + random.seed(seed) + + indices = random.sample(range(list_length), num_items) + + return [[lst[i] for i in indices] for lst in input_lists] + +def sample_error_rates_dict(pspec, strengths, seed=None): + """ + For example: + strengths = {1: {'S':0.001, 'H':0.01}, + 2: {'S':0.01,'H':0.1}} + + The 'S' and 'H' entries in the strengths dictionary give + the maximum possible contribution to the infidelity from a given gate. 
+ """ + qubits = pspec.qubit_labels + errors_rates_dict = {} + for gate, availability in pspec.availability.items(): + n = pspec.gate_num_qubits(gate) + if availability == 'all-edges': + assert(n == 1), "Currently require all 2-qubit gates have a specified availability!" + qubits_for_gate = qubits + else: + qubits_for_gate = availability + for qs in qubits_for_gate: + label = Label(gate, qs) + # First, check if there's a strength specified for this specific gate. + max_stength = strengths.get(label, None) # to get highly biased errors can set generic error rates to be low, then set it to be high for one or two particular gates. + # Next, check if there's a strength specified for all gates with this name + if max_stength is None: + max_stength = strengths.get(gate, None) + # Finally, get error rate for all gates on this number of qubits. + if max_stength is None: + max_stength = strengths[n] + # Sample error rates. + errors_rates_dict[label] = sample_error_rates(max_stength, n, seed) + return errors_rates_dict + +def sample_error_rates(strengths, n, seed = None): + ''' + Samples an error rates dictionary for dependent gates. + ''' + error_rates_dict = {} + + #create a basis to get the basis element labels. + basis = BuiltinBasis('pp', 4**n) + + #set the rng + rng = np.random.default_rng(seed) + + # Sample stochastic error rates. First we sample the overall stochastic error rate. 
+ # Then we sample (and normalize) the individual stochastic error rates + stochastic_strength = strengths['S'] * rng.random() + s_error_rates = rng.random(4 ** n - 1) + s_error_rates = s_error_rates / np.sum(s_error_rates) * stochastic_strength + + hamiltonian_strength = strengths['H'] * rng.random() + h_error_rates = rng.random(4 ** n - 1) + h_error_rates = h_error_rates * np.sqrt(hamiltonian_strength) / np.sqrt(np.sum(h_error_rates**2)) + + error_rates_dict.update({('S', basis.labels[i + 1]): s_error_rates[i] for i in range(4 ** n - 1)}) + error_rates_dict.update({('H', basis.labels[i + 1]): h_error_rates[i] for i in range(4 ** n - 1)}) + + return error_rates_dict + +def probabilities_errorgen_prop(error_propagator, target_model, circuit, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + ideal_meas = target_model.circuit_layer_operator(Label('Mdefault'), typ='povm').copy() + #calculate the probabilities. 
+ prob_vec = np.zeros(len(ideal_meas)) + for i, effect in enumerate(ideal_meas.values()): + dense_effect = effect.to_dense().copy() + dense_prep = ideal_prep.to_dense().copy() + prob_vec[i] = np.linalg.multi_dot([dense_effect.reshape((1,len(dense_effect))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]) + return prob_vec + +def pauli_expectation_errorgen_prop(error_propagator, target_model, circuit, pauli, use_bch=False, bch_order=1, truncation_threshold=1e-14): + #get the eoc error channel, and the process matrix for the ideal circuit: + if use_bch: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True, use_bch=use_bch, + bch_kwargs={'bch_order':bch_order, + 'truncation_threshold':truncation_threshold}) + else: + eoc_channel = error_propagator.eoc_error_channel(circuit, include_spam=True) + ideal_channel = target_model.sim.product(circuit) + #also get the ideal state prep and povm: + ideal_prep = target_model.circuit_layer_operator(Label('rho0'), typ='prep').copy() + + #finally need the superoperator for the selected pauli. + pauli_unitary = pauli.to_unitary_matrix(endian='big') + #flatten this row-wise + pauli_vec = np.ravel(pauli_unitary) + # NOTE: a bare pauli_vec.reshape((len(pauli_vec),1)) here was a no-op (reshape result discarded); the reshape is applied explicitly in the multi_dot below. + #put this in pp basis (since these are paulis themselves I could just read this off directly). + pauli_vec = change_basis(pauli_vec, 'std', 'pp') + #print(pauli_vec) + dense_prep = ideal_prep.to_dense().copy() + expectation = np.linalg.multi_dot([pauli_vec.reshape((1,len(pauli_vec))), eoc_channel, ideal_channel, dense_prep.reshape((len(dense_prep),1))]).item() + return expectation + +#helper function for iterative composition testing +def _compare_analytic_numeric_iterative_composition(num_qubits): + #create an error generator basis. 
+ complete_errorgen_basis = CompleteElementaryErrorgenBasis('PP', QubitSpace(num_qubits), default_label_type='local') + complete_errorgen_lbls = complete_errorgen_basis.labels + complete_errorgen_lbl_matrix_dict = {lbl: mat for lbl, mat in zip(complete_errorgen_lbls, complete_errorgen_basis.elemgen_matrices)} + + #loop through all triples. + errorgen_label_triples = list(product(complete_errorgen_lbls,repeat=3)) + + #select a random subset of these + rng = np.random.default_rng(seed=1234) + random_indices = rng.choice(len(errorgen_label_triples), 10000) + random_triples = [errorgen_label_triples[idx] for idx in random_indices] + + #create local stim error gen label versions: + random_triples_stim = [(_LSE.cast(a), _LSE.cast(b), _LSE.cast(c)) for a,b,c in random_triples] + + #for each triple compute the composition directly and compute it analytically (then converting it to + #a numeric array) and see how they compare. + for i, (triple_1, triple_2) in enumerate(zip(random_triples, random_triples_stim)): + numeric_composition = _eprop.iterative_error_generator_composition_numerical(triple_1, (1,1,1), complete_errorgen_lbl_matrix_dict) + analytic_composition = _eprop.iterative_error_generator_composition(triple_2, (1,1,1)) + analytic_composition_dict = dict() + for lbl, rate in analytic_composition: + local_lbl = lbl.to_local_eel() + if analytic_composition_dict.get(local_lbl, None) is None: + analytic_composition_dict[local_lbl] = rate + else: + analytic_composition_dict[local_lbl] += rate + analytic_composition = analytic_composition_dict + try: + analytic_composition_mat = _eprop.errorgen_layer_to_matrix(analytic_composition, num_qubits, errorgen_matrix_dict = complete_errorgen_lbl_matrix_dict) + except KeyError: + print(f'{analytic_composition=}') + norm_diff = np.linalg.norm(numeric_composition-analytic_composition_mat) + if norm_diff > 1e-10: + print(f'Difference in compositions for triple {triple_1} is greater than 1e-10.') + print(f'{triple_2=}') + 
print(f'Error encountered on iteration {i}') + print(f'{np.linalg.norm(numeric_composition-analytic_composition_mat)=}') + print('numeric_composition=') + print_mx(numeric_composition) + + #Decompose the numerical composition into rates. + for lbl, dual in zip(complete_errorgen_basis.labels, complete_errorgen_basis.elemgen_dual_matrices): + rate = np.trace(dual.conj().T@numeric_composition) + if abs(rate) >1e-3: + print(f'{lbl}: {rate}') + + print(f'{analytic_composition=}') + print('analytic_composition_mat=') + print_mx(analytic_composition_mat) + raise ValueError('Numeric and analytic error generator compositions were not found to be identical!') diff --git a/test/unit/tools/test_lindbladtools.py b/test/unit/tools/test_lindbladtools.py index 1be0230ab..e1999e11b 100644 --- a/test/unit/tools/test_lindbladtools.py +++ b/test/unit/tools/test_lindbladtools.py @@ -2,7 +2,9 @@ import scipy.sparse as sps from pygsti.tools import lindbladtools as lt -from pygsti.baseobjs import Basis +from pygsti.modelmembers.operations import LindbladErrorgen +from pygsti.baseobjs import Basis, QubitSpace +from pygsti.baseobjs.errorgenlabel import GlobalElementaryErrorgenLabel, LocalElementaryErrorgenLabel from ..util import BaseCase @@ -88,3 +90,154 @@ def test_elementary_errorgen_bases(self): dot_mx[i,j] = np.vdot(dual, primal) self.assertTrue(np.allclose(dot_mx, np.identity(len(lbls), 'd'))) + +class RandomErrorgenRatesTester(BaseCase): + + def test_default_settings(self): + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, seed=1234, label_type='local') + + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 240) + + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
+ errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + def test_sector_restrictions(self): + #H-only: + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H',), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 15) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + #S-only + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('S',), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 15) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + #H+S + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 30) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. 
+ errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + #H+S+A + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','A'), seed=1234) + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 135) + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + def test_error_metric_restrictions(self): + #test generator_infidelity + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_infidelity', + error_metric_value=.99, seed=1234) + #confirm this has the correct generator infidelity. + gen_infdl = 0 + for coeff, rate in random_errorgen_rates.items(): + if coeff.errorgen_type == 'H': + gen_infdl+=rate**2 + elif coeff.errorgen_type == 'S': + gen_infdl+=rate + + assert abs(gen_infdl-.99)<1e-5 + + #test generator_error + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'total_generator_error', + error_metric_value=.99, seed=1234) + #confirm this has the correct total generator error. 
+ gen_error = 0 + for coeff, rate in random_errorgen_rates.items(): + if coeff.errorgen_type == 'H': + gen_error+=abs(rate) + elif coeff.errorgen_type == 'S': + gen_error+=rate + + assert abs(gen_error-.99)<1e-5 + + #test relative_HS_contribution: + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'generator_infidelity', + error_metric_value=.99, + relative_HS_contribution=(.5, .5), seed=1234) + #confirm this has the correct generator infidelity contributions. + gen_infdl_H = 0 + gen_infdl_S = 0 + for coeff, rate in random_errorgen_rates.items(): + if coeff.errorgen_type == 'H': + gen_infdl_H+=rate**2 + elif coeff.errorgen_type == 'S': + gen_infdl_S+=rate + + assert abs(gen_infdl_S - gen_infdl_H)<1e-5 + + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + error_metric= 'total_generator_error', + error_metric_value=.99, + relative_HS_contribution=(.5, .5), seed=1234) + #confirm this has the correct generator error contributions. 
+ gen_error_H = 0 + gen_error_S = 0 + for coeff, rate in random_errorgen_rates.items(): + if coeff.errorgen_type == 'H': + gen_error_H+=abs(rate) + elif coeff.errorgen_type == 'S': + gen_error_S+=rate + + assert abs(gen_error_S - gen_error_H)<1e-5 + + def test_fixed_errorgen_rates(self): + fixed_rates_dict = {GlobalElementaryErrorgenLabel('H', ('X',), (0,)): 1} + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + fixed_errorgen_rates=fixed_rates_dict, + seed=1234) + + self.assertEqual(random_errorgen_rates[GlobalElementaryErrorgenLabel('H', ('X',), (0,))], 1) + + def test_label_type(self): + + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + label_type='local', seed=1234) + assert isinstance(next(iter(random_errorgen_rates)), LocalElementaryErrorgenLabel) + + def test_sslbl_overlap(self): + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S'), + sslbl_overlap=(0,), + seed=1234) + for coeff in random_errorgen_rates: + assert 0 in coeff.sslbls + + def test_weight_restrictions(self): + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + label_type='local', seed=1234, + max_weights={'H':1, 'S':1, 'C':1, 'A':1}) + assert len(random_errorgen_rates) == 24 + #confirm still CPTP + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + random_errorgen_rates = lt.random_CPTP_error_generator_rates(num_qubits=2, errorgen_types=('H','S','C','A'), + label_type='local', seed=1234, + max_weights={'H':2, 'S':2, 'C':1, 'A':1}) + assert len(random_errorgen_rates) == 42 + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) + + def test_global_labels(self): + random_errorgen_rates = 
lt.random_CPTP_error_generator_rates(num_qubits=2, seed=1234, label_type='global') + + #make sure that we get the expected number of rates: + self.assertEqual(len(random_errorgen_rates), 240) + + #also make sure this is CPTP, do so by constructing an error generator and confirming it doesn't fail + #with CPTP parameterization. This should fail if the error generator dictionary is not CPTP. + errorgen = LindbladErrorgen.from_elementary_errorgens(random_errorgen_rates, parameterization='CPTPLND', truncate=False, state_space=QubitSpace(2)) +