|
1 | | -from sympy import fwht |
| 1 | +from scipy.sparse import csr_matrix |
2 | 2 | from classiq import * |
3 | 3 | import numpy as np |
4 | | -from classiq.open_library.functions.state_preparation import apply_phase_table |
5 | 4 |
|
6 | 5 | ANGLE_THRESHOLD = 1e-13 |
7 | 6 |
|
| 7 | +""" |
| 8 | +Functions for treating banded block encoding |
| 9 | +""" |
8 | 10 |
|
9 | | -def get_graycode(size, i) -> int: |
10 | | - if i == 2**size: |
11 | | - return get_graycode(size, 0) |
12 | | - return i ^ (i >> 1) |
13 | 11 |
|
def nonzero_diagonals(sparse_mat: csr_matrix) -> list[int]:
    """
    Return a sorted list of diagonal offsets (k) for which the
    diagonal of the sparse matrix is non-zero (i.e., has at least one nonzero element).

    k = 0 -> main diagonal
    k > 0 -> superdiagonals
    k < 0 -> subdiagonals

    Parameters:
    - sparse_mat (csr_matrix): The matrix to inspect.

    Returns:
    - list of int: Offsets of all diagonals holding at least one nonzero value,
      in ascending order.

    Raises:
    - TypeError: If the input is not a scipy.sparse.csr_matrix.
    """
    if not isinstance(sparse_mat, csr_matrix):
        raise TypeError("Input must be a scipy.sparse.csr_matrix")

    # Work on the COO view: each stored entry at (row, col) lies on diagonal
    # k = col - row. This is O(nnz) instead of scanning all rows+cols-1
    # possible diagonals with .diagonal(k).
    coo = sparse_mat.tocoo()

    # CSR may carry explicitly stored zeros; ignore them, matching the
    # original per-diagonal `np.any(diag != 0)` test.
    mask = coo.data != 0

    # np.unique returns the offsets sorted ascending.
    return [int(k) for k in np.unique(coo.col[mask] - coo.row[mask])]
26 | 33 |
|
27 | | -@qfunc |
28 | | -def multiplex_ra(a_y: float, a_z: float, angles: list[float], qba: QArray, ind: QBit): |
29 | | - assert a_y**2 + a_z**2 == 1 |
30 | | - # TODO support general (0,a_y,a_z) rotation |
31 | | - assert ( |
32 | | - a_z == 1.0 or a_y == 1.0 |
33 | | - ), "currently only strict y or z rotations are supported" |
34 | | - size = max(1, (len(angles) - 1).bit_length()) |
35 | | - extended_angles = angles + [0] * (2**size - len(angles)) |
36 | | - transformed_angles = get_graycode_angles_wh(size, extended_angles) |
37 | | - controllers = get_graycode_ctrls(size) |
38 | | - |
39 | | - for k in range(2**size): |
40 | | - if np.abs(transformed_angles[k]) > ANGLE_THRESHOLD: |
41 | | - if a_z == 0.0: |
42 | | - RY(transformed_angles[k], ind) |
43 | | - else: |
44 | | - RZ(transformed_angles[k], ind) |
45 | | - |
46 | | - skip_control(lambda: CX(qba[controllers[k]], ind)) |
47 | 34 |
|
def extract_diagonals(csr_mat: csr_matrix, offsets: list[int]) -> list[np.ndarray]:
    """Collect the diagonals of a csr matrix named by *offsets*.

    A negative offset selects a subdiagonal (below the main diagonal),
    a positive one a superdiagonal. The diagonals are returned in the
    same order as the offsets given.
    """
    diagonals = []
    for k in offsets:
        diagonals.append(csr_mat.diagonal(k))
    return diagonals
48 | 38 |
|
49 | | -@qfunc |
50 | | -def lcu_paulis_graycode(terms: list[SparsePauliTerm], data: QArray, block: QArray): |
51 | | - n_qubits = data.len |
52 | | - n_terms = len(terms) |
53 | | - table_z = np.zeros([n_qubits, n_terms]) |
54 | | - table_y = np.zeros([n_qubits, n_terms]) |
55 | | - probs = [abs(term.coefficient) for term in terms] + [0.0] * (2**block.len - n_terms) |
56 | | - hamiltonian_coeffs = np.angle([term.coefficient for term in terms]).tolist() + [ |
57 | | - 0.0 |
58 | | - ] * (2**block.len - n_terms) |
59 | | - accumulated_phase = np.zeros(2**block.len).tolist() |
60 | | - |
61 | | - for k in range(n_terms): |
62 | | - for pauli in terms[k].paulis: |
63 | | - if pauli.pauli == Pauli.Z: |
64 | | - table_z[pauli.index, k] = -np.pi |
65 | | - accumulated_phase[k] += np.pi / 2 |
66 | | - elif pauli.pauli == Pauli.Y: |
67 | | - table_y[pauli.index, k] = -np.pi |
68 | | - accumulated_phase[k] += np.pi / 2 |
69 | | - elif pauli.pauli == Pauli.X: |
70 | | - table_z[pauli.index, k] = -np.pi |
71 | | - table_y[pauli.index, k] = np.pi |
72 | | - accumulated_phase[k] += np.pi / 2 |
73 | | - |
74 | | - def select_graycode(block: QArray, data: QArray): |
75 | | - for i in range(n_qubits): |
76 | | - multiplex_ra(0, 1, table_z[i, :], block, data[i]) |
77 | | - multiplex_ra(1, 0, table_y[i, :], block, data[i]) |
78 | | - apply_phase_table( |
79 | | - [p1 - p2 for p1, p2 in zip(hamiltonian_coeffs, accumulated_phase)], block |
80 | | - ) |
81 | 39 |
|
82 | | - within_apply( |
83 | | - lambda: inplace_prepare_state(probs, 0.0, block), |
84 | | - lambda: select_graycode(block, data), |
def pad_arrays(
    arrays: list[np.ndarray], offsets: list[int], pad_value: int = 0
) -> list[np.ndarray]:
    """
    Pads all NumPy arrays in a list to match the length of the longest one.
    For negative offsets, padding is added to the beginning of the array.

    Parameters:
    - arrays (list of np.ndarray): List of NumPy arrays with different lengths.
    - offsets (list of int): List of diagonal offsets, same order as arrays.
    - pad_value (int, optional): The value to pad with (default is 0).

    Returns:
    - list of np.ndarray: Padded NumPy arrays.
    """
    # Nothing to pad against when the list is empty.
    if not arrays:
        return []

    target_len = max(len(a) for a in arrays)

    # Subdiagonals (offset < 0) get their padding in front, all others at the end.
    return [
        np.pad(
            arr,
            (target_len - len(arr), 0) if off < 0 else (0, target_len - len(arr)),
            constant_values=pad_value,
        )
        for arr, off in zip(arrays, offsets)
    ]
| 74 | + |
| 75 | + |
def get_be_banded_data(
    sparse_mat: csr_matrix,
) -> tuple[list[int], list[list[float]], list[float], float]:
    """
    Prepare the classical data needed for a banded block encoding of *sparse_mat*.

    Parameters:
    - sparse_mat (csr_matrix): The banded matrix to encode.

    Returns a 4-tuple:
    - offsets: sorted diagonal offsets that contain nonzero entries.
    - normalized_diags: each diagonal, zero-padded to a common length and
      scaled by its own max |entry|, as a plain Python list.
    - normalized_diags_maxima: the per-diagonal max |entry| divided by the
      total norm, zero-padded to length 2**num_q_diag (the prepare-state
      amplitudes over the diagonal-selection register).
    - prepare_norm: sum of the per-diagonal maxima (the overall block-encoding
      scale factor).

    Raises:
    - ValueError: If the matrix has no nonzero diagonals (all-zero matrix);
      without this guard np.log2(0) and the division by prepare_norm == 0
      below would fail obscurely.
    """
    offsets = nonzero_diagonals(sparse_mat)
    if not offsets:
        raise ValueError("sparse_mat has no nonzero diagonals; nothing to encode")

    # Number of qubits needed to index the nonzero diagonals.
    num_q_diag = int(np.ceil(np.log2(len(offsets))))
    diags = extract_diagonals(sparse_mat, offsets)
    diags = pad_arrays(diags, offsets, 0)

    # Each diagonal came from nonzero_diagonals, so its max |entry| is > 0
    # and the per-diagonal normalization below is safe.
    diags_maxima = [np.max(np.abs(d)) for d in diags]
    normalized_diags = [(d / d_max).tolist() for d, d_max in zip(diags, diags_maxima)]

    prepare_norm = sum(diags_maxima)
    # Pad with zeros up to the full 2**num_q_diag amplitude vector.
    normalized_diags_maxima = [d_max / prepare_norm for d_max in diags_maxima] + [0] * (
        2**num_q_diag - len(offsets)
    )

    return offsets, normalized_diags, normalized_diags_maxima, prepare_norm
| 91 | + |
87 | 92 |
|
88 | 93 | """ Loading of a single diagonal with a given offset""" |
89 | 94 |
|
|
0 commit comments