|
27 | 27 | see baseline docstring URL). |
28 | 28 | - Total samples: 100 (2-class Iris). Full Iris has 150 (3 classes). |
29 | 29 |
|
30 | | -Only difference from baseline: encoding. Here we use QDP (QuantumDataLoader + amplitude) → StatePrep(encoded); |
| 30 | +Only difference from baseline: encoding. Here we use QDP (QdpEngine.encode + amplitude) → StatePrep(encoded); |
31 | 31 | baseline uses get_angles → state_preparation(angles). Rest: same circuit (Rot + CNOT), loss, optimizer, CLI. |
32 | 32 | """ |
33 | 33 |
|
|
36 | 36 | # --- Imports --- |
37 | 37 |
|
38 | 38 | import argparse |
39 | | -import os |
40 | | -import tempfile |
41 | 39 | import time |
42 | 40 | from typing import Any |
43 | 41 |
|
|
60 | 58 | "scikit-learn is required. Install with: uv sync --group benchmark" |
61 | 59 | ) from e |
62 | 60 |
|
63 | | -from qumat_qdp import QuantumDataLoader |
| 61 | +from qumat_qdp import QdpEngine |
64 | 62 | import torch |
65 | 63 |
|
66 | 64 |
|
@@ -113,38 +111,32 @@ def load_iris_binary_4d(seed: int = 42) -> tuple[np.ndarray, np.ndarray]: |
113 | 111 | return X_norm, Y |
114 | 112 |
|
115 | 113 |
|
# --- Encoding: QDP (QdpEngine.encode + amplitude); 4-D → GPU tensor ---
def encode_via_qdp(
    X_norm: np.ndarray,
    batch_size: int,  # kept for CLI symmetry; not used here
    device_id: int = 0,
    data_dir: str | None = None,
    filename: str = "iris_4d.npy",
) -> torch.Tensor:
    """QDP: use QdpEngine.encode on 4-D vectors (amplitude), return encoded (n, 4) on GPU.

    Uses in-memory encoding via QdpEngine instead of writing/reading .npy files. The returned
    tensor stays on the selected CUDA device and can be fed directly to qml.StatePrep.

    Args:
        X_norm: (n, STATE_DIM) array of normalized feature vectors; must have
            exactly STATE_DIM (4) columns so each row amplitude-encodes onto
            NUM_QUBITS (2) qubits.
        batch_size: Unused in this implementation; retained so the CLI stays
            symmetric with the baseline script.
        device_id: CUDA device index handed to QdpEngine.
        data_dir: Unused in the in-memory path; retained for backward
            compatibility with the earlier file-based (.npy) implementation.
        filename: Unused in the in-memory path; retained for backward
            compatibility with the earlier file-based (.npy) implementation.

    Returns:
        A (n, STATE_DIM) torch.Tensor of amplitude-encoded states, resident on
        the selected device.

    Raises:
        ValueError: If X_norm does not have exactly STATE_DIM features.
    """
    n, dim = X_norm.shape
    if dim != STATE_DIM:
        raise ValueError(
            f"X_norm must have {STATE_DIM} features for 2 qubits, got {dim}"
        )
    # NOTE(review): the engine is configured for float32 while the input is
    # cast to float64 — presumably QdpEngine downcasts internally; confirm
    # against the QdpEngine.encode contract before changing either dtype.
    engine = QdpEngine(device_id=device_id, precision="float32")
    qt = engine.encode(
        X_norm.astype(np.float64),
        num_qubits=NUM_QUBITS,
        encoding_method="amplitude",
    )
    # Zero-copy hand-off into torch via the DLPack protocol; per DLPack
    # semantics the consumer tensor keeps the producer's buffer alive.
    encoded = torch.from_dlpack(qt)
    # Trim to exactly one row per input sample — presumably encode can return
    # padded rows, mirroring the old batched path's [:n]; verify. Unlike the
    # old path there is no .clone(), so a view over the DLPack buffer is
    # returned — assumed safe because torch holds the capsule; confirm.
    return encoded[:n]
148 | 140 |
|
149 | 141 |
|
150 | 142 | # --- Training: StatePrep(encoded) + Rot layers, square loss, optional early stop --- |
|
0 commit comments