Skip to content

Commit a0449b5

Browse files
authored
[QDP] feat: improve iris example (#1095)
1 parent 88eb7d1 commit a0449b5

File tree

1 file changed

+16
-24
lines changed

1 file changed

+16
-24
lines changed

qdp/qdp-python/benchmark/encoding_benchmarks/qdp_pipeline/iris_amplitude.py

Lines changed: 16 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
see baseline docstring URL).
2828
- Total samples: 100 (2-class Iris). Full Iris has 150 (3 classes).
2929
30-
Only difference from baseline: encoding. Here we use QDP (QuantumDataLoader + amplitude) → StatePrep(encoded);
30+
Only difference from baseline: encoding. Here we use QDP (QdpEngine.encode + amplitude) → StatePrep(encoded);
3131
baseline uses get_angles → state_preparation(angles). Rest: same circuit (Rot + CNOT), loss, optimizer, CLI.
3232
"""
3333

@@ -36,8 +36,6 @@
3636
# --- Imports ---
3737

3838
import argparse
39-
import os
40-
import tempfile
4139
import time
4240
from typing import Any
4341

@@ -60,7 +58,7 @@
6058
"scikit-learn is required. Install with: uv sync --group benchmark"
6159
) from e
6260

63-
from qumat_qdp import QuantumDataLoader
61+
from qumat_qdp import QdpEngine
6462
import torch
6563

6664

@@ -113,38 +111,32 @@ def load_iris_binary_4d(seed: int = 42) -> tuple[np.ndarray, np.ndarray]:
113111
return X_norm, Y
114112

115113

116-
# --- Encoding: QDP (QuantumDataLoader + amplitude); 4-D → GPU tensor ---
114+
# --- Encoding: QDP (QdpEngine.encode + amplitude); 4-D → GPU tensor ---
117115
def encode_via_qdp(
118116
X_norm: np.ndarray,
119-
batch_size: int,
117+
batch_size: int, # kept for CLI symmetry; not used here
120118
device_id: int = 0,
121119
data_dir: str | None = None,
122120
filename: str = "iris_4d.npy",
123121
) -> torch.Tensor:
124-
"""QDP: save 4-D vectors to .npy, run QuantumDataLoader (amplitude), return encoded (n, 4) on GPU."""
122+
"""QDP: use QdpEngine.encode on 4-D vectors (amplitude), return encoded (n, 4) on GPU.
123+
124+
Uses in-memory encoding via QdpEngine instead of writing/reading .npy files. The returned
125+
tensor stays on the selected CUDA device and can be fed directly to qml.StatePrep.
126+
"""
125127
n, dim = X_norm.shape
126128
if dim != STATE_DIM:
127129
raise ValueError(
128130
f"X_norm must have {STATE_DIM} features for 2 qubits, got {dim}"
129131
)
130-
if data_dir is None:
131-
data_dir = tempfile.gettempdir()
132-
os.makedirs(data_dir, exist_ok=True)
133-
path = os.path.join(data_dir, filename)
134-
np.save(path, X_norm.astype(np.float64))
135-
total_batches = (n + batch_size - 1) // batch_size
136-
loader = (
137-
QuantumDataLoader(device_id=device_id)
138-
.qubits(NUM_QUBITS)
139-
.encoding("amplitude")
140-
.batches(total_batches, size=batch_size)
141-
.source_file(path)
132+
engine = QdpEngine(device_id=device_id, precision="float32")
133+
qt = engine.encode(
134+
X_norm.astype(np.float64),
135+
num_qubits=NUM_QUBITS,
136+
encoding_method="amplitude",
142137
)
143-
batches = []
144-
for qt in loader:
145-
t = torch.from_dlpack(qt)
146-
batches.append(t) # keep on GPU
147-
return torch.cat(batches, dim=0)[:n].clone()
138+
encoded = torch.from_dlpack(qt)
139+
return encoded[:n]
148140

149141

150142
# --- Training: StatePrep(encoded) + Rot layers, square loss, optional early stop ---

0 commit comments

Comments (0)