Description
import os
import pickle
import tarfile
from tqdm import tqdm
import matplotlib
import numpy as np
import pyqpanda as pq
import pyvqnet.nn as nn
from keras.utils import to_categorical
from pyvqnet.data.data import data_generator
from pyvqnet.nn.linear import Linear
from pyvqnet.nn.loss import CrossEntropyLoss
from pyvqnet.nn.module import Module
from pyvqnet.optim.adam import Adam
from pyvqnet.qnn.measure import expval
from pyvqnet.qnn.quantumlayer import QuantumLayerMultiProcess
from pyvqnet.tensor.tensor import QTensor
from pyvqnet import DEV_GPU_0
from pyvqnet.tensor import *
from pyvqnet.tensor import tensor
import warnings
warnings.filterwarnings('ignore')
try:
matplotlib.use("TkAgg")
except: # pylint:disable=bare-except
print("Can not use matplot TkAgg")
try:
import urllib.request
except ImportError:
raise ImportError("You should use Python 3.x")
def load_local_cifar10(path):
with tarfile.open(path, 'r:gz') as tar:
tar.extractall()
data_dir = './cifar-10-batches-py'
x_train, y_train, x_test, y_test = [], [], [], []
for batch in range(1, 6):
with open(os.path.join(data_dir, f'data_batch_{batch}'), 'rb') as f:
            batch_dict = pickle.load(f, encoding='bytes')  # avoid shadowing the built-in dict
        x_train.append(batch_dict[b'data'])
        y_train.append(batch_dict[b'labels'])
with open(os.path.join(data_dir, 'test_batch'), 'rb') as f:
        test_dict = pickle.load(f, encoding='bytes')
    x_test = test_dict[b'data']
    y_test = test_dict[b'labels']
x_train = np.concatenate(x_train)
y_train = np.concatenate(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
    # Reshape to NCHW layout: [batch, 3, 32, 32]
    x_train = x_train.reshape([-1, 3, 32, 32])
    x_test = x_test.reshape([-1, 3, 32, 32])
    return (x_train, y_train), (x_test, y_test)
def RotCircuit(para, qlist):
if isinstance(para, QTensor):
para = QTensor._to_numpy(para)
    if para.ndim > 1:
        raise ValueError("dim of parameters in Rot should be 1")
    if para.shape[0] != 3:
        raise ValueError("number of parameters in Rot should be 3")
cir = pq.QCircuit()
cir.insert(pq.RZ(qlist, para[2]))
cir.insert(pq.RY(qlist, para[1]))
cir.insert(pq.RZ(qlist, para[0]))
return cir
def build_RotCircuit(qubits, weights):
    # One Rot (RZ-RY-RZ) block per qubit, consuming 3 angles each: 16 * 3 = 48 weights.
    cir = pq.QCircuit()
    for i in range(len(qubits)):
        cir.insert(RotCircuit(weights[3 * i:3 * i + 3], qubits[i]))
    return cir
def CRXCircuit(para, control_qlists, rot_qlists):
    # Build an RX on the target qubit, then promote it to a controlled-RX
    # by attaching the control qubit via set_control.
    cir = pq.QCircuit()
    cir.insert(pq.RX(rot_qlists, para))
    cir.set_control(control_qlists)
    return cir
def build_CRotCircuit(qubits, weights):
    # Ring of controlled-RX gates: qubit i controls an RX on qubit i + 1,
    # with the last qubit wrapping around to the first (16 weights in total).
    cir = pq.QCircuit()
    n = len(qubits)
    for i in range(n):
        cir.insert(CRXCircuit(weights[i], qubits[i], qubits[(i + 1) % n]))
    return cir
def build_qmlp_circuit(x, weights, qubits, clist, machine):
cir = pq.QCircuit()
num_qubits = len(qubits)
for i in range(num_qubits):
cir.insert(pq.RX(qubits[i], x[i]))
cir.insert(build_RotCircuit(qubits, weights[0:48]))
cir.insert(build_CRotCircuit(qubits, weights[48:64]))
for i in range(num_qubits):
cir.insert(pq.RX(qubits[i], x[i]))
cir.insert(build_RotCircuit(qubits, weights[64:112]))
cir.insert(build_CRotCircuit(qubits, weights[112:128]))
prog = pq.QProg()
prog.insert(cir)
exp_vals = []
for position in range(num_qubits):
pauli_str = {"Z" + str(position): 1.0}
exp2 = expval(machine, prog, pauli_str, qubits)
exp_vals.append(exp2)
return exp_vals
def build_multiprocess_qmlp_circuit(x, weights, num_qubits, num_clist):
machine = pq.CPUQVM()
machine.init_qvm()
qubits = machine.qAlloc_many(num_qubits)
cir = pq.QCircuit()
for i in range(num_qubits):
cir.insert(pq.RX(qubits[i], x[i]))
cir.insert(build_RotCircuit(qubits, weights[0:48]))
cir.insert(build_CRotCircuit(qubits, weights[48:64]))
for i in range(num_qubits):
cir.insert(pq.RX(qubits[i], x[i]))
cir.insert(build_RotCircuit(qubits, weights[64:112]))
cir.insert(build_CRotCircuit(qubits, weights[112:128]))
prog = pq.QProg()
prog.insert(cir)
exp_vals = []
for position in range(num_qubits):
pauli_str = {"Z" + str(position): 1.0}
exp2 = expval(machine, prog, pauli_str, qubits)
exp_vals.append(exp2)
return exp_vals
# NOTE: QMLPModel is redefined several times below; in Python only the last
# definition is the one actually instantiated.
class QMLPModel(Module):
    def __init__(self):
        super(QMLPModel, self).__init__()
        self.ave_pool2d = nn.AvgPool2D([7, 7], [7, 7], "valid")
# self.quantum_circuit = QuantumLayer(build_qmlp_circuit, 128, "CPU", 16, diff_method="finite_diff")
self.quantum_circuit = QuantumLayerMultiProcess(build_multiprocess_qmlp_circuit, 128,
16, 1, diff_method="finite_diff")
self.linear = Linear(16, 10)
def forward(self, x):
bsz = x.shape[0]
x = self.ave_pool2d(x)
input_data = x.reshape([bsz, 16])
        quantum_result = self.quantum_circuit(input_data)
        result = self.linear(quantum_result)
return result
class QMLPModel(Module):
    def __init__(self):
        super(QMLPModel, self).__init__()
        # Add a small CNN to downsample and extract features
        # (note: cnn_layers is defined here but never used in forward below)
        self.ave_pool2d = nn.AvgPool2D([7, 7], [7, 7], "valid")
        self.cnn_layers = nn.Sequential(
            # Assume CIFAR input: [batch_size, 3, 32, 32]
            nn.Conv2D(3, 16, kernel_size=(3, 3)),
            nn.ReLu(),
            nn.MaxPool2D([2, 2], [2, 2]),
            # Shape is now [batch_size, 16, 16, 16]
            nn.Conv2D(16, 8, kernel_size=(3, 3)),
            nn.ReLu(),
            nn.MaxPool2D([2, 2], [2, 2]),
            # Final shape: [batch_size, 8, 8, 8]
        )
        # Make sure the quantum circuit (or the rest of the model) matches the new input size
        self.quantum_circuit = QuantumLayerMultiProcess(build_multiprocess_qmlp_circuit, 128, 16, 1, diff_method="finite_diff")
        # After the quantum layer, connect to the final classification layer
        self.linear = Linear(16, 10)
def forward(self, x):
bsz = x.shape[0]
x = self.ave_pool2d(x)
        # You may need to process x further here to match the quantum circuit's input requirements
        input_data = x.reshape([bsz, 16])  # make sure this matches the circuit's expected input format
quantum_result = self.quantum_circuit(input_data)
result = self.linear(quantum_result)
return result
class QMLPModel(nn.Module):
    def __init__(self):
        super(QMLPModel, self).__init__()
        # Assume suitably preprocessed CIFAR input: [batch_size, channels, height, width]
        self.ave_pool2d = nn.AvgPool2D([3, 3], [1, 1], "valid")
self.cnn_layers = nn.Sequential(
nn.Conv2D(3, 16, kernel_size=(3, 3), stride=(1, 1), padding='same'),
nn.ReLu(),
nn.AvgPool2D(kernel=[2, 2], stride=[2, 2]),
            # With 'same' padding then 2x2 stride-2 pooling, shape is now [batch_size, 16, 16, 16]
nn.Conv2D(16, 8, kernel_size=(3, 3), stride=(1, 1), padding='same'),
nn.ReLu(),
nn.AvgPool2D(kernel=[2, 2], stride=[2, 2]),
            # After the second 'same'-padded conv and stride-2 pooling: [batch_size, 8, 8, 8]
)
        # The QuantumLayerMultiProcess parameters need adjusting to fit the CNN output
        self.quantum_circuit = QuantumLayerMultiProcess(build_multiprocess_qmlp_circuit, 128, 16, 1, diff_method="finite_diff")
        # Assume the CNN output is flattened and adapted to the quantum layer
        self.linear = nn.Linear(16, 10)  # 10 output classes
def forward(self, x):
        # x = self.cnn_layers(x)
        # # Flatten the features to fit the quantum layer
        # x = x.view(x.size(0), -1)
        # quantum_result = self.quantum_circuit(x)
        # result = self.linear(quantum_result)
        # return result
bsz = x.shape[0]
x = self.ave_pool2d(x)
        input_data = x.reshape([bsz, 2700])  # [bsz, 3, 30, 30] after the 3x3, stride-1 pool
        quantum_result = self.quantum_circuit(input_data)
        result = self.linear(quantum_result)
return result
class QMLPModel(Module):
    def __init__(self):
        super(QMLPModel, self).__init__()
self.ave_pool2d = nn.AvgPool2D([7, 7], [7, 7], "valid")
# self.quantum_circuit = QuantumLayer(build_qmlp_circuit, 128, "CPU", 16, diff_method="finite_diff")
self.quantum_circuit = QuantumLayerMultiProcess(build_multiprocess_qmlp_circuit, 128,
16, 1, diff_method="finite_diff")
self.linear = Linear(16, 10)
def forward(self, x):
bsz = x.shape[0]
x = self.ave_pool2d(x)
        input_data = x.reshape([bsz, 16 * 3])  # pooled [bsz, 3, 4, 4] -> 48 features
        quantum_result = self.quantum_circuit(input_data)
        result = self.linear(quantum_result)
return result
def vqnet_test_QMLPModel():
    # Train / eval split sizes
    # train_size = 5000
    # eval_size = 1000
    # Load the CIFAR data with the loader defined above
    local_cifar10_path = '/data3/gaolanmei_2024/AutoPET/AutoPET/data/cifar-10-python.tar.gz'  # make sure the path is correct
(x_train, y_train), (x_test, y_test) = load_local_cifar10(local_cifar10_path)
    # Optionally take a slice of the data for a quick test
    # x_train = x_train[:train_size]
    # y_train = y_train[:train_size]
    # x_test = x_test[:eval_size]
    # y_test = y_test[:eval_size]
print(f"x_train length: {len(x_train)}")
print(f"y_train length: {len(x_test)}")
# print(f"Batch size: {batch_size}")
    model = QMLPModel()  # make sure QMLPModel can handle the CIFAR input dimensions
# model.toGPU(DEV_GPU_0)
optimizer = Adam(model.parameters(), lr=0.001)
loss_func = CrossEntropyLoss()
    train_loss_list = []  # per-epoch loss log (was used below but never initialized)
    train_acc_list = []   # per-epoch accuracy log
epochs = 30
    # NOTE: the quantum layer simulates on the CPU (it builds a pq.CPUQVM internally),
    # so moving the rest of the model to the GPU is a likely source of the
    # device-mismatch error reported below.
    model = model.to_gpu(DEV_GPU_0)
batch_size = 16
    for epoch in range(1, epochs + 1):  # range(1, epochs) would run only 29 of the 30 epochs
total_loss = []
correct = 0
n_train = 0
        # Generate batches with data_generator
data_iter = data_generator(x_train, y_train, batch_size=batch_size, shuffle=True)
for x, y in tqdm(data_iter):
x = QTensor(x)
y = QTensor(y)
x = x.to_gpu(DEV_GPU_0)
y = y.to_gpu(DEV_GPU_0)
optimizer.zero_grad()
output = model(x)
loss = loss_func(y, output)
loss_np = loss.to_numpy()
np_output = output.to_numpy()
y = y.to_numpy()
mask = (np_output.argmax(1) == y.argmax(1))
correct += np.sum(np.array(mask))
n_train += batch_size
loss.backward()
optimizer._step()
total_loss.append(loss_np)
        train_loss_list.append(np.sum(total_loss) / len(total_loss))
        train_acc_list.append(correct / n_train)
        print("train epoch {:.0f} loss is : {:.10f}".format(epoch, train_loss_list[-1]))
        print("##########################")
        print(f"Train Accuracy: {train_acc_list[-1]}")
if name == "main":
vqnet_test_QMLPModel()
My code is shown above, but it reports that tensors are on different devices:
Traceback (most recent call last):
File "/data3/gaolanmei_2024/AutoPET/AutoPET/data/main.py", line 386, in
vqnet_test_QMLPModel()
File "/data3/gaolanmei_2024/AutoPET/AutoPET/data/main.py", line 364, in vqnet_test_QMLPModel
output = model(x)
File "pyvqnet/nn/module.py", line 613, in pyvqnet.nn.module.Module.call
File "/data3/gaolanmei_2024/AutoPET/AutoPET/data/main.py", line 313, in forward
result = self.linear(quanutum_result)
File "pyvqnet/nn/module.py", line 613, in pyvqnet.nn.module.Module.call
File "pyvqnet/nn/linear.py", line 106, in pyvqnet.nn.linear.Linear.forward
RuntimeError: VQNet runtimeError: Tensors in different devices ( FILE: /root/yxy/vqnet2.0.8/package/1016-linux/py39/vqnet/src/tensor/tensor_utils.cpp. LINE: 3153. FUNC: mult2D_templates