Commit e14de61

Author: Joey Tsai (committed)

Add GA mobilevit1

- Reset dynamic_shape to empty when all dims equal zero, for reshape validation
- Remove dynamic_shape from AddTensorParam
- Fix based on comments

1 parent 95a1db5 commit e14de61

File tree: 4 files changed, +211 -3 lines

- backends/qualcomm/aot/wrappers/OpWrapper.h
- backends/qualcomm/builders/node_visitor.py
- backends/qualcomm/tests/test_qnn_delegate.py
- examples/qualcomm/oss_scripts/mobilevit1.py

backends/qualcomm/aot/wrappers/OpWrapper.h

Lines changed: 1 addition & 2 deletions
@@ -68,14 +68,13 @@ class OpWrapper final {
     std::unique_ptr<QuantizeParamsWrapper> quantize_param_wrapper =
         std::make_unique<UndefinedQuantizeParamsWrapper>();
     constexpr std::uint32_t kBytes = 0;
-    std::vector<uint8_t> dynamic_dims(rank, 0);
     std::shared_ptr<TensorWrapper> tensor_wrapper = CreateTensorWrapper(
         QNN_TENSOR_TYPE_STATIC,
         data_type,
         std::move(quantize_param_wrapper),
         rank,
         dims,
-        dynamic_dims.data(),
+        nullptr,
         kBytes,
         data,
         copy_data);

backends/qualcomm/builders/node_visitor.py

Lines changed: 1 addition & 1 deletion
@@ -348,7 +348,7 @@ def get_dynamic_dimension(self, dims):
                 nominal_dims.append(dim)
                 dynamic_dims.append(0)
 
-        return dynamic_dims, nominal_dims
+        return dynamic_dims if any(dynamic_dims) else [], nominal_dims
 
     def define_custom_tensor_wrapper(
         self,
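The node_visitor.py change above means a fully static shape now reports an empty dynamic-shape list instead of a list of zeros, which is what the commit message calls resetting dynamic_shape to empty for reshape validation. Below is a minimal stand-alone sketch of the new return behavior, assuming a "?" placeholder marks a dynamic dimension; it is not the real NodeVisitor.get_dynamic_dimension, only an illustration of the changed return value.

def get_dynamic_dimension(dims):
    # Hypothetical sketch; the "?" marker for a dynamic dim is an assumption
    # made only for this example.
    nominal_dims, dynamic_dims = [], []
    for dim in dims:
        if dim == "?":               # placeholder for a symbolic/dynamic dimension
            nominal_dims.append(1)   # assumed nominal fallback, illustration only
            dynamic_dims.append(1)
        else:
            nominal_dims.append(dim)
            dynamic_dims.append(0)
    # After this commit: when no dimension is dynamic, return an empty list
    # rather than [0, 0, ...], so reshape validation sees "no dynamic shape".
    return (dynamic_dims if any(dynamic_dims) else []), nominal_dims

print(get_dynamic_dimension([1, 3, 224, 224]))    # ([], [1, 3, 224, 224])
print(get_dynamic_dimension(["?", 3, 224, 224]))  # ([1, 0, 0, 0], [1, 3, 224, 224])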

backends/qualcomm/tests/test_qnn_delegate.py

Lines changed: 36 additions & 0 deletions
@@ -4180,6 +4180,42 @@ def test_pvt(self):
         self.assertGreaterEqual(msg["top_1"], 65)
         self.assertGreaterEqual(msg["top_5"], 85)
 
+    def test_mobilevit1(self):
+        if not self.required_envs([self.image_dataset]):
+            self.skipTest("missing required envs")
+
+        cmds = [
+            "python",
+            f"{self.executorch_root}/examples/qualcomm/oss_scripts/mobilevit1.py",
+            "--dataset",
+            self.image_dataset,
+            "--artifact",
+            self.artifact_dir,
+            "--build_folder",
+            self.build_folder,
+            "--device",
+            self.device,
+            "--model",
+            self.model,
+            "--ip",
+            self.ip,
+            "--port",
+            str(self.port),
+        ]
+        if self.host:
+            cmds.extend(["--host", self.host])
+
+        p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
+        with Listener((self.ip, self.port)) as listener:
+            conn = listener.accept()
+            p.communicate()
+            msg = json.loads(conn.recv())
+            if "Error" in msg:
+                self.fail(msg["Error"])
+            else:
+                self.assertGreaterEqual(msg["top_1"], 70)
+                self.assertGreaterEqual(msg["top_5"], 85)
+
     def test_regnet(self):
         if not self.required_envs([self.image_dataset]):
             self.skipTest("missing required envs")
examples/qualcomm/oss_scripts/mobilevit1.py (new file)

Lines changed: 173 additions & 0 deletions
@@ -0,0 +1,173 @@
+# Copyright (c) Qualcomm Innovation Center, Inc.
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import json
+import logging
+import os
+from multiprocessing.connection import Client
+
+import numpy as np
+
+import torch
+from executorch.backends.qualcomm.quantizer.quantizer import QuantDtype
+from executorch.examples.qualcomm.utils import (
+    build_executorch_binary,
+    make_output_dir,
+    parse_skip_delegation_node,
+    setup_common_args_and_variables,
+    SimpleADB,
+    topk_accuracy,
+)
+from PIL import Image
+from torchvision import datasets
+from transformers import AutoModelForImageClassification, MobileViTFeatureExtractor
+
+
+def get_imagenet_dataset(dataset_path, data_size, shuffle=True):
+
+    def get_data_loader():
+        imagenet_data = datasets.ImageFolder(dataset_path)
+        return torch.utils.data.DataLoader(
+            imagenet_data,
+            shuffle=shuffle,
+        )
+
+    # prepare input data
+    inputs, targets, input_list = [], [], ""
+    data_loader = get_data_loader()
+    feature_extractor = MobileViTFeatureExtractor.from_pretrained(
+        "apple/mobilevit-xx-small"
+    )
+    for index, data in enumerate(data_loader.dataset.imgs):
+        if index >= data_size:
+            break
+        data_path, target = data
+        image = Image.open(data_path).convert("RGB")
+        feature = feature_extractor(images=image, return_tensors="pt")
+        inputs.append((feature["pixel_values"],))
+        targets.append(torch.tensor(target))
+        input_list += f"input_{index}_0.raw\n"
+
+    return inputs, targets, input_list
+
+
+def main(args):
+    skip_node_id_set, skip_node_op_set = parse_skip_delegation_node(args)
+
+    # ensure the working directory exists
+    os.makedirs(args.artifact, exist_ok=True)
+
+    if not args.compile_only and args.device is None:
+        raise RuntimeError(
+            "device serial is required if not compile only. "
+            "Please specify a device serial by -s/--device argument."
+        )
+
+    data_num = 100
+    if args.ci:
+        inputs = [(torch.rand(1, 3, 224, 224),)]
+        logging.warning(
+            "This option is for CI to verify the export flow. It uses random input and will result in poor accuracy."
+        )
+    else:
+        inputs, targets, input_list = get_imagenet_dataset(
+            dataset_path=f"{args.dataset}",
+            data_size=data_num,
+        )
+
+    module = (
+        AutoModelForImageClassification.from_pretrained("apple/mobilevit-xx-small")
+        .eval()
+        .to("cpu")
+    )
+
+    pte_filename = "mobilevit1_qnn_q16"
+    build_executorch_binary(
+        module.eval(),
+        inputs[0],
+        args.model,
+        f"{args.artifact}/{pte_filename}",
+        inputs,
+        skip_node_id_set=skip_node_id_set,
+        skip_node_op_set=skip_node_op_set,
+        quant_dtype=QuantDtype.use_16a16w,
+        shared_buffer=args.shared_buffer,
+    )
+
+    if args.compile_only:
+        return
+
+    adb = SimpleADB(
+        qnn_sdk=os.getenv("QNN_SDK_ROOT"),
+        build_path=f"{args.build_folder}",
+        pte_path=f"{args.artifact}/{pte_filename}.pte",
+        workspace=f"/data/local/tmp/executorch/{pte_filename}",
+        device_id=args.device,
+        host_id=args.host,
+        soc_model=args.model,
+        shared_buffer=args.shared_buffer,
+    )
+    adb.push(inputs=inputs, input_list=input_list)
+    adb.execute()
+
+    # collect output data
+    output_data_folder = f"{args.artifact}/outputs"
+    make_output_dir(output_data_folder)
+
+    adb.pull(output_path=args.artifact)
+
+    # top-k analysis
+    predictions = []
+    for i in range(data_num):
+        predictions.append(
+            np.fromfile(
+                os.path.join(output_data_folder, f"output_{i}_0.raw"), dtype=np.float32
+            )
+        )
+
+    k_val = [1, 5]
+    topk = [topk_accuracy(predictions, targets, k).item() for k in k_val]
+    if args.ip and args.port != -1:
+        with Client((args.ip, args.port)) as conn:
+            conn.send(json.dumps({f"top_{k}": topk[i] for i, k in enumerate(k_val)}))
+    else:
+        for i, k in enumerate(k_val):
+            print(f"top_{k}->{topk[i]}%")
+
+
+if __name__ == "__main__":
+    parser = setup_common_args_and_variables()
+
+    parser.add_argument(
+        "-d",
+        "--dataset",
+        help=(
+            "path to the validation folder of ImageNet dataset. "
+            "e.g. --dataset imagenet-mini/val "
+            "for https://www.kaggle.com/datasets/ifigotin/imagenetmini-1000)"
+        ),
+        type=str,
+        required=False,
+    )
+
+    parser.add_argument(
+        "-a",
+        "--artifact",
+        help="path for storing generated artifacts by this example. "
+        "Default ./mobilevit1",
+        default="./mobilevit1",
+        type=str,
+    )
+
+    args = parser.parse_args()
+    try:
+        main(args)
+    except Exception as e:
+        if args.ip and args.port != -1:
+            with Client((args.ip, args.port)) as conn:
+                conn.send(json.dumps({"Error": str(e)}))
+        else:
+            raise Exception(e)
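For context on the input pipeline in get_imagenet_dataset above, here is a minimal preprocessing sketch, assuming transformers and Pillow are installed and the apple/mobilevit-xx-small preprocessing config can be downloaded. It runs one synthetic image through the same feature extractor and prints the pixel_values shape; the spatial size comes from the checkpoint's preprocessing config, not from this sketch.

# Minimal sketch: run the same MobileViT preprocessing on one synthetic image.
# Assumes `transformers` and `Pillow` are installed; the random image is a
# stand-in for Image.open(data_path).convert("RGB") in the example script.
import numpy as np
from PIL import Image
from transformers import MobileViTFeatureExtractor

feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-xx-small")

image = Image.fromarray(
    np.random.randint(0, 255, (300, 300, 3), dtype=np.uint8), "RGB"
)

feature = feature_extractor(images=image, return_tensors="pt")
pixel_values = feature["pixel_values"]

# Shape is (1, 3, H, W); H and W are determined by the checkpoint's
# preprocessing configuration, not hard-coded here.
print(pixel_values.shape)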
