
Commit ad5c82a

Merge branch 'main' into benchmarking_script
2 parents: b9de90b + 68b8087

File tree: 3 files changed (+9 -7 lines)


docs/quantization.md (+5 -5)

@@ -142,22 +142,22 @@ To use linear:a8wxdq and embedding:wx, you must set up the torchao experimental
 
 From the torchchat root directory, run
 ```
-sh torchchat/utils/scripts/build_torchao_ops.sh
+bash torchchat/utils/scripts/build_torchao_ops.sh
 ```
 
 This should take about 10 seconds to complete.
 
 Note: if you want to use the new kernels in the AOTI and C++ runners, you must pass the flag link_torchao_ops when running the scripts the build the runners.
 
 ```
-sh torchchat/utils/scripts/build_native.sh aoti link_torchao_ops
+bash torchchat/utils/scripts/build_native.sh aoti link_torchao_ops
 ```
 
 ```
-sh torchchat/utils/scripts/build_native.sh et link_torchao_ops
+bash torchchat/utils/scripts/build_native.sh et link_torchao_ops
 ```
 
-Note before running `sh torchchat/utils/scripts/build_native.sh et link_torchao_ops`, you must first install executorch with `sh torchchat/utils/scripts/install_et.sh` if you have not done so already.
+Note before running `bash torchchat/utils/scripts/build_native.sh et link_torchao_ops`, you must first install executorch with `bash torchchat/utils/scripts/install_et.sh` if you have not done so already.
 
 ### Examples
 
@@ -212,7 +212,7 @@ Currently, torchchat can only run them on Eager mode.
 
 From the torchchat root directory, run
 ```
-sh torchchat/utils/scripts/build_torchao_ops.sh mps
+bash torchchat/utils/scripts/build_torchao_ops.sh mps
 ```
 
 ### Examples
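The sh-to-bash change above matters because the documented build scripts rely on bash-specific behavior (the updown.py change below also emits `set -eou pipefail`, which plain `sh` implementations such as dash may reject). If you drive these commands from Python, say in a benchmarking or CI wrapper, a minimal sketch could look like the following; the wrapper function is hypothetical, and only the script path and the `mps` argument come from the docs above:

```python
import subprocess


def build_torchao_ops(*args: str) -> None:
    """Hypothetical wrapper around the documented build command."""
    # Invoke the script explicitly under bash, mirroring
    # `bash torchchat/utils/scripts/build_torchao_ops.sh` from the docs.
    cmd = ["bash", "torchchat/utils/scripts/build_torchao_ops.sh", *args]
    # check=True raises CalledProcessError on a non-zero exit status,
    # so a failed build is not silently ignored.
    subprocess.run(cmd, check=True)


if __name__ == "__main__":
    build_torchao_ops()          # CPU kernels
    # build_torchao_ops("mps")   # MPS kernels, as in the MPS section above
```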

torchchat/utils/quantize.py (+2 -2)

@@ -934,15 +934,15 @@ def quantized_model(self) -> nn.Module:
         torch.ops.load_library(libs[0])
         print("Loaded torchao cpu ops.")
     except Exception as e:
-        print("Unabled to load torchao cpu ops library. Slow fallback kernels will be used.")
+        print("Unable to load torchao cpu ops library. Slow fallback kernels will be used.")
 
     try:
         libname = "libtorchao_ops_mps_aten.dylib"
         libpath = f"{torchao_build_path}/cmake-out/lib/{libname}"
         torch.ops.load_library(libpath)
         print("Loaded torchao mps ops.")
     except Exception as e:
-        print("Unabled to load torchao mps ops library.")
+        print("Unable to load torchao mps ops library.")
 
 except Exception as e:
     print("Unabled to import torchao experimental quant_api with error: ", e)

torchchat/utils/scripts/updown.py (+2 -0)

@@ -267,6 +267,8 @@ def updown_processor(
     lines = file.readlines()
     print_flag = False
 
+    # Use bash; set it to fail on the first failing command
+    output("#! /bin/bash", replace_list=None, suppress_list=None)
     output("set -eou pipefail", replace_list=None, suppress_list=None)
 
     if create_sections:
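This change makes the script that updown.py generates declare bash explicitly before enabling strict error handling, since `set -eou pipefail` uses the `pipefail` option that not every `/bin/sh` supports. A small sketch of the emitted header; `emit` is a simplified stand-in for updown.py's `output` helper (the real one also takes `replace_list`/`suppress_list` filtering arguments):

```python
import sys


def emit(line: str) -> None:
    # Simplified stand-in for updown.py's output(); the real helper also
    # filters the text through replace_list / suppress_list.
    print(line, file=sys.stdout)


# Header of the generated script: interpret it with bash and abort on the
# first failing command, use of an unset variable, or failed pipeline stage.
emit("#! /bin/bash")
emit("set -eou pipefail")
```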
