diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 556d9e99f..e44d9d037 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -292,7 +292,7 @@ jobs:
           echo "::endgroup::"

           echo "::group::Run inference with quantize file"
-          for DEVICE in cpu; do # cuda
+          for DEVICE in cpu; do # cuda - fails because `AttributeError: 'Linear' object has no attribute '_linear_extra_repr'`; follow up with torchao as a separate PR
             echo "saving snapshot for device ${DEVICE} and dtype bfloat16, and reloading as snapshot"
@@ -349,7 +349,7 @@ jobs:
           # python3 torchchat.py export --output-snap model.tc --dtype float32 --quantize torchchat/quant_config/cuda-32.json --checkpoint "./checkpoints/${REPO_NAME}/model.pth"
           # python3 torchchat.py generate --snap model.tc --dtype float32 --checkpoint "./checkpoints/${REPO_NAME}/model.pth"
           # echo "::endgroup::"
-
+
   test-gpu-aoti-float16:
     permissions:
       id-token: write
@@ -1075,7 +1075,7 @@ jobs:
         ./runner/build_android.sh
         echo "Tests complete."

-  test-torchao-aoti-experimental:
+  test-torchao-experimental-python:
     strategy:
       matrix:
         runner: [macos-14-xlarge]
@@ -1107,13 +1107,60 @@ jobs:
         ./install/install_requirements.sh
         pip3 list
         python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")'
-    - name: Install torchao-ops
-      id: install-torchao-ops
+    - name: Run inference
       run: |
-        bash torchchat/utils/scripts/build_torchao_ops.sh
-    - name: Install runner AOTI
-      id: install-runner-aoti
+        python torchchat.py download stories110M
+        wget -O ./tokenizer.model https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
+        export PRMT="Once upon a time in a land far away"
+        echo "Generate eager"
+        python torchchat.py generate stories110M --temperature 0 --prompt "${PRMT}" --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
+        echo "Generate compile"
+        python torchchat.py generate stories110M --temperature 0 --prompt "${PRMT}" --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}' --compile
+        echo "Export AOTI"
+        python torchchat.py export stories110M --output-aoti-package-path ./model.pt2 --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
+        echo "Generate AOTI"
+        python torchchat.py generate stories110M --aoti-package-path ./model.pt2 --prompt "${PRMT}"
+        echo "Tests complete."
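The new test-torchao-experimental-python job above exercises the pip-installed CPU kernels end to end. For local debugging, the same sequence can be run from the repo root; a minimal sketch (stories110M and the CI's quantize config; the values illustrate the scheme, they are not mandatory):

```
python torchchat.py download stories110M
wget -O ./tokenizer.model https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
# 2-bit grouped embeddings + 3-bit grouped dynamic-activation linear, as in CI
python torchchat.py generate stories110M --temperature 0 --prompt "Once upon a time" \
  --device cpu --dtype float32 \
  --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
```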
+
+  test-torchao-experimental-cpp:
+    strategy:
+      matrix:
+        runner: [macos-14-xlarge]
+    runs-on: ${{matrix.runner}}
+    steps:
+    - name: Checkout repo
+      uses: actions/checkout@v3
+      with:
+        submodules: true
+    - name: Setup Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: 3.10.11
+    - name: Setup Xcode
+      if: runner.os == 'macOS'
+      uses: maxim-lobanov/setup-xcode@v1
+      with:
+        xcode-version: '15.3'
+    - name: Print machine info
+      run: |
+        uname -a
+        if [ $(uname -s) == Darwin ]; then
+          sysctl machdep.cpu.brand_string
+          sysctl machdep.cpu.core_count
+        fi
+    - name: Install torchchat
+      run: |
+        echo "Installing pip3 packages"
+        ./install/install_requirements.sh
+        pip3 list
+        python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")'
+    - name: Clone torchao
+      id: clone-torchao
+      run: |
+        bash torchchat/utils/scripts/clone_torchao.sh
+    - name: Install runner
       run: |
+        echo "Installing runner"
         bash torchchat/utils/scripts/build_native.sh aoti link_torchao_ops
     - name: Run inference
       run: |
@@ -1123,11 +1170,9 @@ jobs:
         echo "Export and run AOTI (C++ runner)"
         python torchchat.py export stories110M --output-aoti-package-path ./model.pt2 --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
         ./cmake-out/aoti_run ./model.pt2 -z ./tokenizer.model -t 0 -i "${PRMT}"
-        echo "Generate AOTI"
-        python torchchat.py generate stories110M --aoti-package-path ./model.pt2 --prompt "${PRMT}"
         echo "Tests complete."

-  test-torchao-et-experimental:
+  test-torchao-experimental-et:
     strategy:
       matrix:
         runner: [macos-14-xlarge]
@@ -1159,15 +1204,15 @@ jobs:
         ./install/install_requirements.sh
         pip3 list
         python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")'
-    - name: Install torchao-ops
-      id: install-torchao-ops
-      run: |
-        bash torchchat/utils/scripts/build_torchao_ops.sh
     - name: Install ET
       run: |
         echo "Installing ExecuTorch"
         export TORCHCHAT_ROOT=${PWD}
         bash torchchat/utils/scripts/install_et.sh
+    - name: Clone torchao
+      id: clone-torchao
+      run: |
+        bash torchchat/utils/scripts/clone_torchao.sh
     - name: Install runner
       run: |
         echo "Installing runner"
@@ -1177,14 +1222,9 @@ jobs:
         python torchchat.py download stories110M
         wget -O ./tokenizer.model https://github.com/karpathy/llama2.c/raw/master/tokenizer.model
         export PRMT="Once upon a time in a land far away"
-        echo "Generate eager"
-        python torchchat.py generate stories110M --temperature 0 --prompt "${PRMT}" --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
-        echo "Generate compile"
-        python torchchat.py generate stories110M --temperature 0 --prompt "${PRMT}" --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}' --compile
         echo "Export and run ET (C++ runner)"
         python torchchat.py export stories110M --output-pte-path ./model.pte --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}'
         ./cmake-out/et_run ./model.pte -z ./tokenizer.model -t 0 -i "${PRMT}"
-        echo "Tests complete."

   test-torchao-experimental-mps:
     strategy:
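The two C++-runner jobs (AOTI above, ExecuTorch just before this note) share one local recipe; a condensed sketch, assuming ExecuTorch is installed first for the et variant:

```
bash torchchat/utils/scripts/clone_torchao.sh                       # fetch torchao at the pinned commit
bash torchchat/utils/scripts/build_native.sh aoti link_torchao_ops  # or: ... et link_torchao_ops
./cmake-out/aoti_run ./model.pt2 -z ./tokenizer.model -t 0 -i "Once upon a time"  # or: ./cmake-out/et_run ./model.pte ...
```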
@@ -1216,6 +1256,7 @@ jobs:
     - name: Install torchao-ops-mps
       id: install-torchao-ops-mps
       run: |
+        bash torchchat/utils/scripts/clone_torchao.sh
         bash torchchat/utils/scripts/build_torchao_ops.sh mps
     - name: Run inference
       run: |
diff --git a/docs/quantization.md b/docs/quantization.md
index 89e8e541a..79799f1ce 100644
--- a/docs/quantization.md
+++ b/docs/quantization.md
@@ -120,13 +120,15 @@ python3 torchchat.py generate llama3 --pte-path llama3.pte --prompt "Hello my n

 ## Experimental TorchAO lowbit kernels

-WARNING: These kernels only work on devices with ARM CPUs, for example on Mac computers with Apple Silicon.
+If you are on a Mac with Apple Silicon, we have 1- to 8-bit quantization available for embedding and linear layers, backed by CPU and MPS kernels.
+
+The CPU kernels are installed automatically by the torchchat install script and can be used out of the box. To use the MPS kernels, follow the setup instructions below.

 ### Use

 #### linear:a8wxdq
 The quantization scheme linear:a8wxdq dynamically quantizes activations to 8 bits, and quantizes the weights in a groupwise manner with a specified bitwidth and groupsize.
-It takes arguments bitwidth (1, 2, 3, 4, 5, 6, 7), groupsize, and has_weight_zeros (true, false).
+It takes arguments bitwidth (1, 2, 3, 4, 5, 6, 7, 8), groupsize (use -1 for channelwise quantization), and has_weight_zeros (true, false).
 The argument has_weight_zeros indicates whether the weights are quantized with scales only (has_weight_zeros: false) or with both scales and zeros (has_weight_zeros: true).
 Roughly speaking, {bitwidth: 4, groupsize: 32, has_weight_zeros: false} is similar to GGML's Q4_0 quantization scheme.
@@ -138,7 +140,9 @@ The quantization scheme embedding:wx quantizes embeddings in a groupwise manner
 You should expect high performance on ARM CPU if groupsize is divisible by 32. With other platforms and argument choices, a slow fallback kernel will be used. You will see warnings about this during quantization.

 ### Setup
-To use linear:a8wxdq and embedding:wx, you must set up the torchao experimental kernels. These will only work on devices with ARM CPUs, for example on Mac computers with Apple Silicon.
+If you are using the torchao ops from Python (i.e., not with a C++ runner), they are available out of the box on a Mac with Apple Silicon, and you can skip these setup steps.
+
+If you plan to use the kernels from the AOTI/ExecuTorch C++ runners, follow the setup steps below.

 From the torchchat root directory, run
 ```
@@ -147,7 +151,7 @@ bash torchchat/utils/scripts/build_torchao_ops.sh
 ```

 This should take about 10 seconds to complete.
-Note: if you want to use the new kernels in the AOTI and C++ runners, you must pass the flag link_torchao_ops when running the scripts the build the runners.
+When building the AOTI and C++ runners, you must pass the flag link_torchao_ops to the scripts that build the runners.

 ```
 bash torchchat/utils/scripts/build_native.sh aoti link_torchao_ops
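To make the documented arguments concrete, a sketch of a channelwise variant (llama3.1 and the values are illustrative): bitwidth 4 with groupsize -1 quantizes each weight channel with a single scale and, with has_weight_zeros false, no zero points:

```
OMP_NUM_THREADS=6 python3 torchchat.py generate llama3.1 --device cpu --dtype float32 \
  --quantize '{"linear:a8wxdq": {"bitwidth": 4, "groupsize": -1, "has_weight_zeros": false}}' \
  --prompt "Once upon a time,"
```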
@@ -175,8 +179,8 @@ OMP_NUM_THREADS=6 python3 torchchat.py generate llama3.1 --device cpu --dtype fl

 #### AOTI
 ```
-OMP_NUM_THREADS=6 python torchchat.py export llama3.1 --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}' --output-dso llama3_1.so
-OMP_NUM_THREADS=6 python3 torchchat.py generate llama3.1 --dso-path llama3_1.so --prompt "Once upon a time," --num-samples 5
+OMP_NUM_THREADS=6 python torchchat.py export llama3.1 --device cpu --dtype float32 --quantize '{"embedding:wx": {"bitwidth": 2, "groupsize": 32}, "linear:a8wxdq": {"bitwidth": 3, "groupsize": 128, "has_weight_zeros": false}}' --output-aoti-package-path llama3_1.pt2
+OMP_NUM_THREADS=6 python3 torchchat.py generate llama3.1 --aoti-package-path llama3_1.pt2 --prompt "Once upon a time," --num-samples 5
 ```

 If you built the AOTI runner with link_torchao_ops as discussed in the setup section, you can also use the C++ runner:
diff --git a/install/.pins/torchao-pin.txt b/install/.pins/torchao-pin.txt
index 2da70769c..c1b84754c 100644
--- a/install/.pins/torchao-pin.txt
+++ b/install/.pins/torchao-pin.txt
@@ -1 +1 @@
-2e032c6b0de960dee554dcb08126ace718b14c6d
+711fa0809f06fc97febd0c3fe72563c3fe227e51
diff --git a/install/install_requirements.sh b/install/install_requirements.sh
index b8baa72ce..0e58409c8 100755
--- a/install/install_requirements.sh
+++ b/install/install_requirements.sh
@@ -126,12 +126,7 @@ then
   )
 fi

-# For torchao need to install from github since nightly build doesn't have macos build.
-# TODO: Remove this and install nightly build, once it supports macos
-(
-  set -x
-  $PIP_EXECUTABLE install git+https://github.com/pytorch/ao.git@7d8794622f3ac7ffa98761314019a20fba06edef
-)
+bash install/install_torchao.sh

 if [[ -x "$(command -v nvidia-smi)" ]]; then
   (
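With the install delegated to install/install_torchao.sh (shown next), bumping torchao becomes a one-line edit of the pin file; a minimal sketch of the equivalent manual install (pip3 assumed, run from the repo root):

```
TORCHAO_PIN=$(cat install/.pins/torchao-pin.txt)
USE_CPP=1 pip3 install git+https://github.com/pytorch/ao.git@${TORCHAO_PIN}
```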
-x "$(command -v python)" ]]; + then + PYTHON_EXECUTABLE=python3 + else + PYTHON_EXECUTABLE=python + fi +fi +echo "Using python executable: $PYTHON_EXECUTABLE" + +if [[ "$PYTHON_EXECUTABLE" == "python" ]]; +then + PIP_EXECUTABLE=pip +elif [[ "$PYTHON_EXECUTABLE" == "python3" ]]; +then + PIP_EXECUTABLE=pip3 +else + PIP_EXECUTABLE=pip${PYTHON_SYS_VERSION} +fi +echo "Using pip executable: $PIP_EXECUTABLE" + + +export TORCHAO_PIN=$(cat install/.pins/torchao-pin.txt) +( + set -x + USE_CPP=1 $PIP_EXECUTABLE install git+https://github.com/pytorch/ao.git@${TORCHAO_PIN} +) diff --git a/torchchat/export.py b/torchchat/export.py index 997639ffe..bad97cd35 100644 --- a/torchchat/export.py +++ b/torchchat/export.py @@ -439,7 +439,8 @@ def main(args): tokenizer, max_seq_length=builder_args.max_seq_length, support_tensor_subclass=output_dso_path is None - and output_aoti_package_path is None, + and output_aoti_package_path is None + and output_pte_path is None, ) model_to_pte = model model_to_dso = model diff --git a/torchchat/utils/quantize.py b/torchchat/utils/quantize.py index c4ecfbe6f..6246f1c05 100644 --- a/torchchat/utils/quantize.py +++ b/torchchat/utils/quantize.py @@ -50,6 +50,18 @@ state_dict_device, use_et_backend, ) +from torchao.experimental.packed_linear_int8_dynamic_activation_intx_weight_layout import ( + PackedLinearInt8DynamicActivationIntxWeightLayout, +) +from torchao.experimental.quant_api import ( + int8_dynamic_activation_intx_weight, + IntxWeightEmbeddingQuantizer, +) +from torchao.quantization.granularity import ( + PerGroup, + PerRow, +) +from torchao.dtypes import PlainLayout # Flag for whether the a8wxdq quantizer is available. @@ -117,7 +129,45 @@ def quantize_model( unwrap_tensor_subclass(model) continue - if quantizer in ["linear:a8wxdq", "embedding:wx"]: + if quantizer == "linear:a8wxdq": + if get_precision() != torch.float32: + print(f"Quantizer {quantizer} requires float32 inputs, but received {get_precision()}. Changing dtype to float32. Note that after quantization, the weights will be lowbit integers, not float32.") + set_precision(torch.float32) + + group_size = q_kwargs["groupsize"] + bit_width = q_kwargs["bitwidth"] + has_weight_zeros = q_kwargs["has_weight_zeros"] + granularity = PerRow() if group_size == -1 else PerGroup(group_size) + weight_dtype = getattr(torch, f"int{bit_width}") + + try: + quantize_( + model, + int8_dynamic_activation_intx_weight( + weight_dtype=weight_dtype, + granularity=granularity, + has_weight_zeros=has_weight_zeros, + layout=PackedLinearInt8DynamicActivationIntxWeightLayout(), + ), + ) + except Exception as e: + print("Encountered error during quantization: {e}") + print("Trying with PlainLayout") + quantize_( + model, + int8_dynamic_activation_intx_weight( + weight_dtype=weight_dtype, + granularity=granularity, + has_weight_zeros=has_weight_zeros, + layout=PlainLayout(), + ), + ) + + if not support_tensor_subclass: + unwrap_tensor_subclass(model) + continue + + if quantizer == "embedding:wx": # These quantizers require float32 input weights. 
diff --git a/torchchat/utils/quantize.py b/torchchat/utils/quantize.py
index c4ecfbe6f..6246f1c05 100644
--- a/torchchat/utils/quantize.py
+++ b/torchchat/utils/quantize.py
@@ -50,6 +50,18 @@
     state_dict_device,
     use_et_backend,
 )
+from torchao.experimental.packed_linear_int8_dynamic_activation_intx_weight_layout import (
+    PackedLinearInt8DynamicActivationIntxWeightLayout,
+)
+from torchao.experimental.quant_api import (
+    int8_dynamic_activation_intx_weight,
+    IntxWeightEmbeddingQuantizer,
+)
+from torchao.quantization.granularity import (
+    PerGroup,
+    PerRow,
+)
+from torchao.dtypes import PlainLayout


 # Flag for whether the a8wxdq quantizer is available.
@@ -117,7 +129,45 @@ def quantize_model(
                 unwrap_tensor_subclass(model)
             continue

-        if quantizer in ["linear:a8wxdq", "embedding:wx"]:
+        if quantizer == "linear:a8wxdq":
+            if get_precision() != torch.float32:
+                print(f"Quantizer {quantizer} requires float32 inputs, but received {get_precision()}. Changing dtype to float32. Note that after quantization, the weights will be lowbit integers, not float32.")
+                set_precision(torch.float32)
+
+            group_size = q_kwargs["groupsize"]
+            bit_width = q_kwargs["bitwidth"]
+            has_weight_zeros = q_kwargs["has_weight_zeros"]
+            granularity = PerRow() if group_size == -1 else PerGroup(group_size)
+            weight_dtype = getattr(torch, f"int{bit_width}")
+
+            try:
+                quantize_(
+                    model,
+                    int8_dynamic_activation_intx_weight(
+                        weight_dtype=weight_dtype,
+                        granularity=granularity,
+                        has_weight_zeros=has_weight_zeros,
+                        layout=PackedLinearInt8DynamicActivationIntxWeightLayout(),
+                    ),
+                )
+            except Exception as e:
+                print(f"Encountered error during quantization: {e}")
+                print("Trying with PlainLayout")
+                quantize_(
+                    model,
+                    int8_dynamic_activation_intx_weight(
+                        weight_dtype=weight_dtype,
+                        granularity=granularity,
+                        has_weight_zeros=has_weight_zeros,
+                        layout=PlainLayout(),
+                    ),
+                )
+
+            if not support_tensor_subclass:
+                unwrap_tensor_subclass(model)
+            continue
+
+        if quantizer == "embedding:wx":
             # These quantizers require float32 input weights. Note that after quantization,
             # the weights will no longer be float32, but lowbit integers
             if get_precision() != torch.float32:
@@ -889,10 +939,12 @@ def quantized_model(self) -> nn.Module:
 # class references
 quantizer_class_dict = {
     "embedding": EmbeddingOnlyQuantHandler,
+    "embedding:wx": IntxWeightEmbeddingQuantizer,
     "linear:int8": WeightOnlyInt8QuantHandler,
     "precision": PrecisionHandler,
     "executor": ExecutorHandler,
     "linear:int4": Int4WeightOnlyQuantizer,
+    "linear:a8wxdq": None,  # uses quantize_ API
     "linear:a8w4dq": Int8DynActInt4WeightQuantizer,
 }

@@ -915,27 +967,10 @@ def quantized_model(self) -> nn.Module:
         torchao_experimental_quant_api_spec.loader.exec_module(
             torchao_experimental_quant_api
         )
-        from torchao_experimental_quant_api import (
-            Int8DynActIntxWeightLinearQuantizer,
-            IntxWeightEmbeddingQuantizer,
-            UIntxWeightOnlyLinearQuantizer,
-        )
-
-        quantizer_class_dict["linear:a8wxdq"] = Int8DynActIntxWeightLinearQuantizer
-        quantizer_class_dict["embedding:wx"] = IntxWeightEmbeddingQuantizer
+        from torchao_experimental_quant_api import UIntxWeightOnlyLinearQuantizer

         quantizer_class_dict["linear:afpwx"] = UIntxWeightOnlyLinearQuantizer

         # Try loading custom op
-        try:
-            import glob
-
-            libs = glob.glob(f"{torchao_build_path}/cmake-out/lib/libtorchao_ops_aten.*")
-            libs = list(filter(lambda l: (l.endswith("so") or l.endswith("dylib")), libs))
-            torch.ops.load_library(libs[0])
-            print("Loaded torchao cpu ops.")
-        except Exception as e:
-            print("Unable to load torchao cpu ops library. Slow fallback kernels will be used.")
-
         try:
             libname = "libtorchao_ops_mps_aten.dylib"
             libpath = f"{torchao_build_path}/cmake-out/lib/{libname}"
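The headline change above routes linear:a8wxdq through torchao's quantize_ API with the packed layout, falling back to PlainLayout if packing fails. A condensed, self-contained sketch of the call it makes (the toy model is illustrative, and quantize_ is assumed to come from torchao.quantization.quant_api as elsewhere in quantize.py; parameter values mirror the CI config):

```
import torch
from torchao.quantization.quant_api import quantize_
from torchao.quantization.granularity import PerGroup
from torchao.experimental.quant_api import int8_dynamic_activation_intx_weight
from torchao.experimental.packed_linear_int8_dynamic_activation_intx_weight_layout import (
    PackedLinearInt8DynamicActivationIntxWeightLayout,
)

# A float32 stand-in for the model being quantized
model = torch.nn.Sequential(torch.nn.Linear(256, 256, dtype=torch.float32))

quantize_(
    model,
    int8_dynamic_activation_intx_weight(
        weight_dtype=torch.int3,    # bitwidth 3
        granularity=PerGroup(128),  # groupsize 128; PerRow() for groupsize -1
        has_weight_zeros=False,
        layout=PackedLinearInt8DynamicActivationIntxWeightLayout(),
    ),
)
```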
diff --git a/torchchat/utils/scripts/build_native.sh b/torchchat/utils/scripts/build_native.sh
index b8481b4cc..d0e141678 100755
--- a/torchchat/utils/scripts/build_native.sh
+++ b/torchchat/utils/scripts/build_native.sh
@@ -57,6 +57,7 @@ while (( "$#" )); do
 done

 source "$(dirname "${BASH_SOURCE[0]}")/install_utils.sh"
+find_cmake_prefix_path

 if [ -z "${ET_BUILD_DIR}" ]; then
   ET_BUILD_DIR="et-build"
@@ -80,23 +81,22 @@ if [[ "$TARGET" == "et" ]]; then
     exit 1
   fi

-    source "$(dirname "${BASH_SOURCE[0]}")/install_utils.sh"
-    find_cmake_prefix_path
     EXECUTORCH_INCLUDE_DIRS="${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/install/include;${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/src"
     EXECUTORCH_LIBRARIES="${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/install/lib/libexecutorch_no_prim_ops.a;${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/install/lib/libextension_threadpool.a;${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/install/lib/libcpuinfo.a;${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/install/lib/libpthreadpool.a"
     install_torchao_executorch_ops
   fi
 elif [[ "$LINK_TORCHAO_OPS" == "ON" ]]; then
-  # Install OMP when using AOTI with linked torchao ops
-  brew install libomp
+  # Install OMP when using AOTI with linked torchao ops
+  brew install libomp
+  install_torchao_aten_ops cpu
 fi
 popd

 # CMake commands
 if [[ "$TARGET" == "et" ]]; then
-  cmake -S . -B ./cmake-out -DCMAKE_PREFIX_PATH=`python3 -c 'import torch;print(torch.utils.cmake_prefix_path)'` -DLINK_TORCHAO_OPS="${LINK_TORCHAO_OPS}" -DET_USE_ADAPTIVE_THREADS=ON -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=1" -G Ninja
+  cmake -S . -B ./cmake-out -DCMAKE_PREFIX_PATH="${MY_CMAKE_PREFIX_PATH}" -DLINK_TORCHAO_OPS="${LINK_TORCHAO_OPS}" -DET_USE_ADAPTIVE_THREADS=ON -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=1" -G Ninja
 else
-  cmake -S . -B ./cmake-out -DCMAKE_PREFIX_PATH=`python3 -c 'import torch;print(torch.utils.cmake_prefix_path)'` -DLINK_TORCHAO_OPS="${LINK_TORCHAO_OPS}" -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=1" -G Ninja
+  cmake -S . -B ./cmake-out -DCMAKE_PREFIX_PATH="${MY_CMAKE_PREFIX_PATH}" -DLINK_TORCHAO_OPS="${LINK_TORCHAO_OPS}" -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=1" -G Ninja
 fi

 cmake --build ./cmake-out --target "${TARGET}"_run
diff --git a/torchchat/utils/scripts/build_torchao_ops.sh b/torchchat/utils/scripts/build_torchao_ops.sh
index 46e2479ac..a8388d8d7 100644
--- a/torchchat/utils/scripts/build_torchao_ops.sh
+++ b/torchchat/utils/scripts/build_torchao_ops.sh
@@ -16,6 +16,5 @@ source "$(dirname "${BASH_SOURCE[0]}")/install_utils.sh"

 pushd ${TORCHCHAT_ROOT}
 find_cmake_prefix_path
-clone_torchao
 install_torchao_aten_ops "$device"
 popd
diff --git a/torchchat/utils/scripts/clone_torchao.sh b/torchchat/utils/scripts/clone_torchao.sh
new file mode 100644
index 000000000..834e9434a
--- /dev/null
+++ b/torchchat/utils/scripts/clone_torchao.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+source "$(dirname "${BASH_SOURCE[0]}")/install_utils.sh"
+
+pushd ${TORCHCHAT_ROOT}
+clone_torchao
+popd
diff --git a/torchchat/utils/scripts/install_et.sh b/torchchat/utils/scripts/install_et.sh
index 8062a8316..531e80a6e 100755
--- a/torchchat/utils/scripts/install_et.sh
+++ b/torchchat/utils/scripts/install_et.sh
@@ -19,4 +19,8 @@ pushd ${TORCHCHAT_ROOT}
 find_cmake_prefix_path
 clone_executorch
 install_executorch_libs $ENABLE_ET_PYBIND
+
+# During installation, ET uninstalls torchchat's preferred version of torchao,
+# so we reinstall it here
+bash install/install_torchao.sh
 popd
diff --git a/torchchat/utils/scripts/install_utils.sh b/torchchat/utils/scripts/install_utils.sh
index 83b412be0..68987e666 100644
--- a/torchchat/utils/scripts/install_utils.sh
+++ b/torchchat/utils/scripts/install_utils.sh
@@ -200,6 +200,9 @@ install_torchao_aten_ops() {
   CMAKE_OUT_DIR=${TORCHCHAT_ROOT}/torchao-build/cmake-out
   cmake -DCMAKE_PREFIX_PATH=${MY_CMAKE_PREFIX_PATH} \
     -DCMAKE_INSTALL_PREFIX=${CMAKE_OUT_DIR} \
+    -DTORCHAO_BUILD_CPU_AARCH64=ON \
+    -DTORCHAO_PARALLEL_BACKEND=OPENMP \
+    -DOpenMP_ROOT="$(brew --prefix)/opt/libomp" \
     -DCMAKE_BUILD_TYPE="Release" \
     -S . \
     -B ${CMAKE_OUT_DIR} -G Ninja
@@ -217,6 +220,7 @@ install_torchao_executorch_ops() {
     -DCMAKE_INSTALL_PREFIX=${CMAKE_OUT_DIR} \
     -DCMAKE_BUILD_TYPE="Release" \
     -DTORCHAO_BUILD_EXECUTORCH_OPS=ON \
+    -DTORCHAO_BUILD_CPU_AARCH64=ON \
     -DEXECUTORCH_INCLUDE_DIRS="${EXECUTORCH_INCLUDE_DIRS}" \
     -DEXECUTORCH_LIBRARIES="${EXECUTORCH_LIBRARIES}" \
     -S . \
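Taken together, the MPS setup path that the workflow and docs now describe reduces to two scripts; a sketch, assuming a Mac with Apple Silicon (the aarch64/OpenMP cmake flags above are applied inside install_torchao_aten_ops):

```
bash torchchat/utils/scripts/clone_torchao.sh        # clone torchao at the pinned commit
bash torchchat/utils/scripts/build_torchao_ops.sh mps
```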