[Not for merge] Demo for how to build against a different version of onnxruntime #1719

Open · wants to merge 1 commit into master
26 changes: 26 additions & 0 deletions build-for-1.9.0.sh
@@ -0,0 +1,26 @@
#!/usr/bin/env bash


set -ex

if [[ ! -f ./setup.py || ! -f ./sherpa-onnx/c-api/c-api.h || ! -d ./android/SherpaOnnx ]]; then
  echo "Please run this script from the top-level sherpa-onnx directory"
  exit 1
fi

if [ ! -d /Users/fangjun/t/onnxruntime-osx-x64-1.9.0/lib ]; then
  mkdir -p /Users/fangjun/t
  pushd /Users/fangjun/t
  wget https://github.com/microsoft/onnxruntime/releases/download/v1.9.0/onnxruntime-osx-x64-1.9.0.tgz
  tar xvf onnxruntime-osx-x64-1.9.0.tgz
  rm onnxruntime-osx-x64-1.9.0.tgz
  popd
fi

export SHERPA_ONNXRUNTIME_LIB_DIR=/Users/fangjun/t/onnxruntime-osx-x64-1.9.0/lib
export SHERPA_ONNXRUNTIME_INCLUDE_DIR=/Users/fangjun/t/onnxruntime-osx-x64-1.9.0/include

mkdir -p ./build-1.9.0
cd ./build-1.9.0
cmake -DBUILD_SHARED_LIBS=ON ..
make
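As a sanity check when reproducing this, you can compile a tiny program against the same include/lib directories to confirm which onnxruntime the build actually picked up. A minimal sketch (check-ort-version.cc is a hypothetical helper, not part of this PR; ORT_API_VERSION is defined by onnxruntime_c_api.h and is 9 for the 1.9.x releases):

// check-ort-version.cc (hypothetical, not part of this PR).
// Build, e.g.:
//   g++ check-ort-version.cc -I"$SHERPA_ONNXRUNTIME_INCLUDE_DIR" \
//       -L"$SHERPA_ONNXRUNTIME_LIB_DIR" -lonnxruntime
#include <cstdio>

#include "onnxruntime_c_api.h"

int main() {
  // Compile-time header version: 9 for onnxruntime 1.9.x, 10 for 1.10.x, ...
  std::printf("compiled against ORT_API_VERSION %d\n", ORT_API_VERSION);
  // Runtime library version string; should match the unpacked 1.9.0 archive.
  std::printf("linked against onnxruntime %s\n",
              OrtGetApiBase()->GetVersionString());
  return 0;
}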
4 changes: 4 additions & 0 deletions sherpa-onnx/csrc/provider-config.h
@@ -14,7 +14,11 @@
 namespace sherpa_onnx {
 
 struct CudaConfig {
+#if ORT_API_VERSION >= 10
   int32_t cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
+#else
+  int32_t cudnn_conv_algo_search = 1;
+#endif
 
   CudaConfig() = default;
   explicit CudaConfig(int32_t cudnn_conv_algo_search)
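For context on the two values above: in newer onnxruntime headers the cuDNN search mode is the named enum below, and the heuristic entry has value 1, which is what the #else branch hard-codes for headers that predate the named constant. Reproduced from the upstream C API for illustration; double-check against the header you actually build with:

// OrtCudnnConvAlgoSearch as it appears in recent onnxruntime_c_api.h:
typedef enum OrtCudnnConvAlgoSearch {
  OrtCudnnConvAlgoSearchExhaustive,  // 0: exhaustive benchmarking (slow)
  OrtCudnnConvAlgoSearchHeuristic,   // 1: lightweight heuristic search
  OrtCudnnConvAlgoSearchDefault,     // 2: fixed default algorithm
} OrtCudnnConvAlgoSearch;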
17 changes: 14 additions & 3 deletions sherpa-onnx/csrc/session.cc
@@ -11,7 +11,7 @@

#include "sherpa-onnx/csrc/macros.h"
#include "sherpa-onnx/csrc/provider.h"
#if defined(__APPLE__)
#if defined(__APPLE__) && (ORT_API_VERSION >= 10)
#include "coreml_provider_factory.h" // NOLINT
#endif

@@ -76,9 +76,10 @@ Ort::SessionOptions GetSessionOptionsImpl(
       break;
     }
     case Provider::kTRT: {
+#if ORT_API_VERSION >= 10
       if (provider_config == nullptr) {
         SHERPA_ONNX_LOGE(
-            "Tensorrt support for Online models ony,"
+            "Tensorrt support for Online models only,"
             "Must be extended for offline and others");
         exit(1);
       }
@@ -151,6 +152,12 @@ Ort::SessionOptions GetSessionOptionsImpl(
       }
       // break; is omitted here intentionally so that
       // if TRT is not available, CUDA will be used
+#else
+      SHERPA_ONNX_LOGE(
+          "TensorRT is not supported; onnxruntime API version %d is too old. "
+          "Falling back to the CUDA provider",
+          static_cast<int32_t>(ORT_API_VERSION));
+#endif
     }
     case Provider::kCUDA: {
       if (std::find(available_providers.begin(), available_providers.end(),
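The omitted break; above means kTRT intentionally falls through into kCUDA whenever TensorRT cannot be used. A self-contained sketch of that idiom (hypothetical Configure function, not the PR's code; requires C++17 for [[fallthrough]]):

#include <cstdio>

enum class Provider { kTRT, kCUDA };

void Configure(Provider p) {
  switch (p) {
    case Provider::kTRT:
      std::printf("TRT not usable; falling back to CUDA\n");
      [[fallthrough]];  // deliberate: reuse the CUDA setup below
    case Provider::kCUDA:
      std::printf("configuring the CUDA provider\n");
      break;
  }
}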
@@ -165,7 +172,11 @@ Ort::SessionOptions GetSessionOptionsImpl(
       } else {
         options.device_id = 0;
         // Default OrtCudnnConvAlgoSearchExhaustive is extremely slow
+#if ORT_API_VERSION >= 10
         options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
+#else
+        options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearch(1);
+#endif
         // set more options as needed
       }
       sess_opts.AppendExecutionProvider_CUDA(options);
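For readers unfamiliar with this API: the options object here is onnxruntime's OrtCUDAProviderOptions struct. A standalone sketch of the same configuration (MakeCudaOptions is a hypothetical name; assumes a CUDA-enabled onnxruntime with headers new enough for the named constant):

#include "onnxruntime_cxx_api.h"

Ort::SessionOptions MakeCudaOptions() {
  Ort::SessionOptions sess_opts;
  OrtCUDAProviderOptions options;  // C struct from onnxruntime_c_api.h
  options.device_id = 0;
  // Avoid the exhaustive cuDNN search; see provider-config.h above.
  options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchHeuristic;
  sess_opts.AppendExecutionProvider_CUDA(options);
  return sess_opts;
}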
@@ -196,7 +207,7 @@ Ort::SessionOptions GetSessionOptionsImpl(
       break;
     }
     case Provider::kCoreML: {
-#if defined(__APPLE__)
+#if defined(__APPLE__) && (ORT_API_VERSION >= 10)
       uint32_t coreml_flags = 0;
       (void)OrtSessionOptionsAppendExecutionProvider_CoreML(sess_opts,
                                                             coreml_flags);
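Finally, the case labels in this switch can only select execution providers that were compiled into the onnxruntime build being linked. A small sketch (list-providers.cc is hypothetical, not part of this PR) to list what a given archive, such as the 1.9.0 one above, actually ships:

// list-providers.cc (hypothetical, not part of this PR).
#include <cstdio>
#include <string>
#include <vector>

#include "onnxruntime_cxx_api.h"

int main() {
  // Ort::GetAvailableProviders() is the same call that populates
  // available_providers in session.cc; entries look like
  // "CPUExecutionProvider", "CUDAExecutionProvider", etc.
  for (const std::string &p : Ort::GetAvailableProviders()) {
    std::printf("%s\n", p.c_str());
  }
  return 0;
}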