NV TensorRT RTX EP - initial commit #24456

Merged Apr 24, 2025 (31 commits; changes shown from the first 27 commits).

Commits
fb6a731  NV TensorRT RTX EP - initial commit (ankan-ban, Apr 17, 2025)
27db0d7  make CUDA Minimal the default for NV EP (gedoensmax, Apr 17, 2025)
3391d2f  rename some remaining functions to NV TRT RTX (gedoensmax, Apr 17, 2025)
5a48dc2  Merge pull request #1 from gedoensmax/gedoensmax/nv-tensorrt-rtx-ep (ankan-ban, Apr 17, 2025)
b730d49  Fix memory paging issue seen with large models. (gaugarg-nv, Apr 17, 2025)
ac8b694  Merge pull request #3 from gaugarg-nv/nv-tensorrt-rtx-ep (ankan-ban, Apr 18, 2025)
e8b9aca  Fix: Apply clang-format formatting (Apr 21, 2025)
14986ad  added NvProviderFactory::CreateProvider(const OrtSessionOptions& sess… (Apr 21, 2025)
a33d103  added the implementation of SessionOptionsAppendExecutionProvider for (Apr 21, 2025)
90ca9e5  Fixed the help of onnxruntime_perf_test (Apr 21, 2025)
0267fb2  Add support for python bindings of NV TensorRT RTX EP (hrishikeshm, Apr 21, 2025)
718aade  fixed review comments (Apr 21, 2025)
57bee11  Merge pull request #5 from ishwar-raut1/ishwar/nv-tensorrt-rtx-ep (ankan-ban, Apr 21, 2025)
9a838a5  Merge pull request #6 from hrishikeshm/nv-tensorrt-rtx-ep (ankan-ban, Apr 21, 2025)
8c04376  use setShapeValuesV2 (ankan-ban, Apr 22, 2025)
e87e5cd  Merge pull request #7 from ankan-ban/misc-fixes (ankan-ban, Apr 22, 2025)
25312aa  Clean up old APIs and options (ankan-ban, Apr 22, 2025)
7dfa562  Merge pull request #9 from ankan-ban/options-clean-up (ankan-ban, Apr 22, 2025)
d8a6ce5  fix formatting (ankan-ban, Apr 22, 2025)
b998bfb  Significantly reduce options (gedoensmax, Apr 17, 2025)
228f9ce  Merge pull request #10 from ankan-ban/minor-formatting-fixes (ankan-ban, Apr 22, 2025)
3f34076  Merge branch 'microsoft:main' into nv-tensorrt-rtx-ep (ankan-ban, Apr 22, 2025)
39608f2  add naiive test (gedoensmax, Apr 17, 2025)
62cd19c  better warning (gedoensmax, Apr 22, 2025)
273595c  remove debug logging (Apr 22, 2025)
1d53281  lintrunner (gedoensmax, Apr 22, 2025)
8b7e443  dynamic shape support for models with multiple subgraphs (gedoensmax, Apr 22, 2025)
38bf50a  Merge pull request #11 from gedoensmax/gedoensmax/nv-reduce-code (ankan-ban, Apr 23, 2025)
11ca9ba  fix compile error in test (ankan-ban, Apr 23, 2025)
fbca483  Merge pull request #12 from ishwar-raut1/ishwar/nv-trt-rtx-ep-typos-f… (ankan-ban, Apr 23, 2025)
052c3c9  Merge pull request #13 from ankan-ban/test-compile-error-fix (gedoensmax, Apr 23, 2025)
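
The diffs below wire the new execution provider into the CMake build behind an onnxruntime_USE_NV option. For orientation, a configure-time sketch, not part of the PR (option names are taken from the diffs; the TensorRT location is an assumed placeholder), could look like:

    # enable_nv_ep.cmake -- hypothetical initial-cache file, used as: cmake -C enable_nv_ep.cmake <ort-src>/cmake
    set(onnxruntime_USE_NV ON CACHE BOOL "Build the NV TensorRT RTX execution provider")
    set(onnxruntime_USE_TENSORRT_BUILTIN_PARSER ON CACHE BOOL "Link the prebuilt parser instead of fetching onnx-tensorrt")
    set(onnxruntime_TENSORRT_HOME "/opt/tensorrt" CACHE PATH "Assumed install root searched for NvInfer.h and the nvinfer library")
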
11 changes: 11 additions & 0 deletions cmake/CMakeLists.txt
@@ -107,6 +107,7 @@ option(onnxruntime_ENABLE_MICROSOFT_INTERNAL "Use this option to enable/disable
option(onnxruntime_USE_VITISAI "Build with Vitis-AI" OFF)
option(onnxruntime_USE_TENSORRT "Build with TensorRT support" OFF)
option(onnxruntime_USE_TENSORRT_BUILTIN_PARSER "Use TensorRT builtin parser" OFF)
option(onnxruntime_USE_NV "Build with NV TensorRT RTX support" OFF)
option(onnxruntime_ENABLE_LTO "Enable link time optimization" OFF)
option(onnxruntime_CROSS_COMPILING "Cross compiling onnx runtime" OFF)
option(onnxruntime_GCOV_COVERAGE "Compile with options necessary to run code coverage" OFF)
@@ -250,6 +251,7 @@ option(onnxruntime_USE_LOCK_FREE_QUEUE "Build with lock-free task queue for thre
option(onnxruntime_FORCE_GENERIC_ALGORITHMS "Disable optimized arch-specific algorithms. Use only for testing and debugging generic algorithms." OFF)

option(onnxruntime_USE_TENSORRT_INTERFACE "Build ONNXRuntime shared lib which is compatible with TensorRT EP interface" OFF)
option(onnxruntime_USE_NV_INTERFACE "Build ONNXRuntime shared lib which is compatible with NV EP interface" OFF)
option(onnxruntime_USE_CUDA_INTERFACE "Build ONNXRuntime shared lib which is compatible with Cuda EP interface" OFF)
option(onnxruntime_USE_OPENVINO_INTERFACE "Build ONNXRuntime shared lib which is compatible with OpenVINO EP interface" OFF)
option(onnxruntime_USE_VITISAI_INTERFACE "Build ONNXRuntime shared lib which is compatible with Vitis-AI EP interface" OFF)
@@ -946,6 +948,15 @@ if (onnxruntime_USE_TENSORRT_INTERFACE AND (NOT onnxruntime_USE_TENSORRT))
  list(APPEND ORT_INTERFACE_FLAGS -DUSE_TENSORRT=1)
endif()

if (onnxruntime_USE_NV)
  list(APPEND ORT_PROVIDER_FLAGS -DUSE_NV=1)
  list(APPEND ONNXRUNTIME_PROVIDER_NAMES nv_tensorrt_rtx)
endif()

if (onnxruntime_USE_NV_INTERFACE AND (NOT onnxruntime_USE_NV))
  list(APPEND ORT_INTERFACE_FLAGS -DUSE_NV=1)
endif()

if (onnxruntime_USE_RKNPU)
  list(APPEND ORT_PROVIDER_FLAGS -DUSE_RKNPU=1)
  list(APPEND ONNXRUNTIME_PROVIDER_NAMES rknpu)
2 changes: 1 addition & 1 deletion cmake/onnxruntime_framework.cmake
@@ -63,7 +63,7 @@ endif()
if(onnxruntime_ENABLE_INSTRUMENT)
  target_compile_definitions(onnxruntime_framework PRIVATE ONNXRUNTIME_ENABLE_INSTRUMENT)
endif()
-if(onnxruntime_USE_TENSORRT OR onnxruntime_USE_NCCL)
+if(onnxruntime_USE_TENSORRT OR onnxruntime_USE_NCCL OR onnxruntime_USE_NV)
  # TODO: for now, core framework depends on CUDA. It should be moved to TensorRT EP
  # TODO: provider_bridge_ort.cc should not include nccl.h
  target_include_directories(onnxruntime_framework PRIVATE ${ONNXRUNTIME_ROOT} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
4 changes: 4 additions & 0 deletions cmake/onnxruntime_providers.cmake
@@ -132,6 +132,10 @@ if (onnxruntime_USE_TENSORRT)
  include(onnxruntime_providers_tensorrt.cmake)
endif()

if (onnxruntime_USE_NV)
  include(onnxruntime_providers_nv.cmake)
endif()

if (onnxruntime_USE_VITISAI)
  include(onnxruntime_providers_vitisai.cmake)
endif()
202 changes: 202 additions & 0 deletions cmake/onnxruntime_providers_nv.cmake
@@ -0,0 +1,202 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
find_package(CUDAToolkit 12.8 REQUIRED)
enable_language(CUDA)
if(onnxruntime_DISABLE_CONTRIB_OPS)
  message( FATAL_ERROR "To compile TensorRT execution provider contrib ops have to be enabled to dump an engine using com.microsoft:EPContext node." )
endif()
add_definitions(-DUSE_NV=1)
if (onnxruntime_NV_PLACEHOLDER_BUILDER)
  add_definitions(-DORT_NV_PLACEHOLDER_BUILDER)
endif()
set(BUILD_LIBRARY_ONLY 1)
add_definitions("-DONNX_ML=1")
add_definitions("-DONNX_NAMESPACE=onnx")
set(CUDA_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIRS})
set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
set(OLD_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(PROTOBUF_LIBRARY ${PROTOBUF_LIB})
if (WIN32)
  set(OLD_CMAKE_CUDA_FLAGS ${CMAKE_CUDA_FLAGS})
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4099 /wd4551 /wd4505 /wd4515 /wd4706 /wd4456 /wd4324 /wd4701 /wd4804 /wd4702 /wd4458 /wd4703")
  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4805")
  endif()
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -include algorithm")
  set(DISABLED_WARNINGS_FOR_TRT /wd4456)
endif()
if ( CMAKE_COMPILER_IS_GNUCC )
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-missing-field-initializers")
endif()
set(CXX_VERSION_DEFINED TRUE)

find_path(TENSORRT_INCLUDE_DIR NvInfer.h
  HINTS ${TENSORRT_ROOT}
  PATH_SUFFIXES include)


file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h NVINFER_VER_CONTENT)
string(REGEX MATCH "define NV_TENSORRT_MAJOR * +([0-9]+)" NV_TENSORRT_MAJOR "${NVINFER_VER_CONTENT}")
string(REGEX REPLACE "define NV_TENSORRT_MAJOR * +([0-9]+)" "\\1" NV_TENSORRT_MAJOR "${NV_TENSORRT_MAJOR}")
string(REGEX MATCH "define NV_TENSORRT_MINOR * +([0-9]+)" NV_TENSORRT_MINOR "${NVINFER_VER_CONTENT}")
string(REGEX REPLACE "define NV_TENSORRT_MINOR * +([0-9]+)" "\\1" NV_TENSORRT_MINOR "${NV_TENSORRT_MINOR}")
string(REGEX MATCH "define NV_TENSORRT_PATCH * +([0-9]+)" NV_TENSORRT_PATCH "${NVINFER_VER_CONTENT}")
string(REGEX REPLACE "define NV_TENSORRT_PATCH * +([0-9]+)" "\\1" NV_TENSORRT_PATCH "${NV_TENSORRT_PATCH}")
math(EXPR NV_TENSORRT_MAJOR_INT "${NV_TENSORRT_MAJOR}")
math(EXPR NV_TENSORRT_MINOR_INT "${NV_TENSORRT_MINOR}")
math(EXPR NV_TENSORRT_PATCH_INT "${NV_TENSORRT_PATCH}")

if (NV_TENSORRT_MAJOR)
  MESSAGE(STATUS "NV_TENSORRT_MAJOR is ${NV_TENSORRT_MAJOR}")
else()
  MESSAGE(STATUS "Can't find NV_TENSORRT_MAJOR macro")
endif()

# Check TRT version >= 10.0.1.6
if ((NV_TENSORRT_MAJOR_INT GREATER 10) OR
    (NV_TENSORRT_MAJOR_INT EQUAL 10 AND NV_TENSORRT_MINOR_INT GREATER 0) OR
    (NV_TENSORRT_MAJOR_INT EQUAL 10 AND NV_TENSORRT_PATCH_INT GREATER 0))
  set(TRT_GREATER_OR_EQUAL_TRT_10_GA ON)
else()
  message( FATAL_ERROR "Only TensorRT 10.x or higher is supported." )
endif()
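
Aside: the version gate above pulls the three version macros out of NvInferVersion.h with string(REGEX MATCH) and string(REGEX REPLACE). A minimal standalone sketch of the same extraction, not from the PR (runnable as cmake -P parse_trt_version.cmake against inlined sample content):

    # parse_trt_version.cmake -- illustrates the NvInferVersion.h parsing used above.
    set(NVINFER_VER_CONTENT "#define NV_TENSORRT_MAJOR 10\n#define NV_TENSORRT_MINOR 9\n#define NV_TENSORRT_PATCH 0\n")
    foreach(part MAJOR MINOR PATCH)
      # Same pattern as the provider's cmake: capture the integer that follows the macro name.
      string(REGEX MATCH "define NV_TENSORRT_${part} * +([0-9]+)" _match "${NVINFER_VER_CONTENT}")
      set(NV_TENSORRT_${part} "${CMAKE_MATCH_1}")
    endforeach()
    message(STATUS "Parsed TensorRT version: ${NV_TENSORRT_MAJOR}.${NV_TENSORRT_MINOR}.${NV_TENSORRT_PATCH}")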

# TensorRT 10 GA onwards, the TensorRT libraries will have major version appended to the end on Windows,
# for example, nvinfer_10.dll, nvonnxparser_10.dll ...
if (WIN32 AND TRT_GREATER_OR_EQUAL_TRT_10_GA)
  set(NVINFER_LIB "nvinfer_${NV_TENSORRT_MAJOR}")
  set(PARSER_LIB "nvonnxparser_${NV_TENSORRT_MAJOR}")
endif()

if (NOT NVINFER_LIB)
  set(NVINFER_LIB "nvinfer")
endif()

if (NOT PARSER_LIB)
  set(PARSER_LIB "nvonnxparser")
endif()

MESSAGE(STATUS "Looking for ${NVINFER_LIB}")

find_library(TENSORRT_LIBRARY_INFER ${NVINFER_LIB}
  HINTS ${TENSORRT_ROOT}
  PATH_SUFFIXES lib lib64 lib/x64)

if (NOT TENSORRT_LIBRARY_INFER)
  MESSAGE(STATUS "Can't find ${NVINFER_LIB}")
endif()

if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
  MESSAGE(STATUS "Looking for ${PARSER_LIB}")

  find_library(TENSORRT_LIBRARY_NVONNXPARSER ${PARSER_LIB}
    HINTS ${TENSORRT_ROOT}
    PATH_SUFFIXES lib lib64 lib/x64)

  if (NOT TENSORRT_LIBRARY_NVONNXPARSER)
    MESSAGE(STATUS "Can't find ${PARSER_LIB}")
  endif()

  set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_NVONNXPARSER})
  MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
else()
  if (TRT_GREATER_OR_EQUAL_TRT_10_GA)
    set(ONNX_USE_LITE_PROTO ON)
  endif()
  onnxruntime_fetchcontent_declare(
    onnx_tensorrt
    URL ${DEP_URL_onnx_tensorrt}
    URL_HASH SHA1=${DEP_SHA1_onnx_tensorrt}
    EXCLUDE_FROM_ALL
  )
  if (NOT CUDA_INCLUDE_DIR)
    set(CUDA_INCLUDE_DIR ${CUDAToolkit_INCLUDE_DIRS}) # onnx-tensorrt repo needs this variable to build
  endif()
  # The onnx_tensorrt repo contains a test program, getSupportedAPITest, which doesn't support Windows. It uses
  # unistd.h. So we must exclude it from our build. onnxruntime_fetchcontent_makeavailable is for the purpose.
  onnxruntime_fetchcontent_makeavailable(onnx_tensorrt)
  include_directories(${onnx_tensorrt_SOURCE_DIR})
  set(CMAKE_CXX_FLAGS ${OLD_CMAKE_CXX_FLAGS})
  if ( CMAKE_COMPILER_IS_GNUCC )
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
  endif()
  if (WIN32)
    set(CMAKE_CUDA_FLAGS ${OLD_CMAKE_CUDA_FLAGS})
    unset(PROTOBUF_LIBRARY)
    unset(OLD_CMAKE_CXX_FLAGS)
    unset(OLD_CMAKE_CUDA_FLAGS)
    set_target_properties(${PARSER_LIB} PROPERTIES LINK_FLAGS "/ignore:4199")
    target_compile_options(nvonnxparser_static PRIVATE /FIio.h /wd4100)
    target_compile_options(${PARSER_LIB} PRIVATE /FIio.h /wd4100)
  endif()
  # Static libraries are just nvonnxparser_static on all platforms
  set(onnxparser_link_libs nvonnxparser_static)
  set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER})
  MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
endif()

include_directories(${TENSORRT_INCLUDE_DIR})
# ${TENSORRT_LIBRARY} is empty if we link nvonnxparser_static.
# nvonnxparser_static is linked against tensorrt libraries in onnx-tensorrt
# See https://github.com/onnx/onnx-tensorrt/blob/8af13d1b106f58df1e98945a5e7c851ddb5f0791/CMakeLists.txt#L121
# However, starting from TRT 10 GA, nvonnxparser_static doesn't link against tensorrt libraries.
# Therefore, the above code finds ${TENSORRT_LIBRARY_INFER}
set(trt_link_libs ${CMAKE_DL_LIBS} ${TENSORRT_LIBRARY})
file(GLOB_RECURSE onnxruntime_providers_nv_tensorrt_rtx_cc_srcs CONFIGURE_DEPENDS
  "${ONNXRUNTIME_ROOT}/core/providers/nv_tensorrt_rtx/*.h"
  "${ONNXRUNTIME_ROOT}/core/providers/nv_tensorrt_rtx/*.cc"
  "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.h"
  "${ONNXRUNTIME_ROOT}/core/providers/shared_library/*.cc"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.h"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_stream_handle.cc"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.h"
  "${ONNXRUNTIME_ROOT}/core/providers/cuda/cuda_graph.cc"
)

source_group(TREE ${ONNXRUNTIME_ROOT}/core FILES ${onnxruntime_providers_nv_tensorrt_rtx_cc_srcs})
onnxruntime_add_shared_library_module(onnxruntime_providers_nv_tensorrt_rtx ${onnxruntime_providers_nv_tensorrt_rtx_cc_srcs})
onnxruntime_add_include_to_target(onnxruntime_providers_nv_tensorrt_rtx onnxruntime_common)
target_link_libraries(onnxruntime_providers_nv_tensorrt_rtx PRIVATE Eigen3::Eigen onnx flatbuffers::flatbuffers Boost::mp11 safeint_interface)
add_dependencies(onnxruntime_providers_nv_tensorrt_rtx onnxruntime_providers_shared ${onnxruntime_EXTERNAL_DEPENDENCIES})
if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
  target_link_libraries(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${trt_link_libs} ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers Boost::mp11 safeint_interface ${ABSEIL_LIBS} PUBLIC CUDA::cudart)
else()
  target_link_libraries(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${onnxparser_link_libs} ${trt_link_libs} ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers ${ABSEIL_LIBS} PUBLIC CUDA::cudart)
endif()
target_include_directories(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR}
  PUBLIC ${CUDAToolkit_INCLUDE_DIRS})

# ${CMAKE_CURRENT_BINARY_DIR} is so that #include "onnxruntime_config.h" inside tensor_shape.h is found
set_target_properties(onnxruntime_providers_nv_tensorrt_rtx PROPERTIES LINKER_LANGUAGE CUDA)
set_target_properties(onnxruntime_providers_nv_tensorrt_rtx PROPERTIES FOLDER "ONNXRuntime")
target_compile_definitions(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ONNXIFI_BUILD_LIBRARY=1)
target_compile_options(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${DISABLED_WARNINGS_FOR_TRT})
if (WIN32)
  target_compile_options(onnxruntime_providers_nv_tensorrt_rtx INTERFACE /wd4456)
endif()
# set CUDA_MINIMAL as default for NV provider since we do not have fallback to CUDA
target_compile_definitions(onnxruntime_providers_nv_tensorrt_rtx PRIVATE USE_CUDA_MINIMAL=1)

# Needed for the provider interface, as it includes training headers when training is enabled
if (onnxruntime_ENABLE_TRAINING_OPS)
  target_include_directories(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${ORTTRAINING_ROOT})
  if (onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
    onnxruntime_add_include_to_target(onnxruntime_providers_nv_tensorrt_rtx Python::Module)
  endif()
endif()

if(APPLE)
  set_property(TARGET onnxruntime_providers_nv_tensorrt_rtx APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker -exported_symbols_list ${ONNXRUNTIME_ROOT}/core/providers/nv_tensorrt_rtx/exported_symbols.lst")
elseif(UNIX)
  set_property(TARGET onnxruntime_providers_nv_tensorrt_rtx APPEND_STRING PROPERTY COMPILE_FLAGS "-Wno-deprecated-declarations")
  set_property(TARGET onnxruntime_providers_nv_tensorrt_rtx APPEND_STRING PROPERTY LINK_FLAGS "-Xlinker --version-script=${ONNXRUNTIME_ROOT}/core/providers/nv_tensorrt_rtx/version_script.lds -Xlinker --gc-sections")
elseif(WIN32)
  set_property(TARGET onnxruntime_providers_nv_tensorrt_rtx APPEND_STRING PROPERTY LINK_FLAGS "-DEF:${ONNXRUNTIME_ROOT}/core/providers/nv_tensorrt_rtx/symbols.def")
else()
  message(FATAL_ERROR "onnxruntime_providers_nv_tensorrt_rtx unknown platform, need to specify shared library exports for it")
endif()

install(TARGETS onnxruntime_providers_nv_tensorrt_rtx
        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
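
Note: the provider is built as a dynamically loaded module and depends on onnxruntime_providers_shared, so a deployment needs both libraries next to the consuming binary; the onnxruntime_python.cmake hunk further down performs exactly this copy for the python capi package. A hedged consumer-side sketch (my_app is an assumed application target, not from the PR):

    # Hypothetical post-build copy for an application target named my_app,
    # mirroring the python-bindings hunk below.
    add_custom_command(TARGET my_app POST_BUILD
      COMMAND ${CMAKE_COMMAND} -E copy
              $<TARGET_FILE:onnxruntime_providers_nv_tensorrt_rtx>
              $<TARGET_FILE:onnxruntime_providers_shared>
              $<TARGET_FILE_DIR:my_app>)
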
3 changes: 2 additions & 1 deletion cmake/onnxruntime_providers_tensorrt.cmake
@@ -27,7 +27,8 @@
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-missing-field-initializers")
endif()
set(CXX_VERSION_DEFINED TRUE)
-
+message(STATUS "ishwar TENSORRT_ROOT is ${TENSORRT_ROOT}")
+message(STATUS "onnxruntime_USE_TENSORRT_BUILTIN_PARSER is ${onnxruntime_USE_TENSORRT_BUILTIN_PARSER}")
find_path(TENSORRT_INCLUDE_DIR NvInfer.h
  HINTS ${TENSORRT_ROOT}
  PATH_SUFFIXES include)
10 changes: 10 additions & 0 deletions cmake/onnxruntime_python.cmake
@@ -928,6 +928,16 @@ if (onnxruntime_USE_TENSORRT)
  )
endif()

if (onnxruntime_USE_NV)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy
        $<TARGET_FILE:onnxruntime_providers_nv_tensorrt_rtx>
        $<TARGET_FILE:onnxruntime_providers_shared>
        $<TARGET_FILE_DIR:${build_output_target}>/onnxruntime/capi/
  )
endif()

if (onnxruntime_USE_MIGRAPHX)
  add_custom_command(
    TARGET onnxruntime_pybind11_state POST_BUILD