Commit 0273bf5

fix the runtime error so that the TensorRT and OpenVINO execution providers can run together
1 parent 9de9b4c

2 files changed: +8 −8 lines changed

onnxruntime/core/providers/shared_library/provider_interfaces.h

Lines changed: 4 additions & 4 deletions
@@ -167,7 +167,7 @@ struct ProviderHost {
   virtual std::string demangle(const char* name) = 0;
   virtual std::string demangle(const std::string& name) = 0;
 
-#ifdef USE_CUDA
+//#ifdef USE_CUDA
   virtual std::unique_ptr<IAllocator> CreateCUDAAllocator(int16_t device_id, const char* name) = 0;
   virtual std::unique_ptr<IAllocator> CreateCUDAPinnedAllocator(const char* name) = 0;
   virtual std::unique_ptr<IDataTransfer> CreateGPUDataTransfer() = 0;
@@ -179,7 +179,7 @@ struct ProviderHost {
 
   virtual Status CudaCall_false(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) = 0;
   virtual void CudaCall_true(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) = 0;
-#endif
+//#endif
 
 #ifdef USE_MIGRAPHX
   virtual std::unique_ptr<IAllocator> CreateMIGraphXAllocator(int16_t device_id, const char* name) = 0;
@@ -1177,9 +1177,9 @@ struct ProviderHost {
   virtual training::DistributedRunContext& GetDistributedRunContextInstance() = 0;
 #endif
 
-#if defined(USE_CUDA) || defined(USE_ROCM)
+//#if defined(USE_CUDA) || defined(USE_ROCM)
   virtual PhiloxGenerator& PhiloxGenerator__Default() = 0;
-#endif
+//#endif
 
 #ifdef ENABLE_TRAINING_TORCH_INTEROP
   virtual void contrib__PythonOpBase__Init(contrib::PythonOpBase* p, const OpKernelInfo& info) = 0;
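
Why commenting out the guards plausibly helps (an inference from the diff, not stated in the commit): ProviderHost is the virtual interface that separately built provider shared libraries call back into, so every binary on either side of that boundary must agree on the struct's vtable layout. When members are declared only under #ifdef USE_CUDA, a core binary built with the flag and a provider library built without it assign different slot indices to every virtual declared after the guard, and calls dispatch to the wrong entry at runtime. Keeping the declarations unconditional pins a single layout for all builds. A minimal sketch of the failure mode, with hypothetical names:

// interface.h -- shared by two binaries that may be compiled with different flags.
struct Host {
  virtual ~Host() = default;
  virtual void Demangle() = 0;
#ifdef USE_CUDA
  // Present only in builds defining USE_CUDA: it consumes a vtable slot,
  // shifting the index of every virtual declared after it.
  virtual void CreateCudaAllocator() = 0;
#endif
  // This member's slot index now depends on the guard above, so a call
  // through a Host* that crosses builds with mismatched flags lands on
  // the wrong vtable entry.
  virtual void CreateMigraphxAllocator() = 0;
};

Unconditionally declaring the CUDA entry points, as this commit does, trades a few always-present slots for a layout that no build flag can perturb.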

onnxruntime/core/session/provider_bridge_ort.cc

Lines changed: 4 additions & 4 deletions
@@ -234,7 +234,7 @@ struct ProviderHostImpl : ProviderHost {
   void* CPUAllocator__Alloc(CPUAllocator* p, size_t size) override { return p->CPUAllocator::Alloc(size); }
   void CPUAllocator__Free(CPUAllocator* p, void* allocation) override { return p->CPUAllocator::Free(allocation); }
 
-#ifdef USE_CUDA
+//#ifdef USE_CUDA
   std::unique_ptr<IAllocator> CreateCUDAAllocator(int16_t device_id, const char* name) override { return GetProviderInfo_CUDA().CreateCUDAAllocator(device_id, name); }
   std::unique_ptr<IAllocator> CreateCUDAPinnedAllocator(const char* name) override { return GetProviderInfo_CUDA().CreateCUDAPinnedAllocator(name); }
   std::unique_ptr<IDataTransfer> CreateGPUDataTransfer() override { return GetProviderInfo_CUDA().CreateGPUDataTransfer(); }
@@ -247,7 +247,7 @@ struct ProviderHostImpl : ProviderHost {
 
   Status CudaCall_false(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) override { return GetProviderInfo_CUDA().CudaCall_false(retCode, exprString, libName, successCode, msg, file, line); }
   void CudaCall_true(int retCode, const char* exprString, const char* libName, int successCode, const char* msg, const char* file, const int line) override { GetProviderInfo_CUDA().CudaCall_true(retCode, exprString, libName, successCode, msg, file, line); }
-#endif
+//#endif
 
 #ifdef USE_MIGRAPHX
   std::unique_ptr<IAllocator> CreateMIGraphXAllocator(int16_t device_id, const char* name) override { return GetProviderInfo_MIGraphX().CreateMIGraphXAllocator(device_id, name); }
@@ -1419,9 +1419,9 @@ struct ProviderHostImpl : ProviderHost {
   training::DistributedRunContext& GetDistributedRunContextInstance() override { return training::DistributedRunContext::GetInstance(); }
 #endif
 
-#if defined(USE_CUDA) || defined(USE_ROCM)
+//#if defined(USE_CUDA) || defined(USE_ROCM)
   PhiloxGenerator& PhiloxGenerator__Default() override { return PhiloxGenerator::Default(); }
-#endif
+//#endif
 
 #ifdef ENABLE_TRAINING_TORCH_INTEROP
   void contrib__PythonOpBase__Init(contrib::PythonOpBase* p, const OpKernelInfo& info) override { p->PythonOpBase::Init(info); }
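
With the entry points unconditionally present on both sides of the provider bridge, the commit message's claim that TensorRT and OpenVINO can run together corresponds to registering both execution providers on one session. A minimal sketch, assuming a build with both EPs enabled; the model path and OpenVINO device string are placeholders, and the append functions are the C-API provider factories shipped in tensorrt_provider_factory.h and openvino_provider_factory.h in releases of this era:

#include <onnxruntime_cxx_api.h>
#include <tensorrt_provider_factory.h>
#include <openvino_provider_factory.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "trt_openvino");
  Ort::SessionOptions opts;

  // Registration order is priority order: TensorRT claims the subgraphs it
  // supports first, OpenVINO takes what remains, and any leftover nodes
  // fall back to the default CPU execution provider.
  Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_Tensorrt(opts, /*device_id=*/0));
  Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_OpenVINO(opts, "CPU_FP32"));

  Ort::Session session(env, ORT_TSTR("model.onnx"), opts);  // placeholder path
  return 0;
}

Before this fix, a build or process that pulled in both shared providers could hit the runtime dispatch mismatch described above; pinning the ProviderHost layout is what makes this combination safe.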
