@@ -281,7 +281,7 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
                                            std::unordered_map<std::string, std::vector<std::vector<int64_t>>>& profile_opt_shapes,
                                            ShapeRangesMap& input_explicit_shape_ranges) {
   if (trt_profiles.size() == 0) {
-    LOGS_DEFAULT(WARNING) << "[Nv EP] Number of optimization profiles should be greater than 0, but it's 0.";
+    LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] Number of optimization profiles should be greater than 0, but it's 0.";
     return false;
   }
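Note: the `trt_profiles` vector checked above is created by the EP before this call. A minimal sketch of that setup, using the public TensorRT APIs (`createOptimizationProfile`, `addOptimizationProfile`); the helper and its scaffolding are illustrative, not the EP's exact code:

```cpp
// Hedged sketch: where a vector of optimization profiles typically comes from.
#include <NvInfer.h>
#include <vector>

std::vector<nvinfer1::IOptimizationProfile*> MakeProfiles(
    nvinfer1::IBuilder& builder, nvinfer1::IBuilderConfig& config, int count) {
  std::vector<nvinfer1::IOptimizationProfile*> profiles;
  for (int i = 0; i < count; i++) {
    nvinfer1::IOptimizationProfile* p = builder.createOptimizationProfile();
    // ... per-input setDimensions()/setShapeValues() calls would go here ...
    profiles.push_back(p);
    config.addOptimizationProfile(p);  // profile remains owned by the builder
  }
  return profiles;
}
```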
@@ -295,8 +295,8 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
       input_explicit_shape_ranges[input_name] = inner_map;
     }
 
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Begin to apply profile shapes ...";
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Input tensor name is '" << input_name << "', number of profiles found is " << trt_profiles.size();
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Begin to apply profile shapes ...";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Input tensor name is '" << input_name << "', number of profiles found is " << trt_profiles.size();
 
     for (size_t i = 0; i < trt_profiles.size(); i++) {
       nvinfer1::Dims dims = input->getDimensions();
@@ -309,7 +309,7 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
         int shape_size = nb_dims == 0 ? 1 : static_cast<int>(profile_min_shapes[input_name][i].size());
         std::vector<int64_t> shapes_min(shape_size), shapes_opt(shape_size), shapes_max(shape_size);
 
-        LOGS_DEFAULT(VERBOSE) << "[Nv EP] shape size of this shape tensor is " << shape_size;
+        LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] shape size of this shape tensor is " << shape_size;
 
         for (int j = 0; j < shape_size; j++) {
           auto min_value = profile_min_shapes[input_name][i][j];
@@ -318,9 +318,9 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
           shapes_min[j] = static_cast<int64_t>(min_value);
           shapes_max[j] = static_cast<int64_t>(max_value);
           shapes_opt[j] = static_cast<int64_t>(opt_value);
-          LOGS_DEFAULT(VERBOSE) << "[Nv EP] shapes_min.d[" << j << "] is " << shapes_min[j];
-          LOGS_DEFAULT(VERBOSE) << "[Nv EP] shapes_max.d[" << j << "] is " << shapes_max[j];
-          LOGS_DEFAULT(VERBOSE) << "[Nv EP] shapes_opt.d[" << j << "] is " << shapes_opt[j];
+          LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] shapes_min.d[" << j << "] is " << shapes_min[j];
+          LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] shapes_max.d[" << j << "] is " << shapes_max[j];
+          LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] shapes_opt.d[" << j << "] is " << shapes_opt[j];
 
           if (input_explicit_shape_ranges[input_name].find(j) == input_explicit_shape_ranges[input_name].end()) {
             std::vector<std::vector<int64_t>> profile_vector(trt_profiles.size());
@@ -342,7 +342,7 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
         dims_max.nbDims = nb_dims;
         dims_opt.nbDims = nb_dims;
 
-        LOGS_DEFAULT(VERBOSE) << "[Nv EP] number of dimension of this execution tensor is " << nb_dims;
+        LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] number of dimensions of this execution tensor is " << nb_dims;
 
         for (int j = 0; j < nb_dims; j++) {
           if (dims.d[j] == -1) {
@@ -352,9 +352,9 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
             dims_min.d[j] = static_cast<int32_t>(min_value);
             dims_max.d[j] = static_cast<int32_t>(max_value);
             dims_opt.d[j] = static_cast<int32_t>(opt_value);
-            LOGS_DEFAULT(VERBOSE) << "[Nv EP] dims_min.d[" << j << "] is " << dims_min.d[j];
-            LOGS_DEFAULT(VERBOSE) << "[Nv EP] dims_max.d[" << j << "] is " << dims_max.d[j];
-            LOGS_DEFAULT(VERBOSE) << "[Nv EP] dims_opt.d[" << j << "] is " << dims_opt.d[j];
+            LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] dims_min.d[" << j << "] is " << dims_min.d[j];
+            LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] dims_max.d[" << j << "] is " << dims_max.d[j];
+            LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] dims_opt.d[" << j << "] is " << dims_opt.d[j];
 
             if (input_explicit_shape_ranges[input_name].find(j) == input_explicit_shape_ranges[input_name].end()) {
               std::vector<std::vector<int64_t>> profile_vector(trt_profiles.size());
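For context, the `dims_min`/`dims_opt`/`dims_max` values logged in this loop are ultimately handed to TensorRT per profile. A hedged sketch of that step using the public `IOptimizationProfile::setDimensions` API; the helper name is invented for illustration (shape tensors would go through `setShapeValues` instead):

```cpp
// Hedged sketch, not the EP's exact code: apply min/opt/max dims for one
// execution-tensor input to a single optimization profile.
#include <NvInfer.h>

inline bool ApplyDims(nvinfer1::IOptimizationProfile* profile,
                      const char* input_name,
                      const nvinfer1::Dims& dims_min,
                      const nvinfer1::Dims& dims_opt,
                      const nvinfer1::Dims& dims_max) {
  // setDimensions returns false if the dims are invalid for this input.
  return profile->setDimensions(input_name, nvinfer1::OptProfileSelector::kMIN, dims_min) &&
         profile->setDimensions(input_name, nvinfer1::OptProfileSelector::kOPT, dims_opt) &&
         profile->setDimensions(input_name, nvinfer1::OptProfileSelector::kMAX, dims_max);
}
```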
@@ -933,7 +933,7 @@ NvExecutionProvider::PerThreadContext::~PerThreadContext() {
 bool NvExecutionProvider::PerThreadContext::CompareProfileShapes(std::string fused_node, ShapeRangesMap& shape_ranges) {
   if (shape_ranges.size() > 0) {
     if (input_shape_ranges_[fused_node] != shape_ranges) {
-      LOGS_DEFAULT(VERBOSE) << "[Nv EP] The shape ranges maintained by the PerThreadContext is different from the shape ranges maintained by TRT EP. \
+      LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] The shape ranges maintained by the PerThreadContext are different from the shape ranges maintained by the TRT EP. \
 This means the engine is updated and will need to update the execution context as well.";
       return true;
     }
@@ -1116,31 +1116,31 @@ NvExecutionProvider::NvExecutionProvider(const NvExecutionProviderInfo& info)
     status = ParseProfileShapes(profile_min_shapes, profile_min_shapes_);
     if (!status) {
       profile_min_shapes_.clear();
-      LOGS_DEFAULT(WARNING) << "[TensorRT EP] The format of provider option 'trt_profile_min_shapes' is wrong, please follow the format of 'input1:dim1xdimd2...,input2:dim1xdim2...,...'";
+      LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] The format of provider option 'trt_profile_min_shapes' is wrong, please follow the format of 'input1:dim1xdim2...,input2:dim1xdim2...,...'";
     }
   }
 
   if (status) {
     status = ParseProfileShapes(profile_max_shapes, profile_max_shapes_);
     if (!status) {
       profile_max_shapes_.clear();
-      LOGS_DEFAULT(WARNING) << "[TensorRT EP] The format of provider option 'trt_profile_max_shapes' is wrong, please follow the format of 'input1:dim1xdimd2...,input2:dim1xdim2...,...'";
+      LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] The format of provider option 'trt_profile_max_shapes' is wrong, please follow the format of 'input1:dim1xdim2...,input2:dim1xdim2...,...'";
     }
   }
 
   if (status) {
     status = ParseProfileShapes(profile_opt_shapes, profile_opt_shapes_);
     if (!status) {
       profile_opt_shapes_.clear();
-      LOGS_DEFAULT(WARNING) << "[TensorRT EP] The format of provider option 'trt_profile_opt_shapes' is wrong, please follow the format of 'input1:dim1xdimd2...,input2:dim1xdim2...,...'";
+      LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] The format of provider option 'trt_profile_opt_shapes' is wrong, please follow the format of 'input1:dim1xdim2...,input2:dim1xdim2...,...'";
     }
   }
 
   if (status) {
     status = ValidateProfileShapes(profile_min_shapes_, profile_max_shapes_, profile_opt_shapes_);
     if (!status) {
-      LOGS_DEFAULT(WARNING) << "[TensorRT EP] Profile shapes validation failed. Make sure the provider options 'trt_profile_min_shapes', 'trt_profile_max_shapes' and 'trt_profile_opt_shapes' have same input name and number of profile.";
-      LOGS_DEFAULT(WARNING) << "[TensorRT EP] TRT EP will implicitly create optimization profiles based on input tensor for you.";
+      LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] Profile shapes validation failed. Make sure the provider options 'trt_profile_min_shapes', 'trt_profile_max_shapes' and 'trt_profile_opt_shapes' have the same input names and number of profiles.";
+      LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] TRT EP will implicitly create optimization profiles based on input tensors for you.";
       profile_min_shapes_.clear();
       profile_max_shapes_.clear();
       profile_opt_shapes_.clear();
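The three warnings above all refer to the same `'input1:dim1xdim2...,input2:...'` shape-string syntax. A minimal parsing sketch for a single profile, assuming that documented format; `ParseShapeString` is a hypothetical stand-in for the EP's real `ParseProfileShapes`:

```cpp
// Hypothetical helper illustrating the 'input1:1x3x224x224,input2:1x16'
// format expected by trt_profile_{min,max,opt}_shapes.
#include <cstdint>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

std::unordered_map<std::string, std::vector<int64_t>>
ParseShapeString(const std::string& s) {
  std::unordered_map<std::string, std::vector<int64_t>> shapes;
  std::stringstream per_input(s);
  std::string entry;
  while (std::getline(per_input, entry, ',')) {  // one "name:1x3x224x224" entry
    const size_t colon = entry.find(':');
    if (colon == std::string::npos) continue;    // skip malformed entry
    std::vector<int64_t> dims;
    std::stringstream per_dim(entry.substr(colon + 1));
    std::string dim;
    while (std::getline(per_dim, dim, 'x')) dims.push_back(std::stoll(dim));
    shapes[entry.substr(0, colon)] = std::move(dims);
  }
  return shapes;
}
```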
@@ -1152,11 +1152,11 @@ NvExecutionProvider::NvExecutionProvider(const NvExecutionProviderInfo& info)
 
   // Validate setting
   if (max_partition_iterations_ <= 0) {
-    // LOGS_DEFAULT(WARNING) << "[Nv EP] TensorRT option nv_max_partition_iterations must be a positive integer value. Set it to 1000";
+    // LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] TensorRT option nv_max_partition_iterations must be a positive integer value. Set it to 1000";
     max_partition_iterations_ = 1000;
   }
   if (min_subgraph_size_ <= 0) {
-    // LOGS_DEFAULT(WARNING) << "[Nv EP] TensorRT option nv_min_subgraph_size must be a positive integer value. Set it to 1";
+    // LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] TensorRT option nv_min_subgraph_size must be a positive integer value. Set it to 1";
     min_subgraph_size_ = 1;
   }
@@ -1223,10 +1223,10 @@ NvExecutionProvider::NvExecutionProvider(const NvExecutionProviderInfo& info)
   trt_version_ = getInferLibVersion();
   CUDA_CALL_THROW(cudaRuntimeGetVersion(&cuda_version_));
 
-  LOGS_DEFAULT(VERBOSE) << "[Nv EP] TensorRT version is " << trt_version_;
-  LOGS_DEFAULT(VERBOSE) << "[Nv EP] CUDA version is " << cuda_version_;
+  LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] TensorRT version is " << trt_version_;
+  LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] CUDA version is " << cuda_version_;
 
-  LOGS_DEFAULT(VERBOSE) << "[Nv EP] Nv provider options: "
+  LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Nv provider options: "
                         << "device_id: " << device_id_
                         << ", nv_max_partition_iterations: " << max_partition_iterations_
                         << ", nv_min_subgraph_size: " << min_subgraph_size_
@@ -1355,7 +1355,7 @@ nvinfer1::IBuilder* NvExecutionProvider::GetBuilder(TensorrtLogger& trt_logger)
 void NvExecutionProvider::GetCustomOpDomainList(std::vector<OrtCustomOpDomain*>& custom_op_domain_list) const {
   auto status = CreateTensorRTCustomOpDomainList(custom_op_domain_list, info_.extra_plugin_lib_paths);
   if (status != Status::OK()) {
-    LOGS_DEFAULT(WARNING) << "[Nv EP] Failed to get TRT plugins from TRT plugin registration.";
+    LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] Failed to get TRT plugins from TRT plugin registration.";
   }
 }
@@ -1534,7 +1534,7 @@ std::unique_ptr<IndexedSubGraph> NvExecutionProvider::GetSubGraph(SubGraph_t gra
   auto meta_def = IndexedSubGraph_MetaDef::Create();
   const std::string graph_type = graph.IsSubgraph() ? "subgraph" : "graph";
   meta_def->name() = "TRTKernel_" + graph_type + "_" + graph.Name() + "_" + subgraph_id;
-  LOGS_DEFAULT(INFO) << "[Nv EP] TensorRT subgraph MetaDef name " + meta_def->name();
+  LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] TensorRT subgraph MetaDef name " + meta_def->name();
 
   // Assign inputs and outputs to subgraph's meta_def
   for (const auto& input : inputs) {
@@ -1655,7 +1655,7 @@ SubGraphCollection_t NvExecutionProvider::GetSupportedList(SubGraphCollection_t
     // Only if the newly built graph has control flow op as well as it has parent node,
     // it needs to handle outer scope values before calling graph.Resolve().
     if (has_control_flow_op && graph.ParentNode()) {
-      LOGS_DEFAULT(VERBOSE) << "[Nv EP] Handle outer scope values for the subgraph " << graph_build.Name();
+      LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Handle outer scope values for the subgraph " << graph_build.Name();
       BuildSubGraphContext(graph_build);
       SetGraphOuterScopeValuesAndInputs(graph_build, graph.GetGraph());
       SetAllGraphInputs(graph_build);
@@ -2041,9 +2041,9 @@ NvExecutionProvider::GetCapability(const GraphViewer& graph,
     }
     SubGraphCollection_t consolidated_supported_nodes_vector = {{nodes_vector, true}};
     if (DetectTensorRTGraphCycles(consolidated_supported_nodes_vector, graph, model_hash, false)) {
-      LOGS_DEFAULT(INFO) << "[Nv EP] TensorRT nodes are not consolidated because graph will have cycles after consolidation";
+      LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] TensorRT nodes are not consolidated because the graph would have cycles after consolidation";
     } else {
-      LOGS_DEFAULT(INFO) << "[Nv EP] TensorRT nodes are consolidated into one subgraph";
+      LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] TensorRT nodes are consolidated into one subgraph";
       supported_nodes_vector = consolidated_supported_nodes_vector;
     }
   }
@@ -2108,7 +2108,7 @@ NvExecutionProvider::GetCapability(const GraphViewer& graph,
       }
     }
   }
-  LOGS_DEFAULT(INFO) << "[Nv EP] Whole graph will run on Nv execution provider";
+  LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] Whole graph will run on Nv execution provider";
 
   // The context map is only used during EP compile time, release it to save memory space.
   subgraph_context_map_.clear();
@@ -2128,11 +2128,11 @@ NvExecutionProvider::GetCapability(const GraphViewer& graph,
 
   const size_t number_of_subgraphs = supported_nodes_vector.size();
   if (number_of_trt_nodes == 0) {
-    LOGS_DEFAULT(WARNING) << "[Nv EP] No graph will run on Nv execution provider";
+    LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] No graph will run on Nv execution provider";
   } else if (number_of_trt_nodes == number_of_ort_nodes) {
-    LOGS_DEFAULT(INFO) << "[Nv EP] Whole graph will run on Nv execution provider";
+    LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] Whole graph will run on Nv execution provider";
   } else {
-    LOGS_DEFAULT(INFO) << "[Nv EP] Graph is partitioned and number of subgraphs running on Nv executio provider is " << number_of_subgraphs;
+    LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] Graph is partitioned and the number of subgraphs running on the Nv execution provider is " << number_of_subgraphs;
   }
 
   // The context map is only used during EP compile time, release it to save memory space.
@@ -2190,20 +2190,20 @@ common::Status NvExecutionProvider::RefitEngine(std::string onnx_model_filename,
   auto parser_refitter = std::unique_ptr<nvonnxparser::IParserRefitter>(
       nvonnxparser::createParserRefitter(*refitter, trt_logger));
   if (refit_from_file) {
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Refitting from file on disk: " << onnx_model_path.string();
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Refitting from file on disk: " << onnx_model_path.string();
     if (!parser_refitter->refitFromFile(onnx_model_path.string().c_str())) {
       return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
                              "Nv EP's IParserRefitter could not refit deserialized weight-stripped engine with weights contained in: " + onnx_model_path.string());
     }
   } else {
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Refitting from byte array";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Refitting from byte array";
     if (!parser_refitter->refitFromBytes(onnx_model_bytestream, onnx_model_bytestream_size)) {
       return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
                              "Nv EP's IParserRefitter could not refit deserialized weight-stripped engine with weights contained in the provided bytestream");
     }
   }
   if (refitter->refitCudaEngine()) {
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Successfully refitted the weight-stripped engine.";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Successfully refitted the weight-stripped engine.";
   } else {
     return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
                            "Nv EP's IRefitter could not refit deserialized weight-stripped engine with weights contained in: " + onnx_model_path.string());
@@ -2215,7 +2215,7 @@ common::Status NvExecutionProvider::RefitEngine(std::string onnx_model_filename,
     nvinfer1::IHostMemory* serialized_engine = trt_engine->serialize();
     std::ofstream engine_file(refitted_engine_cache, std::ios::binary | std::ios::out);
     engine_file.write(reinterpret_cast<const char*>(serialized_engine->data()), serialized_engine->size());
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Serialize the refitted engine to " << refitted_engine_cache;
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Serialized the refitted engine to " << refitted_engine_cache;
   }
   return Status::OK();
 }
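RefitEngine above wraps the standard TensorRT refit sequence. A reduced sketch of that sequence using the public APIs (`createInferRefitter`, `createParserRefitter`, `refitFromFile`, `refitCudaEngine`), with the EP's status plumbing collapsed to booleans:

```cpp
// Hedged sketch of refitting a deserialized weight-stripped engine from the
// original ONNX file. Function name and reduced error handling are illustrative.
#include <NvInfer.h>
#include <NvOnnxParser.h>
#include <memory>

bool RefitStrippedEngine(nvinfer1::ICudaEngine& engine,
                         nvinfer1::ILogger& logger,
                         const char* onnx_path) {
  std::unique_ptr<nvinfer1::IRefitter> refitter{
      nvinfer1::createInferRefitter(engine, logger)};
  std::unique_ptr<nvonnxparser::IParserRefitter> parser_refitter{
      nvonnxparser::createParserRefitter(*refitter, logger)};
  // Pull the weights back out of the original ONNX file...
  if (!parser_refitter->refitFromFile(onnx_path)) return false;
  // ...and push them into the weight-stripped engine.
  return refitter->refitCudaEngine();
}
```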
@@ -2378,7 +2378,7 @@ Status NvExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphViewer& gr
     has_dynamic_shape |= tensor_is_dynamic(input);
   }
   if (has_dynamic_shape) {
-    LOGS_DEFAULT(WARNING) << "[Nv EP] No explicit optimization profile was specified. "
+    LOGS_DEFAULT(WARNING) << "[NvTensorRTRTX EP] No explicit optimization profile was specified. "
                              "We will assume a single profile with fully dynamic range. "
                              "This feature is experimental and may change in the future."
                              " If you plan to use this model as fixed shape we recommend using a free dimension override: "
@@ -2401,7 +2401,7 @@ Status NvExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphViewer& gr
       if (has_explicit_profile && tensor_has_profile) {
         apply_profile = ApplyProfileShapesFromProviderOptions(trt_profiles, input, profile_min_shapes_, profile_max_shapes_, profile_opt_shapes_, input_explicit_shape_ranges);
       } else {
-        LOGS_DEFAULT(INFO) << "[Nv EP] Creating implicit profile for tensor " << input_name;
+        LOGS_DEFAULT(INFO) << "[NvTensorRTRTX EP] Creating implicit profile for tensor " << input_name;
         profile_min_shapes_[input_name] = std::vector<std::vector<int64_t>>{{}};
         profile_min_shapes_[input_name][0].resize(dims.nbDims);
         profile_opt_shapes_[input_name] = std::vector<std::vector<int64_t>>{{}};
@@ -2458,20 +2458,20 @@ Status NvExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphViewer& gr
   // enable sparse weights
   if (sparsity_enable_) {
     trt_config->setFlag(nvinfer1::BuilderFlag::kSPARSE_WEIGHTS);
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Sparse weights are allowed";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Sparse weights are allowed";
   }
 
   // limit auxiliary streams
   if (auxiliary_streams_ >= 0) {
     trt_config->setMaxAuxStreams(auxiliary_streams_);
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Auxiliary streams are se to " << auxiliary_streams_;
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Auxiliary streams are set to " << auxiliary_streams_;
   }
 
   if (weight_stripped_engine_enable_) {
     trt_config->setFlag(nvinfer1::BuilderFlag::kSTRIP_PLAN);
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] STRIP_PLAN is enabled";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] STRIP_PLAN is enabled";
     trt_config->setFlag(nvinfer1::BuilderFlag::kREFIT_IDENTICAL);
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] REFIT_IDENTICAL is enabled";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] REFIT_IDENTICAL is enabled";
   }
 
   // Build TRT engine (if needed) and load TRT engine if:
@@ -2554,7 +2554,7 @@ Status NvExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphViewer& gr
   }
 
   if (weight_stripped_engine_refit_) {
-    LOGS_DEFAULT(VERBOSE) << "[Nv EP] Refit engine from main ONNX file after engine build";
+    LOGS_DEFAULT(VERBOSE) << "[NvTensorRTRTX EP] Refit engine from main ONNX file after engine build";
     char* onnx = string_buf.data();
     size_t onnx_size = string_buf.size();
     auto status = RefitEngine(model_path_,
0 commit comments