#include <utility>

#include <deep_core/plugin_interfaces/deep_backend_plugin.hpp>
-#include <deep_ort_backend_plugin/ort_backend_plugin.hpp>
-#include <deep_ort_gpu_backend_plugin/ort_gpu_backend_plugin.hpp>
+#include <pluginlib/class_loader.hpp>
#include <rcl_interfaces/msg/parameter_descriptor.hpp>

namespace deep_yolo_inference
@@ -33,8 +32,18 @@ namespace deep_yolo_inference
BackendManager::BackendManager(rclcpp_lifecycle::LifecycleNode & node, const YoloParams & params)
: node_(node)
, params_(params)
+, plugin_loader_(std::make_unique<pluginlib::ClassLoader<deep_ros::DeepBackendPlugin>>("deep_core", "deep_ros::DeepBackendPlugin"))
{}

+BackendManager::~BackendManager()
+{
+  // Reset the plugin holder before the plugin loader is destroyed.
+  // This ensures proper cleanup order and avoids class_loader warnings.
+  plugin_holder_.reset();
+  executor_.reset();
+  allocator_.reset();
+}
+
void BackendManager::buildProviderOrder()
{
  auto normalize_pref = params_.preferred_provider;
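Note on the destructor added above: with pluginlib, an instance created by a ClassLoader must be destroyed before the loader itself, because destroying the loader can unload the shared library that still backs the plugin object's code. The explicit reset() calls make that ordering unambiguous. A minimal sketch of the member layout this implies for the header (the header is not part of this diff; the holder type and member order are assumptions):

// Sketch only: assumed members of BackendManager; the real header is not shown in this diff.
#include <memory>

#include <deep_core/plugin_interfaces/deep_backend_plugin.hpp>
#include <pluginlib/class_loader.hpp>

class BackendManager
{
  // ...
private:
  // Declared before plugin_holder_: members are destroyed in reverse declaration
  // order, so the loader outlives the plugin instance during teardown. The explicit
  // resets in ~BackendManager() enforce the same ordering regardless.
  std::unique_ptr<pluginlib::ClassLoader<deep_ros::DeepBackendPlugin>> plugin_loader_;
  // createUniqueInstance() returns a unique_ptr with a loader-aware deleter;
  // the exact alias used for the member is an assumption.
  pluginlib::UniquePtr<deep_ros::DeepBackendPlugin> plugin_holder_;
};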
@@ -121,34 +130,32 @@ bool BackendManager::initializeBackend(size_t start_index)
    }

    try {
-      switch (provider) {
-        case Provider::TENSORRT:
-        case Provider::CUDA: {
-          const auto provider_name = providerToString(provider);
-          auto overrides = std::vector<rclcpp::Parameter>{
-            rclcpp::Parameter("Backend.device_id", params_.device_id),
-            rclcpp::Parameter("Backend.execution_provider", provider_name),
-            rclcpp::Parameter("Backend.trt_engine_cache_enable", params_.enable_trt_engine_cache),
-            rclcpp::Parameter("Backend.trt_engine_cache_path", params_.trt_engine_cache_path)};
-          auto backend_node = createBackendConfigNode(provider_name, std::move(overrides));
-          auto plugin = std::make_shared<deep_ort_gpu_backend::OrtGpuBackendPlugin>();
-          plugin->initialize(backend_node);
-          allocator_ = plugin->get_allocator();
-          executor_ = plugin->get_inference_executor();
-          plugin_holder_ = plugin;
-          break;
-        }
-        case Provider::CPU: {
-          auto backend_node = createBackendConfigNode("cpu");
-          auto plugin = std::make_shared<deep_ort_backend::OrtBackendPlugin>();
-          plugin->initialize(backend_node);
-          allocator_ = plugin->get_allocator();
-          executor_ = plugin->get_inference_executor();
-          plugin_holder_ = plugin;
-          break;
-        }
+      const std::string plugin_name = providerToPluginName(provider);
+      if (plugin_name.empty()) {
+        RCLCPP_WARN(node_.get_logger(), "No plugin name for provider: %s", providerToString(provider).c_str());
+        continue;
+      }
+
+      // Create a backend config node with provider-specific parameters
+      rclcpp_lifecycle::LifecycleNode::SharedPtr backend_node;
+      if (provider == Provider::TENSORRT || provider == Provider::CUDA) {
+        const auto provider_name = providerToString(provider);
+        auto overrides = std::vector<rclcpp::Parameter>{
+          rclcpp::Parameter("Backend.device_id", params_.device_id),
+          rclcpp::Parameter("Backend.execution_provider", provider_name),
+          rclcpp::Parameter("Backend.trt_engine_cache_enable", params_.enable_trt_engine_cache),
+          rclcpp::Parameter("Backend.trt_engine_cache_path", params_.trt_engine_cache_path)};
+        backend_node = createBackendConfigNode(provider_name, std::move(overrides));
+      } else {
+        backend_node = createBackendConfigNode("cpu");
      }

+      // Load the plugin through pluginlib
+      plugin_holder_ = plugin_loader_->createUniqueInstance(plugin_name);
+      plugin_holder_->initialize(backend_node);
+      allocator_ = plugin_holder_->get_allocator();
+      executor_ = plugin_holder_->get_inference_executor();
+
      if (!executor_ || !allocator_) {
        throw std::runtime_error("Executor or allocator is null");
      }
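For the createUniqueInstance() call above, pluginlib resolves the lookup names returned by providerToPluginName() ("onnxruntime_gpu", "onnxruntime_cpu") through the plugin description XML that each backend package exports from its package.xml, under an export tag matching the "deep_core" base package passed to the ClassLoader constructor. A hedged sketch of the C++ side of that registration in the GPU backend package (class and header names are taken from the includes removed above; the registration itself is an assumption and is not shown in this diff):

// Sketch: plugin registration assumed to live in the GPU backend package.
// The lookup name "onnxruntime_gpu" is bound in that package's plugin
// description XML, not in this macro.
#include <pluginlib/class_list_macros.hpp>

#include <deep_ort_gpu_backend_plugin/ort_gpu_backend_plugin.hpp>

PLUGINLIB_EXPORT_CLASS(deep_ort_gpu_backend::OrtGpuBackendPlugin, deep_ros::DeepBackendPlugin)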
@@ -281,4 +288,17 @@ deep_ros::Tensor BackendManager::buildInputTensor(const PackedInput & packed) co
  return tensor;
}

+std::string BackendManager::providerToPluginName(Provider provider) const
+{
+  switch (provider) {
+    case Provider::TENSORRT:
+    case Provider::CUDA:
+      return "onnxruntime_gpu";
+    case Provider::CPU:
+      return "onnxruntime_cpu";
+    default:
+      return "";
+  }
+}
+
} // namespace deep_yolo_inference
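Because providerToPluginName() returns an empty string for an unmapped provider, initializeBackend() only logs a warning and continues with the next provider in the order, so an unknown provider degrades gracefully instead of aborting. If an eager check is preferred, the loader can report which classes are actually declared; a sketch using a hypothetical helper (verifyPluginNames() and provider_order_ are assumptions, not part of this diff):

// Sketch: optional configure-time check that every mapped lookup name is
// actually declared to pluginlib before inference starts.
void BackendManager::verifyPluginNames()
{
  for (const auto provider : provider_order_) {  // provider_order_ assumed to be filled by buildProviderOrder()
    const std::string name = providerToPluginName(provider);
    if (!name.empty() && !plugin_loader_->isClassAvailable(name)) {
      RCLCPP_WARN(
        node_.get_logger(), "Backend plugin '%s' is not declared; it will be skipped at runtime",
        name.c_str());
    }
  }
}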