// Copyright (c) 2025-present WATonomous. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <onnxruntime_cxx_api.h>

#include <filesystem>
#include <memory>
#include <string>
#include <vector>

#include <deep_core/plugin_interfaces/backend_inference_executor.hpp>
#include <deep_core/types/tensor.hpp>

| 27 | +namespace deep_ort_backend |
| 28 | +{ |
| 29 | + |
| 30 | +/** |
| 31 | + * @brief ONNX Runtime backend inference executor |
| 32 | + * |
| 33 | + * Provides inference execution using ONNX Runtime with CPU optimization. |
| 34 | + * Uses zero-copy IO binding for efficient tensor operations. |
| 35 | + */ |
| 36 | +class OrtBackendExecutor : public deep_ros::BackendInferenceExecutor |
| 37 | +{ |
| 38 | +public: |
| 39 | + /** |
| 40 | + * @brief Constructor - initializes ONNX Runtime environment |
| 41 | + */ |
| 42 | + OrtBackendExecutor(); |
| 43 | + |
| 44 | + /** |
| 45 | + * @brief Destructor |
| 46 | + */ |
| 47 | + ~OrtBackendExecutor() override = default; |
| 48 | + |
| 49 | + /** |
| 50 | + * @brief Get supported model formats |
| 51 | + * @return Vector containing "onnx" |
| 52 | + */ |
| 53 | + std::vector<std::string> supported_model_formats() const override; |
| 54 | + |
| 55 | +protected: |
| 56 | + /** |
| 57 | + * @brief Load an ONNX model from file |
| 58 | + * @param model_path Path to the .onnx model file |
| 59 | + * @return true if successful, false otherwise |
| 60 | + */ |
| 61 | + bool load_model_impl(const std::filesystem::path & model_path) override; |
| 62 | + |
| 63 | + /** |
| 64 | + * @brief Run inference using zero-copy IO binding |
| 65 | + * @param input Input tensor (must be compatible with model input) |
| 66 | + * @return Output tensor with inference results |
| 67 | + * @throws std::runtime_error if inference fails or no model loaded |
| 68 | + */ |
| 69 | + deep_ros::Tensor run_inference_impl(deep_ros::Tensor & input) override; |
| 70 | + |
| 71 | + /** |
| 72 | + * @brief Unload the currently loaded model |
| 73 | + */ |
| 74 | + void unload_model_impl() override; |
| 75 | + |
| 76 | +private: |
| 77 | + std::filesystem::path model_path_; |
| 78 | + |
| 79 | + std::unique_ptr<Ort::Env> env_; |
| 80 | + std::unique_ptr<Ort::Session> session_; |
| 81 | + Ort::MemoryInfo memory_info_; |
| 82 | + |
| 83 | + /** |
| 84 | + * @brief Convert deep_ros DataType to ONNX tensor element type |
| 85 | + * @param dtype deep_ros data type |
| 86 | + * @return ONNX tensor element data type |
| 87 | + */ |
| 88 | + ONNXTensorElementDataType convert_to_onnx_type(deep_ros::DataType dtype) const; |
| 89 | + |
| 90 | + /** |
| 91 | + * @brief Get model output shape based on input shape |
| 92 | + * @param input_shape Input tensor shape |
| 93 | + * @return Expected output tensor shape |
| 94 | + * @throws std::runtime_error if model not loaded or shape inference fails |
| 95 | + */ |
| 96 | + std::vector<size_t> get_output_shape(const std::vector<size_t> & input_shape) const; |
| 97 | + |
| 98 | + /** |
| 99 | + * @brief Get element size in bytes for a data type |
| 100 | + * @param dtype Data type |
| 101 | + * @return Size in bytes per element |
| 102 | + */ |
| 103 | + size_t get_element_size(deep_ros::DataType dtype) const; |
| 104 | +}; |
| 105 | + |
| 106 | +} // namespace deep_ort_backend |