// Copyright (c) 2025-present WATonomous. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <cstdlib>
#include <cstring>
#include <filesystem>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

#include <pluginlib/class_list_macros.hpp>

#include "deep_core/plugin_interfaces/backend_inference_executor.hpp"
#include "deep_core/plugin_interfaces/backend_memory_allocator.hpp"
#include "deep_core/plugin_interfaces/deep_backend_plugin.hpp"

namespace deep_ros
{
namespace test
{

// Test allocator that backs the "device" with plain host heap memory, so every
// copy direction reduces to a memcpy.
class TestMemoryAllocator : public BackendMemoryAllocator
{
public:
  void * allocate(size_t bytes) override
  {
    if (bytes == 0) {
      return nullptr;
    }
    return std::malloc(bytes);
  }

  void deallocate(void * ptr) override
  {
    if (ptr) {
      std::free(ptr);
    }
  }

  void copy_from_host(void * dst, const void * src, size_t bytes) override
  {
    std::memcpy(dst, src, bytes);
  }

  void copy_to_host(void * dst, const void * src, size_t bytes) override
  {
    std::memcpy(dst, src, bytes);
  }

  void copy_device_to_device(void * dst, const void * src, size_t bytes) override
  {
    std::memcpy(dst, src, bytes);
  }

  bool is_device_memory() const override
  {
    return false;  // Allocations live in host memory.
  }

  std::string device_name() const override
  {
    return "test_cpu";
  }
};

// Stub executor: load_model() only records the path, and run_inference() echoes
// the input tensor so tests can exercise the data path without a real model.
class TestInferenceExecutor : public BackendInferenceExecutor
{
public:
  bool load_model(const std::filesystem::path & model_path) override
  {
    model_loaded_ = true;
    current_model_path_ = model_path;
    return true;
  }

  Tensor run_inference(Tensor input) override
  {
    if (!model_loaded_) {
      throw std::runtime_error("No model loaded");
    }

    // For testing, simply echo the input tensor back.
    return input;
  }

  void unload_model() override
  {
    model_loaded_ = false;
    current_model_path_.clear();
  }

  std::vector<std::string> supported_model_formats() const override
  {
    return {"test", "dummy"};
  }

private:
  bool model_loaded_ = false;
  std::filesystem::path current_model_path_;
};

// Backend plugin that exposes the test allocator and executor through the
// DeepBackendPlugin interface.
class TestBackendPlugin : public DeepBackendPlugin
{
public:
  TestBackendPlugin()
  {
    allocator_ = std::make_shared<TestMemoryAllocator>();
    executor_ = std::make_shared<TestInferenceExecutor>();
  }

  std::string backend_name() const override
  {
    return "test_backend";
  }

  std::shared_ptr<BackendMemoryAllocator> get_allocator() const override
  {
    return allocator_;
  }

  std::shared_ptr<BackendInferenceExecutor> get_inference_executor() const override
  {
    return executor_;
  }

private:
  std::shared_ptr<TestMemoryAllocator> allocator_;
  std::shared_ptr<TestInferenceExecutor> executor_;
};

}  // namespace test
}  // namespace deep_ros

// Register the plugin with pluginlib so it can be discovered at runtime.
PLUGINLIB_EXPORT_CLASS(deep_ros::test::TestBackendPlugin, deep_ros::DeepBackendPlugin)
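
// Example (a minimal sketch, not part of this translation unit's build): a
// consumer could load this backend through pluginlib's ClassLoader. The package
// name "deep_core" and the plugin.xml lookup names below are assumptions based
// on the include paths above and may differ in the actual package manifest.
//
//   #include <pluginlib/class_loader.hpp>
//
//   pluginlib::ClassLoader<deep_ros::DeepBackendPlugin> loader(
//     "deep_core", "deep_ros::DeepBackendPlugin");
//   std::shared_ptr<deep_ros::DeepBackendPlugin> plugin =
//     loader.createSharedInstance("deep_ros::test::TestBackendPlugin");
//   auto allocator = plugin->get_allocator();           // TestMemoryAllocator
//   auto executor = plugin->get_inference_executor();   // TestInferenceExecutor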