|
| 1 | +// Copyright 2024 TeiaCare |
| 2 | +// |
| 3 | +// Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +// you may not use this file except in compliance with the License. |
| 5 | +// You may obtain a copy of the License at |
| 6 | +// |
| 7 | +// http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +// |
| 9 | +// Unless required by applicable law or agreed to in writing, software |
| 10 | +// distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +// See the License for the specific language governing permissions and |
| 13 | +// limitations under the License. |
| 14 | + |
#include <teiacare/inference_client/client_factory.hpp>

#include <spdlog/spdlog.h>

#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <string>
#include <vector>
| 19 | +int main(int argc, char** argv) |
| 20 | +{ |
| 21 | + spdlog::set_level(spdlog::level::level_enum::trace); |
| 22 | + spdlog::set_pattern("[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] %v"); |
| 23 | + spdlog::info("Running client_example_inference"); |
| 24 | + |
| 25 | + auto client = tc::infer::create_client("localhost:8001", std::chrono::seconds(30)); |
| 26 | + if (!client->is_server_live() || !client->is_server_ready()) |
| 27 | + { |
| 28 | + spdlog::error("Server is not available"); |
| 29 | + return 1; |
| 30 | + } |
| 31 | + |
| 32 | + std::string model_name = "yolov5x_face_person_trt"; |
| 33 | + std::string model_version = "2"; |
| 34 | + const auto model_metadata = client->model_metadata(model_name, model_version); |
| 35 | + spdlog::info("Model available: '{}' (available versions: '{}' platform '{}')", model_metadata.model_name, fmt::join(model_metadata.model_versions, ", "), model_metadata.platform); |
| 36 | + |
| 37 | + for (auto input : model_metadata.inputs) |
| 38 | + { |
| 39 | + spdlog::info("Input tensor: {} (datatype: {}, shape: {})", input.name, input.datatype, fmt::join(input.shape, ", ")); |
| 40 | + } |
| 41 | + |
| 42 | + for (auto output : model_metadata.outputs) |
| 43 | + { |
| 44 | + spdlog::info("Output tensor: {} (datatype: {}, shape: {})", output.name, output.datatype, fmt::join(output.shape, ", ")); |
| 45 | + } |
| 46 | + |
| 47 | + std::vector<float> data(3 * 640 * 640, 1.0f); // fill with dummy data |
| 48 | + std::vector<int64_t> shape{1, 3, 640, 640}; |
| 49 | + |
| 50 | + tc::infer::infer_request request; |
| 51 | + request.model_name = model_name; |
| 52 | + request.model_version = model_version; |
| 53 | + request.id = "REQUEST_0"; |
| 54 | + request.add_input_tensor(data.data(), data.size(), shape, "images"); |
| 55 | + |
| 56 | + tc::infer::infer_response response; |
| 57 | + try |
| 58 | + { |
| 59 | + response = client->infer(request, std::chrono::seconds(30)); |
| 60 | + } |
| 61 | + catch (const std::runtime_error& ex) |
| 62 | + { |
| 63 | + spdlog::error("Unable to perform inference: {}", ex.what()); |
| 64 | + return EXIT_FAILURE; |
| 65 | + } |
| 66 | + |
| 67 | + spdlog::info("Model name: {}", response.model_name); |
| 68 | + spdlog::info("Model version: {}", response.model_version); |
| 69 | + spdlog::info("Output layers"); |
| 70 | + for (const auto& output : response.output_tensors) |
| 71 | + { |
| 72 | + spdlog::info("- Name: {}", output.name()); |
| 73 | + spdlog::info("- DataType: {}", output.datatype().str()); |
| 74 | + spdlog::info("- Shape: [{}]", fmt::join(output.shape(), ", ")); |
| 75 | + spdlog::info("- Output layer data"); |
| 76 | + |
| 77 | + const int8_t* output_data = output.as<int8_t>(); |
| 78 | + for (auto i = 0; i < output.data_size(); ++i) |
| 79 | + { |
| 80 | + spdlog::debug(" {}: {}", i, output_data[i]); |
| 81 | + } |
| 82 | + } |
| 83 | + |
| 84 | + return EXIT_SUCCESS; |
| 85 | +} |
0 commit comments