Skip to content

Commit c25ab61

Browse files
Added an inference example. Added examples to the Doxygen configuration.
1 parent d293e08 commit c25ab61

File tree

3 files changed

+88
-3
lines changed

3 files changed

+88
-3
lines changed

Doxyfile

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -943,7 +943,7 @@ WARN_LOGFILE =
943943
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
944944
# Note: If this tag is empty the current directory is searched.
945945

946-
INPUT = inference_client/examples/src inference_client/include/teiacare/inference_client README.md
946+
INPUT = inference_client/examples inference_client/include/teiacare/inference_client README.md
947947

948948
# This tag can be used to specify the character encoding of the source files
949949
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -1016,8 +1016,7 @@ EXCLUDE_SYMLINKS = NO
10161016
# Note that the wildcards are matched against the file with absolute path, so to
10171017
# exclude all test directories for example use the pattern */test/*
10181018

1019-
EXCLUDE_PATTERNS = function_traits.hpp \
1020-
clock.hpp
1019+
EXCLUDE_PATTERNS =
10211020

10221021
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
10231022
# (namespaces, classes, functions, etc.) that should be excluded from the

inference_client/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,4 +115,5 @@ include(examples)
115115
add_example(teiacare::inference_client client_example_model_info)
116116
add_example(teiacare::inference_client client_example_server_info)
117117
add_example(teiacare::inference_client client_example_simple_inference)
118+
add_example(teiacare::inference_client client_example_inference)
118119
endif()
Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
// Copyright 2024 TeiaCare
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include <teiacare/inference_client/client_factory.hpp>
16+
17+
#include <spdlog/spdlog.h>
18+
19+
int main(int argc, char** argv)
20+
{
21+
spdlog::set_level(spdlog::level::level_enum::trace);
22+
spdlog::set_pattern("[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] %v");
23+
spdlog::info("Running client_example_inference");
24+
25+
auto client = tc::infer::create_client("localhost:8001", std::chrono::seconds(30));
26+
if (!client->is_server_live() || !client->is_server_ready())
27+
{
28+
spdlog::error("Server is not available");
29+
return 1;
30+
}
31+
32+
std::string model_name = "yolov5x_face_person_trt";
33+
std::string model_version = "2";
34+
const auto model_metadata = client->model_metadata(model_name, model_version);
35+
spdlog::info("Model available: '{}' (available versions: '{}' platform '{}')", model_metadata.model_name, fmt::join(model_metadata.model_versions, ", "), model_metadata.platform);
36+
37+
for (auto input : model_metadata.inputs)
38+
{
39+
spdlog::info("Input tensor: {} (datatype: {}, shape: {})", input.name, input.datatype, fmt::join(input.shape, ", "));
40+
}
41+
42+
for (auto output : model_metadata.outputs)
43+
{
44+
spdlog::info("Output tensor: {} (datatype: {}, shape: {})", output.name, output.datatype, fmt::join(output.shape, ", "));
45+
}
46+
47+
std::vector<float> data(3 * 640 * 640, 1.0f); // fill with dummy data
48+
std::vector<int64_t> shape{1, 3, 640, 640};
49+
50+
tc::infer::infer_request request;
51+
request.model_name = model_name;
52+
request.model_version = model_version;
53+
request.id = "REQUEST_0";
54+
request.add_input_tensor(data.data(), data.size(), shape, "images");
55+
56+
tc::infer::infer_response response;
57+
try
58+
{
59+
response = client->infer(request, std::chrono::seconds(30));
60+
}
61+
catch (const std::runtime_error& ex)
62+
{
63+
spdlog::error("Unable to perform inference: {}", ex.what());
64+
return EXIT_FAILURE;
65+
}
66+
67+
spdlog::info("Model name: {}", response.model_name);
68+
spdlog::info("Model version: {}", response.model_version);
69+
spdlog::info("Output layers");
70+
for (const auto& output : response.output_tensors)
71+
{
72+
spdlog::info("- Name: {}", output.name());
73+
spdlog::info("- DataType: {}", output.datatype().str());
74+
spdlog::info("- Shape: [{}]", fmt::join(output.shape(), ", "));
75+
spdlog::info("- Output layer data");
76+
77+
const int8_t* output_data = output.as<int8_t>();
78+
for (auto i = 0; i < output.data_size(); ++i)
79+
{
80+
spdlog::debug(" {}: {}", i, output_data[i]);
81+
}
82+
}
83+
84+
return EXIT_SUCCESS;
85+
}

0 commit comments

Comments
 (0)