diff --git a/CMakeLists.txt b/CMakeLists.txt
index a8924d5..cf95e16 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -9,13 +9,13 @@ add_compile_options(-Wextra -Werror=extra)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
-
-find_package(OpenCV REQUIRED)
find_package(catkin REQUIRED
COMPONENTS
onnxruntime_ros
)
+find_package(console_bridge REQUIRED)
+find_package(OpenCV REQUIRED)
set(${PROJECT_NAME}_CUDA_ENABLED ${onnxruntime_ros_CUDA_ENABLED})
if(${PROJECT_NAME}_CUDA_ENABLED)
@@ -35,7 +35,7 @@ catkin_package(
INCLUDE_DIRS include ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_INCLUDE_DESTINATION}
LIBRARIES ${PROJECT_NAME}
CATKIN_DEPENDS onnxruntime_ros
- DEPENDS ${${PROJECT_NAME}_CUDA_CATKIN_DEPENDS} OpenCV
+ DEPENDS ${${PROJECT_NAME}_CUDA_CATKIN_DEPENDS} OpenCV console_bridge
)
# ------------------------------------------------------------------------------------------------
@@ -47,6 +47,7 @@ include_directories(
${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_INCLUDE_DESTINATION}
SYSTEM
${${PROJECT_NAME}_CUDA_INCLUDE_DIRS}
+ ${console_bridge_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
${catkin_INCLUDE_DIRS}
)
@@ -55,7 +56,7 @@ add_library(${PROJECT_NAME}
src/detection.cpp
src/yolo_inference.cpp
)
-target_link_libraries(${PROJECT_NAME} ${${PROJECT_NAME}_CUDA_TARGET_LINK_LIBRARIES} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})
+target_link_libraries(${PROJECT_NAME} ${${PROJECT_NAME}_CUDA_TARGET_LINK_LIBRARIES} ${console_bridge_LIBRARIES} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})
# add_dependencies(${PROJECT_NAME} generate_config_hpp)
add_executable(test_${PROJECT_NAME}
diff --git a/package.xml b/package.xml
index 3fa4063..c071479 100644
--- a/package.xml
+++ b/package.xml
@@ -13,6 +13,7 @@
catkin
+  <depend>libconsole-bridge-dev</depend>
onnxruntime_ros
libopencv-dev
diff --git a/src/detection.cpp b/src/detection.cpp
index 618b78f..1968336 100644
--- a/src/detection.cpp
+++ b/src/detection.cpp
@@ -1,6 +1,8 @@
#include "yolo_onnx_ros/detection.hpp"
#include
+#include <console_bridge/console.h>
+
#include
#include
#include
@@ -94,7 +96,7 @@ int ReadYaml(const std::filesystem::path& filename, std::unique_ptr& p)
std::ifstream file(filename);
if (!file.is_open())
{
- std::cerr << "[ReadYaml] Failed to open: " << filename << std::endl;
+ CONSOLE_BRIDGE_logError("[ReadYaml] Failed to open: %s", filename.c_str());
return 1;
}
@@ -128,7 +130,7 @@ int ReadYaml(const std::filesystem::path& filename, std::unique_ptr& p)
if (!in_names_section || start >= lines.size())
{
- std::cerr << "[ReadYaml] Could not find 'names:' section in " << filename << std::endl;
+ CONSOLE_BRIDGE_logError("[ReadYaml] Could not find 'names:' section in %s", filename.c_str());
return 1;
}
@@ -166,12 +168,12 @@ int ReadYaml(const std::filesystem::path& filename, std::unique_ptr& p)
if (names.empty())
{
- std::cerr << "[ReadYaml] No class names found in " << filename << std::endl;
+ CONSOLE_BRIDGE_logError("[ReadYaml] No class names found in %s", filename.c_str());
return 1;
}
p->classes = names;
- std::cout << "[ReadYaml] Loaded " << names.size() << " classes from " << filename << std::endl;
+  CONSOLE_BRIDGE_logInform("[ReadYaml] Loaded %zu classes from %s", names.size(), filename.c_str());
return 0;
}
diff --git a/src/main.cpp b/src/main.cpp
index a637b05..5d9fc0b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,9 +1,7 @@
-#include
-#include
#include "yolo_onnx_ros/detection.hpp"
+
#include
-#include
-#include
+#include <filesystem>
int main(int argc, char *argv[])
{
@@ -21,7 +19,7 @@ int main(int argc, char *argv[])
std::tie(yoloDetector, params) = Initialize(model_name);
std::filesystem::path imgs_path = argv[2];
- for (auto& i : std::filesystem::directory_iterator(imgs_path))
+ for (const auto& i : std::filesystem::directory_iterator(imgs_path))
{
if (i.path().extension() == ".jpg" || i.path().extension() == ".png" || i.path().extension() == ".jpeg")
{
diff --git a/src/yolo_inference.cpp b/src/yolo_inference.cpp
index 5444d60..b20c31a 100644
--- a/src/yolo_inference.cpp
+++ b/src/yolo_inference.cpp
@@ -1,5 +1,9 @@
#include "yolo_onnx_ros/yolo_inference.hpp"
+
+#include <console_bridge/console.h>
+
#include
+#include <sstream>
#define benchmark
#define min(a, b) (((a) < (b)) ? (a) : (b))
@@ -118,8 +122,7 @@ const char* YOLO_V8::CreateSession(DL_INIT_PARAM& iParams) {
bool result = std::regex_search(iParams.modelPath, pattern);
if (result)
{
- Ret = "[YOLO_V8]:Your model path is error. Change your model path without chinese characters.";
- std::cout << Ret << std::endl;
+        CONSOLE_BRIDGE_logError("%s", Ret = "[YOLO_V8]:Your model path is error. Change your model path without chinese characters.");
return Ret;
}
try
@@ -166,14 +169,9 @@ const char* YOLO_V8::CreateSession(DL_INIT_PARAM& iParams) {
}
catch (const std::exception& e)
{
- const char* str1 = "[YOLO_V8]:";
- const char* str2 = e.what();
- std::string result = std::string(str1) + std::string(str2);
- char* merged = new char[result.length() + 1];
- std::strcpy(merged, result.c_str());
- std::cout << merged << std::endl;
- delete[] merged;
- return "[YOLO_V8]:Create session failed.";
+ std::string error_msg = "[YOLO_V8]: Failed to create session: " + std::string(e.what());
+        CONSOLE_BRIDGE_logError("%s", error_msg.c_str());
+ throw std::runtime_error(error_msg);
}
}
@@ -301,14 +299,18 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, N& blob, std::vector
double pre_process_time = (double)(starttime_2 - starttime_1) / CLOCKS_PER_SEC * 1000;
double process_time = (double)(starttime_3 - starttime_2) / CLOCKS_PER_SEC * 1000;
double post_process_time = (double)(starttime_4 - starttime_3) / CLOCKS_PER_SEC * 1000;
+ std::stringstream ss;
if (cudaEnable_)
{
- std::cout << "[YOLO_V8(CUDA)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
+
+ ss << "[YOLO_V8(CUDA)]: ";
}
else
{
- std::cout << "[YOLO_V8(CPU)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
+ ss << "[YOLO_V8(CPU)]: ";
}
+    ss << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process.";
+    CONSOLE_BRIDGE_logInform("%s", ss.str().c_str());
#endif // benchmark
break;
@@ -337,7 +339,7 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, N& blob, std::vector
break;
}
default:
- std::cout << "[YOLO_V8]: " << "Not support model type." << std::endl;
+ CONSOLE_BRIDGE_logError("[YOLO_V8]: Not support model type.");
}
return RET_OK;
@@ -365,7 +367,7 @@ char* YOLO_V8::WarmUpSession() {
double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
if (cudaEnable_)
{
- std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
+ CONSOLE_BRIDGE_logInform("[YOLO_V8(CUDA)]: CUDA warm-up cost %f ms.", post_process_time);
}
}
else
@@ -381,7 +383,7 @@ char* YOLO_V8::WarmUpSession() {
double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
if (cudaEnable_)
{
- std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
+ CONSOLE_BRIDGE_logInform("[YOLO_V8(CUDA)]: CUDA warm-up cost %f ms.", post_process_time);
}
#endif
}
diff --git a/test/yolo_test.cpp b/test/yolo_test.cpp
index b9f8ea1..5e1008a 100644
--- a/test/yolo_test.cpp
+++ b/test/yolo_test.cpp
@@ -76,8 +76,8 @@ TEST_F(YoloInferenceTest, CreateSessionWithValidModel)
TEST_F(YoloInferenceTest, CreateSessionWithInvalidModel)
{
params.modelPath = "nonexistent_model.onnx";
- const char* result = yolo->CreateSession(params);
- EXPECT_NE(result, nullptr) << "CreateSession should fail with invalid model path";
+ EXPECT_THROW(yolo->CreateSession(params), std::runtime_error)
+ << "CreateSession should throw an exception with invalid model path";
}
TEST_F(YoloInferenceTest, FullInferencePipeline)