diff --git a/VisionPilot/Production_Releases/0.5/CMakeLists.txt b/VisionPilot/Production_Releases/0.5/CMakeLists.txt
index 911b5dc78..8ae21c0da 100644
--- a/VisionPilot/Production_Releases/0.5/CMakeLists.txt
+++ b/VisionPilot/Production_Releases/0.5/CMakeLists.txt
@@ -7,6 +7,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
 # Find dependencies
 find_package(OpenCV REQUIRED)
 find_package(Threads REQUIRED)
+find_package(Eigen3 REQUIRED)
 
 # ONNX Runtime - user must set ONNXRUNTIME_ROOT
 if(NOT DEFINED ENV{ONNXRUNTIME_ROOT})
@@ -35,6 +36,7 @@ include_directories(
   include
   ${OpenCV_INCLUDE_DIRS}
   ${ONNXRUNTIME_INCLUDE_DIR}
+  ${EIGEN3_INCLUDE_DIR}
 )
 
 # AutoSteer inference backend
@@ -80,6 +82,25 @@ target_link_libraries(camera_utils
   ${OpenCV_LIBS}
 )
 
+# Path planning module (polynomial fitting + Bayes filter)
+add_library(path_planning
+  src/path_planning/estimator.cpp
+  src/path_planning/poly_fit.cpp
+  src/path_planning/path_finder.cpp
+)
+target_link_libraries(path_planning
+  ${OpenCV_LIBS}
+  Eigen3::Eigen
+)
+
+# Steering control module
+add_library(steering_control
+  src/steering_control/steering_controller.cpp
+)
+target_link_libraries(steering_control
+  # No external dependencies, pure C++ math
+)
+
 # Rerun SDK (optional, for visualization/logging)
 option(ENABLE_RERUN "Enable Rerun logging and visualization" OFF)
 
@@ -152,6 +173,8 @@ set(AUTOSTEER_LIBS
   autosteer_lane_filtering
   autosteer_lane_tracking
   camera_utils
+  path_planning
+  steering_control
   ${OpenCV_LIBS}
   pthread
 )
diff --git a/VisionPilot/Production_Releases/0.5/include/lane_tracking/lane_tracking.hpp b/VisionPilot/Production_Releases/0.5/include/lane_tracking/lane_tracking.hpp
index 964226881..d1b52082e 100644
--- a/VisionPilot/Production_Releases/0.5/include/lane_tracking/lane_tracking.hpp
+++ b/VisionPilot/Production_Releases/0.5/include/lane_tracking/lane_tracking.hpp
@@ -14,6 +14,11 @@ struct BEVVisuals {
   std::vector<double> bev_left_coeffs;   // BEV egoleft coeffs
   std::vector<double> bev_right_coeffs;  // BEV egoright coeffs
   std::vector<double> 
bev_center_coeffs;// BEV drivable corridor coeffs
+
+  // BEV lane points in pixel space (for PathFinder)
+  std::vector<cv::Point2f> bev_left_pts;   // Left lane points in BEV pixels
+  std::vector<cv::Point2f> bev_right_pts;  // Right lane points in BEV pixels
+  double last_valid_width_pixels = 0.0;
+
   // Width bar
   bool valid = false;
 };
diff --git a/VisionPilot/Production_Releases/0.5/include/path_planning/estimator.hpp b/VisionPilot/Production_Releases/0.5/include/path_planning/estimator.hpp
new file mode 100644
index 000000000..3a3144d20
--- /dev/null
+++ b/VisionPilot/Production_Releases/0.5/include/path_planning/estimator.hpp
@@ -0,0 +1,35 @@
+/**
+ * @file estimator.hpp
+ * @brief Bayes filter for temporal tracking of lane parameters
+ *
+ * Ported from PATHFINDER ROS2 package (no ROS2 dependencies)
+ */
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <vector>
+
+constexpr size_t STATE_DIM = 14;
+
+struct Gaussian
+{
+    double mean;
+    double variance;
+};
+
+class Estimator
+{
+private:
+    std::array<Gaussian, STATE_DIM> state;
+    std::vector<std::vector<size_t>> fusion_rules;
+
+public:
+    void initialize(const std::array<Gaussian, STATE_DIM> &init_state);
+    void predict(const std::array<Gaussian, STATE_DIM> &process);
+    void update(const std::array<Gaussian, STATE_DIM> &measurement);
+    void configureFusionGroups(const std::vector<std::vector<size_t>> &rules);
+    const std::array<Gaussian, STATE_DIM> &getState() const;
+};
+
diff --git a/VisionPilot/Production_Releases/0.5/include/path_planning/path_finder.hpp b/VisionPilot/Production_Releases/0.5/include/path_planning/path_finder.hpp
new file mode 100644
index 000000000..283802ba0
--- /dev/null
+++ b/VisionPilot/Production_Releases/0.5/include/path_planning/path_finder.hpp
@@ -0,0 +1,114 @@
+/**
+ * @file path_finder.hpp
+ * @brief Standalone PathFinder module (no ROS2)
+ *
+ * Integrates polynomial fitting and Bayes filter for robust lane tracking
+ */
+
+#pragma once
+
+#include "path_planning/poly_fit.hpp"
+#include "path_planning/estimator.hpp"
+#include <array>
+#include <cstddef>
+#include <vector>
+#include <opencv2/core.hpp>
+
+namespace autoware_pov::vision::path_planning {
+
+/**
+ * @brief Output of PathFinder module
+ */
+struct 
PathFinderOutput
+{
+    // Fused metrics from Bayes filter (temporally smoothed)
+    double cte;         // Cross-track error (meters)
+    double yaw_error;   // Yaw error (radians)
+    double curvature;   // Curvature (1/meters)
+    double lane_width;  // Corridor width (meters)
+
+    // Confidence (variance from Bayes filter)
+    double cte_variance;
+    double yaw_variance;
+    double curv_variance;
+    double lane_width_variance;
+
+    // Raw polynomial coefficients (for debugging/visualization)
+    std::array<double, 3> left_coeff;    // x = c0*y² + c1*y + c2
+    std::array<double, 3> right_coeff;   // x = c0*y² + c1*y + c2
+    std::array<double, 3> center_coeff;  // Average of left and right
+
+    // Individual curve metrics (before fusion)
+    double left_cte;
+    double left_yaw_error;
+    double left_curvature;
+    double right_cte;
+    double right_yaw_error;
+    double right_curvature;
+
+    // Validity flags
+    bool left_valid;
+    bool right_valid;
+    bool fused_valid;
+};
+
+/**
+ * @brief PathFinder for lane tracking and trajectory estimation
+ *
+ * Features:
+ * - Polynomial fitting to BEV lane points (in meters)
+ * - Temporal smoothing via Bayes filter
+ * - Robust fusion of left/right lanes
+ *
+ * Note: Expects BEV points already in metric coordinates (meters)
+ */
+class PathFinder
+{
+public:
+    /**
+     * @brief Initialize PathFinder
+     * @param default_lane_width Default lane width in meters (default: 4.0)
+     */
+    explicit PathFinder(double default_lane_width = 4.0);
+
+    /**
+     * @brief Update with new lane detections
+     *
+     * @param left_pts_bev Left lane points in BEV meters (x=lateral, y=longitudinal)
+     * @param right_pts_bev Right lane points in BEV meters
+     * @return PathFinder output (fused metrics + individual curves)
+     */
+    PathFinderOutput update(
+        const std::vector<cv::Point2f>& left_pts_bev,
+        const std::vector<cv::Point2f>& right_pts_bev);
+
+    /**
+     * @brief Get current tracked state
+     * @return Current Bayes filter state (all 14 variables)
+     */
+    const std::array<Gaussian, STATE_DIM>& getState() const;
+
+    /**
+     * @brief Reset Bayes filter to initial state
+     */
+    void reset();
+
+private:
+    double default_lane_width_;
+    Estimator bayes_filter_;
+
+    // Tuning parameters (from PATHFINDER)
+    const double PROC_SD = 0.5;       // Process noise
+    const double STD_M_CTE = 0.1;     // CTE measurement std (m)
+    const double STD_M_YAW = 0.01;    // Yaw measurement std (rad)
+    const double STD_M_CURV = 0.1;    // Curvature measurement std (1/m)
+    const double STD_M_WIDTH = 0.01;  // Width measurement std (m)
+
+    /**
+     * @brief Initialize Bayes filter
+     */
+    void initializeBayesFilter();
+};
+
+} // namespace autoware_pov::vision::path_planning
+
diff --git a/VisionPilot/Production_Releases/0.5/include/path_planning/poly_fit.hpp b/VisionPilot/Production_Releases/0.5/include/path_planning/poly_fit.hpp
new file mode 100644
index 000000000..304540015
--- /dev/null
+++ b/VisionPilot/Production_Releases/0.5/include/path_planning/poly_fit.hpp
@@ -0,0 +1,56 @@
+/**
+ * @file poly_fit.hpp
+ * @brief Polynomial fitting and lane curve utilities
+ *
+ * Extracted from PATHFINDER package's path_finder.hpp/cpp
+ */
+
+#pragma once
+
+#include <array>
+#include <vector>
+#include <opencv2/core.hpp>
+
+namespace autoware_pov::vision::path_planning {
+
+/**
+ * @brief Fitted quadratic curve: x = c0*y² + c1*y + c2
+ *
+ * Vehicle-centric BEV coordinates:
+ * - x: lateral position (positive = right)
+ * - y: longitudinal position (positive = forward)
+ */
+struct FittedCurve
+{
+    std::array<double, 3> coeff;  // [c0, c1, c2] for x = c0*y² + c1*y + c2
+    double cte;        // Cross-track error at y=0 (meters)
+    double yaw_error;  // Yaw error at y=0 (radians)
+    double curvature;  // Curvature at y=0 (meters^-1)
+
+    FittedCurve();
+    explicit FittedCurve(const std::array<double, 3> &coeff);
+};
+
+/**
+ * @brief Fit quadratic polynomial to BEV points
+ *
+ * Fits x = c0*y² + c1*y + c2 using least squares
+ *
+ * @param points BEV points in meters (x=lateral, y=longitudinal)
+ * @return Coefficients [c0, c1, c2], or NaN if insufficient points
+ */
+std::array<double, 3> fitQuadPoly(const std::vector<cv::Point2f> &points);
+
+/**
+ * @brief Calculate center lane from left and right 
lanes
+ *
+ * Averages the polynomial coefficients
+ *
+ * @param left_lane Left lane curve
+ * @param right_lane Right lane curve
+ * @return Center lane curve
+ */
+FittedCurve calculateEgoPath(const FittedCurve &left_lane, const FittedCurve &right_lane);
+
+} // namespace autoware_pov::vision::path_planning
+
diff --git a/VisionPilot/Production_Releases/0.5/include/steering_control/steering_controller.hpp b/VisionPilot/Production_Releases/0.5/include/steering_control/steering_controller.hpp
new file mode 100644
index 000000000..d3fd0aae5
--- /dev/null
+++ b/VisionPilot/Production_Releases/0.5/include/steering_control/steering_controller.hpp
@@ -0,0 +1,60 @@
+/**
+ * @file steering_controller.hpp
+ * @brief Steering controller for path following
+ *
+ * Combines Stanley controller (PI on CTE) with curvature feedforward
+ * Input: CTE, yaw_error, forward_velocity, curvature (from PathFinder)
+ * Output: Steering angle (radians)
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+
+namespace autoware_pov::vision::steering_control {
+
+/**
+ * @brief Default steering controller parameters
+ */
+
+namespace SteeringControllerDefaults {
+    constexpr double K_P = 0.8;  // Proportional gain for yaw error
+    constexpr double K_I = 1.6;  // Integral gain for CTE (Stanley controller)
+    constexpr double K_D = 1.0;  // Derivative gain for yaw error
+    constexpr double K_S = 1.0;  // Proportionality constant for curvature
+};
+
+class SteeringController
+{
+public:
+    /**
+     * @brief Constructor
+     * @param K_p Proportional gain for yaw error
+     * @param K_i Integral gain for CTE (Stanley controller)
+     * @param K_d Derivative gain for yaw error
+     * @param K_S Proportionality constant for curvature feedforward
+     */
+    SteeringController(
+        double K_p,
+        double K_i,
+        double K_d,
+        double K_S);
+
+
+    /**
+     * @brief Compute steering angle
+     * @param cte Cross-track error (meters)
+     * @param yaw_error Yaw error (radians)
+     * @param feed_forward_steering_estimate (angle in degrees)
+     * @return 
Steering angle (radians) + */ + double computeSteering(double cte, double yaw_error, double feed_forward_steering_estimate); + +private: + double K_p, K_i, K_d, K_S; + double prev_yaw_error; +}; + +} // namespace autoware_pov::vision::steering_control + diff --git a/VisionPilot/Production_Releases/0.5/include/visualization/draw_lanes.hpp b/VisionPilot/Production_Releases/0.5/include/visualization/draw_lanes.hpp index 3c9e5d4e8..6abbdfca0 100644 --- a/VisionPilot/Production_Releases/0.5/include/visualization/draw_lanes.hpp +++ b/VisionPilot/Production_Releases/0.5/include/visualization/draw_lanes.hpp @@ -77,6 +77,16 @@ void drawBEVVis( const BEVVisuals& bev_data ); +/** + * @brief Debug function to verify Pixel -> Meter conversion + * Draws the metric polynomials (projected back to pixels) on top of the BEV image. + */ +void drawMetricVerification( + cv::Mat& bev_image, + const std::vector& left_metric_coeffs, + const std::vector& right_metric_coeffs +); + } // namespace autoware_pov::vision::autosteer #endif // AUTOWARE_POV_VISION_AUTOSTEER_DRAW_LANES_HPP_ \ No newline at end of file diff --git a/VisionPilot/Production_Releases/0.5/main.cpp b/VisionPilot/Production_Releases/0.5/main.cpp index 02e81dc3a..29741ff15 100644 --- a/VisionPilot/Production_Releases/0.5/main.cpp +++ b/VisionPilot/Production_Releases/0.5/main.cpp @@ -8,127 +8,182 @@ * - Display Thread: Optionally visualizes and saves results */ - #include "inference/onnxruntime_engine.hpp" - #include "visualization/draw_lanes.hpp" - #include "lane_filtering/lane_filter.hpp" +#include "inference/onnxruntime_engine.hpp" +#include "visualization/draw_lanes.hpp" +#include "lane_filtering/lane_filter.hpp" #include "lane_tracking/lane_tracking.hpp" #include "camera/camera_utils.hpp" + #include "path_planning/path_finder.hpp" + #include "steering_control/steering_controller.hpp" #ifdef ENABLE_RERUN #include "rerun/rerun_logger.hpp" #endif - #include - #include - #include - #include - #include - #include - #include 
- #include - #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include + #include + #ifndef M_PI + #define M_PI 3.14159265358979323846 + #endif - using namespace autoware_pov::vision::autosteer; - using namespace autoware_pov::vision::camera; - using namespace std::chrono; - - // Thread-safe queue with max size limit - template - class ThreadSafeQueue { - public: - explicit ThreadSafeQueue(size_t max_size = 10) : max_size_(max_size) {} - - void push(const T& item) { - std::unique_lock lock(mutex_); - // Wait if queue is full (backpressure) - cond_not_full_.wait(lock, [this] { - return queue_.size() < max_size_ || !active_; - }); - if (!active_) return; - - queue_.push(item); - cond_not_empty_.notify_one(); - } - - bool try_pop(T& item) { - std::unique_lock lock(mutex_); - if (queue_.empty()) { - return false; - } - item = queue_.front(); - queue_.pop(); - cond_not_full_.notify_one(); // Notify that space is available - return true; - } - - T pop() { - std::unique_lock lock(mutex_); - cond_not_empty_.wait(lock, [this] { return !queue_.empty() || !active_; }); - if (!active_ && queue_.empty()) { - return T(); - } - T item = queue_.front(); - queue_.pop(); - cond_not_full_.notify_one(); // Notify that space is available - return item; - } - - void stop() { - active_ = false; - cond_not_empty_.notify_all(); - cond_not_full_.notify_all(); - } +using namespace autoware_pov::vision::autosteer; +using namespace autoware_pov::vision::camera; +using namespace autoware_pov::vision::path_planning; +using namespace autoware_pov::vision::steering_control; +using namespace std::chrono; + +// Thread-safe queue with max size limit +template +class ThreadSafeQueue { +public: + explicit ThreadSafeQueue(size_t max_size = 10) : max_size_(max_size) {} + + void push(const T& item) { + std::unique_lock lock(mutex_); + // Wait if queue is full (backpressure) + cond_not_full_.wait(lock, [this] { + return queue_.size() < max_size_ 
|| !active_; + }); + if (!active_) return; + + queue_.push(item); + cond_not_empty_.notify_one(); + } + + bool try_pop(T& item) { + std::unique_lock lock(mutex_); + if (queue_.empty()) { + return false; + } + item = queue_.front(); + queue_.pop(); + cond_not_full_.notify_one(); // Notify that space is available + return true; + } + + T pop() { + std::unique_lock lock(mutex_); + cond_not_empty_.wait(lock, [this] { return !queue_.empty() || !active_; }); + if (!active_ && queue_.empty()) { + return T(); + } + T item = queue_.front(); + queue_.pop(); + cond_not_full_.notify_one(); // Notify that space is available + return item; + } + + void stop() { + active_ = false; + cond_not_empty_.notify_all(); + cond_not_full_.notify_all(); + } + + size_t size() { + std::unique_lock lock(mutex_); + return queue_.size(); + } + +private: + std::queue queue_; + std::mutex mutex_; + std::condition_variable cond_not_empty_; + std::condition_variable cond_not_full_; + std::atomic active_{true}; + size_t max_size_; +}; + +// Timestamped frame +struct TimestampedFrame { + cv::Mat frame; + int frame_number; + steady_clock::time_point timestamp; +}; + +// Inference result +struct InferenceResult { + cv::Mat frame; + LaneSegmentation lanes; + DualViewMetrics metrics; + int frame_number; + steady_clock::time_point capture_time; + steady_clock::time_point inference_time; + double steering_angle = 0.0; // Steering angle from controller (radians) + PathFinderOutput path_output; // Added for metric debug +}; + +// Performance metrics +struct PerformanceMetrics { + std::atomic total_capture_us{0}; + std::atomic total_inference_us{0}; + std::atomic total_display_us{0}; + std::atomic total_end_to_end_us{0}; + std::atomic frame_count{0}; + bool measure_latency{true}; +}; - size_t size() { - std::unique_lock lock(mutex_); - return queue_.size(); - } +/** + * @brief Transform BEV pixel coordinates to BEV metric coordinates (meters) + * + * Transformation based on 640x640 BEV image: + * Input 
(Pixels): + * - Origin (0,0) at Top-Left + * - x right, y down + * - Vehicle at Bottom-Center (320, 640) + * + * Output (Meters): + * - Origin (0,0) at Vehicle Position + * - x right (lateral), y forward (longitudinal) + * - Range: X [-20m, 20m], Y [0m, 40m] + * - Scale: 640 pixels = 40 meters + * + * @param bev_pixels BEV points in pixel coordinates (from LaneTracker) + * @return BEV points in metric coordinates (meters, x=lateral, y=longitudinal) + */ +std::vector transformPixelsToMeters(const std::vector& bev_pixels) { + std::vector bev_meters; + + if (bev_pixels.empty()) { + return bev_meters; + } + + + const double bev_width_px = 640.0; + const double bev_height_px = 640.0; + const double bev_range_m = 40.0; + + const double scale = bev_range_m / bev_height_px; // 40m / 640px = 0.0625 m/px + const double center_x = bev_width_px / 2.0; // 320.0 + const double origin_y = bev_height_px; // 640.0 (bottom) + //check again + for (const auto& pt : bev_pixels) { + bev_meters.push_back(cv::Point2f( + (pt.x - center_x) * scale, // Lateral: (x - 320) * scale + (origin_y - pt.y) * scale // Longitudinal: (640 - y) * scale (Flip Y to match image origin) + )); + } + + return bev_meters; +} - private: - std::queue queue_; - std::mutex mutex_; - std::condition_variable cond_not_empty_; - std::condition_variable cond_not_full_; - std::atomic active_{true}; - size_t max_size_; - }; - - // Timestamped frame - struct TimestampedFrame { - cv::Mat frame; - int frame_number; - steady_clock::time_point timestamp; - }; - - // Inference result - struct InferenceResult { - cv::Mat frame; - LaneSegmentation lanes; - DualViewMetrics metrics; - int frame_number; - steady_clock::time_point capture_time; - steady_clock::time_point inference_time; - }; - - // Performance metrics - struct PerformanceMetrics { - std::atomic total_capture_us{0}; - std::atomic total_inference_us{0}; - std::atomic total_display_us{0}; - std::atomic total_end_to_end_us{0}; - std::atomic frame_count{0}; - bool 
measure_latency{true}; - }; - - /** - * @brief Unified capture thread - handles both video files and cameras - */ - void captureThread( +/** + * @brief Unified capture thread - handles both video files and cameras + */ +void captureThread( const std::string& source, bool is_camera, - ThreadSafeQueue& queue, - PerformanceMetrics& metrics, - std::atomic& running) - { + ThreadSafeQueue& queue, + PerformanceMetrics& metrics, + std::atomic& running) +{ cv::VideoCapture cap; if (is_camera) { @@ -139,41 +194,41 @@ cap.open(source); } - if (!cap.isOpened()) { + if (!cap.isOpened()) { std::cerr << "Failed to open source: " << source << std::endl; - running.store(false); - return; - } + running.store(false); + return; + } - int frame_width = static_cast(cap.get(cv::CAP_PROP_FRAME_WIDTH)); - int frame_height = static_cast(cap.get(cv::CAP_PROP_FRAME_HEIGHT)); + int frame_width = static_cast(cap.get(cv::CAP_PROP_FRAME_WIDTH)); + int frame_height = static_cast(cap.get(cv::CAP_PROP_FRAME_HEIGHT)); double fps = cap.get(cv::CAP_PROP_FPS); std::cout << "Source opened: " << frame_width << "x" << frame_height << " @ " << fps << " FPS\n" << std::endl; if (!is_camera) { - int total_frames = static_cast(cap.get(cv::CAP_PROP_FRAME_COUNT)); - std::cout << "Total frames: " << total_frames << std::endl; + int total_frames = static_cast(cap.get(cv::CAP_PROP_FRAME_COUNT)); + std::cout << "Total frames: " << total_frames << std::endl; } // For camera: throttle 30fps → 10fps int frame_skip = 0; int skip_interval = is_camera ? 
3 : 1; - int frame_number = 0; - while (running.load()) { - auto t_start = steady_clock::now(); - - cv::Mat frame; + int frame_number = 0; + while (running.load()) { + auto t_start = steady_clock::now(); + + cv::Mat frame; if (!cap.read(frame) || frame.empty()) { if (is_camera) { std::cerr << "Camera error" << std::endl; } else { - std::cout << "End of video stream" << std::endl; + std::cout << "End of video stream" << std::endl; } - break; - } + break; + } auto t_end = steady_clock::now(); @@ -181,47 +236,53 @@ if (++frame_skip < skip_interval) continue; frame_skip = 0; - long capture_us = duration_cast(t_end - t_start).count(); - metrics.total_capture_us.fetch_add(capture_us); + long capture_us = duration_cast(t_end - t_start).count(); + metrics.total_capture_us.fetch_add(capture_us); - TimestampedFrame tf; - tf.frame = frame; - tf.frame_number = frame_number++; - tf.timestamp = t_end; - queue.push(tf); - } + TimestampedFrame tf; + tf.frame = frame; + tf.frame_number = frame_number++; + tf.timestamp = t_end; + queue.push(tf); + } - running.store(false); - queue.stop(); + running.store(false); + queue.stop(); cap.release(); - } - - /** - * @brief Inference thread - runs lane detection model - */ - void inferenceThread( - AutoSteerOnnxEngine& engine, - ThreadSafeQueue& input_queue, - ThreadSafeQueue& output_queue, - PerformanceMetrics& metrics, - std::atomic& running, - float threshold +} + +/** + * @brief Inference thread - runs lane detection model + */ +void inferenceThread( + AutoSteerOnnxEngine& engine, + ThreadSafeQueue& input_queue, + ThreadSafeQueue& output_queue, + PerformanceMetrics& metrics, + std::atomic& running, + float threshold, + PathFinder* path_finder = nullptr, + SteeringController* steering_controller = nullptr #ifdef ENABLE_RERUN , autoware_pov::vision::rerun_integration::RerunLogger* rerun_logger = nullptr #endif ) - { - // Init lane filter - LaneFilter lane_filter(0.5f); +{ + // Init lane filter + LaneFilter lane_filter(0.5f); // Init lane 
tracker LaneTracker lane_tracker; - while (running.load()) { - TimestampedFrame tf = input_queue.pop(); - if (tf.frame.empty()) continue; + //Buffer for last 2 frames' + boost::circular_buffer image_buffer(2); + - auto t_inference_start = steady_clock::now(); + while (running.load()) { + TimestampedFrame tf = input_queue.pop(); + if (tf.frame.empty()) continue; + + auto t_inference_start = steady_clock::now(); // Crop tf.frame 420 pixels top tf.frame = tf.frame(cv::Rect( @@ -230,12 +291,18 @@ tf.frame.cols, tf.frame.rows - 420 )); + + image_buffer.push_back(tf.frame.clone()); // Store a copy of the cropped frame - // Run inference - LaneSegmentation raw_lanes = engine.inference(tf.frame, threshold); + // Run Ego Lanes inference + LaneSegmentation raw_lanes = engine.inference(tf.frame, threshold); - // Post-processing with lane filter - LaneSegmentation filtered_lanes = lane_filter.update(raw_lanes); + // if( image_buffer.full()) { + // // Do inference on AutoSteer with 2 frames (current + previous) + // continue; + // } + // Post-processing with lane filter + LaneSegmentation filtered_lanes = lane_filter.update(raw_lanes); // Further processing with lane tracker cv::Size frame_size(tf.frame.cols, tf.frame.rows); @@ -247,12 +314,68 @@ LaneSegmentation final_lanes = track_result.first; DualViewMetrics final_metrics = track_result.second; - auto t_inference_end = steady_clock::now(); - - // Calculate inference latency - long inference_us = duration_cast( - t_inference_end - t_inference_start).count(); - metrics.total_inference_us.fetch_add(inference_us); + auto t_inference_end = steady_clock::now(); + + // Calculate inference latency + long inference_us = duration_cast( + t_inference_end - t_inference_start).count(); + metrics.total_inference_us.fetch_add(inference_us); + + // ======================================== + // PATHFINDER (Polynomial Fitting + Bayes Filter) + STEERING CONTROL + // ======================================== + double steering_angle = 0.0; // 
Initialize steering angle + PathFinderOutput path_output; // Declare at higher scope for result storage + path_output.fused_valid = false; // Initialize as invalid + + if (path_finder != nullptr && final_metrics.bev_visuals.valid) { + + // 1. Get BEV points in PIXEL space from LaneTracker + std::vector left_bev_pixels = final_metrics.bev_visuals.bev_left_pts; + std::vector right_bev_pixels = final_metrics.bev_visuals.bev_right_pts; + + // 2. Transform BEV pixels → BEV meters + // TODO: Calibrate transformPixelsToMeters() for your specific camera + std::vector left_bev_meters = transformPixelsToMeters(left_bev_pixels); + std::vector right_bev_meters = transformPixelsToMeters(right_bev_pixels); + + // 3. Update PathFinder (polynomial fit + Bayes filter in metric space) + path_output = path_finder->update(left_bev_meters, right_bev_meters); + + // 4. Compute steering angle (if controller available) + if (steering_controller != nullptr && path_output.fused_valid) { + steering_angle = steering_controller->computeSteering( + path_output.cte, + path_output.yaw_error, + path_output.curvature + ); + } + + // 5. 
Print output (cross-track error, yaw error, curvature, lane width + variances + steering) + if (path_output.fused_valid) { + std::cout << "[PathFinder Frame " << tf.frame_number << "] " + << "CTE: " << std::fixed << std::setprecision(3) << path_output.cte << " m " + << "(var: " << path_output.cte_variance << "), " + << "Yaw: " << path_output.yaw_error << " rad " + << "(var: " << path_output.yaw_variance << "), " + << "Curvature: " << path_output.curvature << " 1/m " + << "(var: " << path_output.curv_variance << "), " + << "Width: " << path_output.lane_width << " m " + << "(var: " << path_output.lane_width_variance << ")"; + + if (steering_controller != nullptr) { + std::cout << ", Steering: " << std::setprecision(4) << steering_angle << " rad " + << "(" << (steering_angle * 180.0 / M_PI) << " deg)"; + } + std::cout << std::endl; + + // Print polynomial coefficients (for control interface) + std::cout << " Center polynomial: c0=" << path_output.center_coeff[0] + << ", c1=" << path_output.center_coeff[1] + << ", c2=" << path_output.center_coeff[2] << std::endl; + } + } + // ======================================== #ifdef ENABLE_RERUN // Log to Rerun with downsampled frame (reduces data by 75%) @@ -292,28 +415,31 @@ result.frame_number = tf.frame_number; result.capture_time = tf.timestamp; result.inference_time = t_inference_end; + result.steering_angle = steering_angle; // Store computed steering angle + result.path_output = path_output; // Store for viz output_queue.push(result); - } + } - output_queue.stop(); - } - - /** - * @brief Display thread - handles visualization and video saving - */ - void displayThread( - ThreadSafeQueue& queue, - PerformanceMetrics& metrics, - std::atomic& running, - bool enable_viz, - bool save_video, - const std::string& output_video_path = "./assets/output_video.mp4" - ) - { - // Visualization setup + output_queue.stop(); +} + +/** + * @brief Display thread - handles visualization and video saving + */ +void displayThread( + 
ThreadSafeQueue& queue, + PerformanceMetrics& metrics, + std::atomic& running, + bool enable_viz, + bool save_video, + const std::string& output_video_path, + const std::string& csv_log_path +) +{ + // Visualization setup int window_width = 1600; int window_height = 1080; - if (enable_viz) { + if (enable_viz) { cv::namedWindow( "AutoSteer Inference", cv::WINDOW_NORMAL @@ -323,55 +449,56 @@ window_width, window_height ); - } + } - // Video writer setup - cv::VideoWriter video_writer; - bool video_writer_initialized = false; + // Video writer setup + cv::VideoWriter video_writer; + bool video_writer_initialized = false; - if (save_video && enable_viz) { - std::cout << "Video saving enabled. Output: " << output_video_path << std::endl; - } + if (save_video && enable_viz) { + std::cout << "Video saving enabled. Output: " << output_video_path << std::endl; + } // CSV logger for curve params metrics std::ofstream csv_file; - csv_file.open("./assets/curve_params_metrics.csv"); + csv_file.open(csv_log_path); if (csv_file.is_open()) { // Write header csv_file << "frame_id,timestamp_ms," << "orig_lane_offset,orig_yaw_offset,orig_curvature," - << "bev_lane_offset,bev_yaw_offset,bev_curvature\n"; + << "pathfinder_cte,pathfinder_yaw_error,pathfinder_curvature," + << "steering_angle_rad,steering_angle_deg\n"; - std::cout << "CSV logging enabled: curve_params_metrics.csv" << std::endl; + std::cout << "CSV logging enabled: " << csv_log_path << std::endl; } else { - std::cerr << "Error: could not open curve_params_metrics.csv for writing" << std::endl; + std::cerr << "Error: could not open " << csv_log_path << " for writing" << std::endl; } - while (running.load()) { - InferenceResult result; - if (!queue.try_pop(result)) { - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - continue; - } + while (running.load()) { + InferenceResult result; + if (!queue.try_pop(result)) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + continue; + } - auto t_display_start 
= steady_clock::now(); + auto t_display_start = steady_clock::now(); - int count = metrics.frame_count.fetch_add(1) + 1; + int count = metrics.frame_count.fetch_add(1) + 1; - // Console output: frame info - std::cout << "[Frame " << result.frame_number << "] Processed" << std::endl; + // Console output: frame info + std::cout << "[Frame " << result.frame_number << "] Processed" << std::endl; - // Visualization - if (enable_viz) { - // drawLanesInPlace(result.frame, result.lanes, 2); - // drawFilteredLanesInPlace(result.frame, result.lanes); + // Visualization + if (enable_viz) { + // drawLanesInPlace(result.frame, result.lanes, 2); + // drawFilteredLanesInPlace(result.frame, result.lanes); // 1. Init 3 views: // - Raw masks (debugging) // - Polyfit lanes (final prod) // - BEV vis - cv::Mat view_debug = result.frame.clone(); - cv::Mat view_final = result.frame.clone(); + cv::Mat view_debug = result.frame.clone(); + cv::Mat view_final = result.frame.clone(); cv::Mat view_bev( 640, 640, @@ -380,20 +507,31 @@ ); // 2. Draw 3 views - drawRawMasksInPlace( - view_debug, - result.lanes - ); - drawPolyFitLanesInPlace( - view_final, - result.lanes - ); + drawRawMasksInPlace( + view_debug, + result.lanes + ); + drawPolyFitLanesInPlace( + view_final, + result.lanes + ); drawBEVVis( view_bev, result.frame, result.metrics.bev_visuals ); + // Draw Metric Debug (projected back to pixels) - only if path is valid + if (result.path_output.fused_valid) { + std::vector left_coeffs(result.path_output.left_coeff.begin(), result.path_output.left_coeff.end()); + std::vector right_coeffs(result.path_output.right_coeff.begin(), result.path_output.right_coeff.end()); + autoware_pov::vision::autosteer::drawMetricVerification( + view_bev, + left_coeffs, + right_coeffs + ); + } + // 3. 
View layout handling // Layout: // | [Debug] | [ BEV (640x640) ] @@ -401,9 +539,9 @@ // Left col: debug (top) + final (bottom) cv::Mat left_col; - cv::vconcat( - view_debug, - view_final, + cv::vconcat( + view_debug, + view_final, left_col ); @@ -435,49 +573,49 @@ cv::hconcat( left_col, right_col, - stacked_view - ); + stacked_view + ); - // Initialize video writer on first frame - if (save_video && !video_writer_initialized) { - // Use H.264 for better performance and smaller file size - // XVID is slower and creates larger files - int fourcc = cv::VideoWriter::fourcc('a', 'v', 'c', '1'); // H.264 - video_writer.open( - output_video_path, - fourcc, + // Initialize video writer on first frame + if (save_video && !video_writer_initialized) { + // Use H.264 for better performance and smaller file size + // XVID is slower and creates larger files + int fourcc = cv::VideoWriter::fourcc('a', 'v', 'c', '1'); // H.264 + video_writer.open( + output_video_path, + fourcc, 10.0, - stacked_view.size(), - true - ); + stacked_view.size(), + true + ); - if (video_writer.isOpened()) { - std::cout << "Video writer initialized (H.264): " << stacked_view.cols + if (video_writer.isOpened()) { + std::cout << "Video writer initialized (H.264): " << stacked_view.cols << "x" << stacked_view.rows << " @ 10 fps" << std::endl; - video_writer_initialized = true; - } else { - std::cerr << "Warning: Failed to initialize video writer" << std::endl; - } - } - - // Write to video - if (save_video && video_writer_initialized && video_writer.isOpened()) { - video_writer.write(stacked_view); - } - - // Display - cv::imshow("AutoSteer Inference", stacked_view); - - if (cv::waitKey(1) == 'q') { - running.store(false); - break; - } - } + video_writer_initialized = true; + } else { + std::cerr << "Warning: Failed to initialize video writer" << std::endl; + } + } + + // Write to video + if (save_video && video_writer_initialized && video_writer.isOpened()) { + video_writer.write(stacked_view); + } + + // 
Display + cv::imshow("AutoSteer Inference", stacked_view); + + if (cv::waitKey(1) == 'q') { + running.store(false); + break; + } + } - // CSV logging for curve params + // CSV logging for curve params (use PathFinder filtered outputs, not raw metrics) if ( csv_file.is_open() && - result.lanes.path_valid + result.path_output.fused_valid // Use PathFinder validity, not lanes.path_valid ) { // Timestamp calc, from captured time auto ms_since_epoch = duration_cast( @@ -486,95 +624,114 @@ csv_file << result.frame_number << "," << ms_since_epoch << "," - // Orig metrics + // Orig metrics (for reference, but not used for tuning) << result.metrics.orig_lane_offset << "," << result.metrics.orig_yaw_offset << "," << result.metrics.orig_curvature << "," - // BEV metrics - << result.metrics.bev_lane_offset << "," - << result.metrics.bev_yaw_offset << "," - << result.metrics.bev_curvature << "\n"; + // PathFinder filtered metrics (CORRECT - these match console output) + << result.path_output.cte << "," + << result.path_output.yaw_error << "," + << result.path_output.curvature << "," + // Steering angle + << std::fixed << std::setprecision(6) << result.steering_angle << "," + << (result.steering_angle * 180.0 / M_PI) << "\n"; } - auto t_display_end = steady_clock::now(); - - // Calculate latencies - long display_us = duration_cast( - t_display_end - t_display_start).count(); - long end_to_end_us = duration_cast( - t_display_end - result.capture_time).count(); - - metrics.total_display_us.fetch_add(display_us); - metrics.total_end_to_end_us.fetch_add(end_to_end_us); - - // Print metrics every 30 frames - if (metrics.measure_latency && count % 30 == 0) { - long avg_capture = metrics.total_capture_us.load() / count; - long avg_inference = metrics.total_inference_us.load() / count; - long avg_display = metrics.total_display_us.load() / count; - long avg_e2e = metrics.total_end_to_end_us.load() / count; - - std::cout << "\n========================================\n"; - std::cout << 
"Frames processed: " << count << "\n"; - std::cout << "Pipeline Latencies:\n"; - std::cout << " 1. Capture: " << std::fixed << std::setprecision(2) - << (avg_capture / 1000.0) << " ms\n"; - std::cout << " 2. Inference: " << (avg_inference / 1000.0) - << " ms (" << (1000000.0 / avg_inference) << " FPS capable)\n"; - std::cout << " 3. Display: " << (avg_display / 1000.0) << " ms\n"; - std::cout << " 4. End-to-End: " << (avg_e2e / 1000.0) << " ms\n"; - std::cout << "Throughput: " << (count / (avg_e2e * count / 1000000.0)) << " FPS\n"; - std::cout << "========================================\n"; - } - } + auto t_display_end = steady_clock::now(); + + // Calculate latencies + long display_us = duration_cast( + t_display_end - t_display_start).count(); + long end_to_end_us = duration_cast( + t_display_end - result.capture_time).count(); + + metrics.total_display_us.fetch_add(display_us); + metrics.total_end_to_end_us.fetch_add(end_to_end_us); + + // Print metrics every 30 frames + if (metrics.measure_latency && count % 30 == 0) { + long avg_capture = metrics.total_capture_us.load() / count; + long avg_inference = metrics.total_inference_us.load() / count; + long avg_display = metrics.total_display_us.load() / count; + long avg_e2e = metrics.total_end_to_end_us.load() / count; + + std::cout << "\n========================================\n"; + std::cout << "Frames processed: " << count << "\n"; + std::cout << "Pipeline Latencies:\n"; + std::cout << " 1. Capture: " << std::fixed << std::setprecision(2) + << (avg_capture / 1000.0) << " ms\n"; + std::cout << " 2. Inference: " << (avg_inference / 1000.0) + << " ms (" << (1000000.0 / avg_inference) << " FPS capable)\n"; + std::cout << " 3. Display: " << (avg_display / 1000.0) << " ms\n"; + std::cout << " 4. 
End-to-End: " << (avg_e2e / 1000.0) << " ms\n"; + std::cout << "Throughput: " << (count / (avg_e2e * count / 1000000.0)) << " FPS\n"; + std::cout << "========================================\n"; + } + } // Cleanups // Video writer - if (save_video && video_writer_initialized && video_writer.isOpened()) { - video_writer.release(); - std::cout << "\nVideo saved to: " << output_video_path << std::endl; - } + if (save_video && video_writer_initialized && video_writer.isOpened()) { + video_writer.release(); + std::cout << "\nVideo saved to: " << output_video_path << std::endl; + } // Vis - if (enable_viz) { - cv::destroyAllWindows(); - } + if (enable_viz) { + cv::destroyAllWindows(); + } // CSV logger if (csv_file.is_open()) { csv_file.close(); std::cout << "CSV log saved." << std::endl; } - } +} - int main(int argc, char** argv) - { +int main(int argc, char** argv) +{ if (argc < 2) { std::cerr << "Usage:\n"; std::cerr << " " << argv[0] << " camera [device_id] [options...]\n"; std::cerr << " " << argv[0] << " video [device_id] [options...]\n\n"; std::cerr << "Arguments:\n"; std::cerr << " model: ONNX model file (.onnx)\n"; - std::cerr << " provider: 'cpu' or 'tensorrt'\n"; - std::cerr << " precision: 'fp32' or 'fp16' (for TensorRT)\n"; - std::cerr << " device_id: (optional) GPU device ID (default: 0)\n"; - std::cerr << " cache_dir: (optional) TensorRT cache directory (default: ./trt_cache)\n"; - std::cerr << " threshold: (optional) Segmentation threshold (default: 0.0)\n"; - std::cerr << " measure_latency: (optional) 'true' to show metrics (default: true)\n"; - std::cerr << " enable_viz: (optional) 'true' for visualization (default: true)\n"; - std::cerr << " save_video: (optional) 'true' to save video (default: false)\n"; + std::cerr << " provider: 'cpu' or 'tensorrt'\n"; + std::cerr << " precision: 'fp32' or 'fp16' (for TensorRT)\n"; + std::cerr << " device_id: (optional) GPU device ID (default: 0)\n"; + std::cerr << " cache_dir: (optional) TensorRT cache directory 
(default: ./trt_cache)\n"; + std::cerr << " threshold: (optional) Segmentation threshold (default: 0.0)\n"; + std::cerr << " measure_latency: (optional) 'true' to show metrics (default: true)\n"; + std::cerr << " enable_viz: (optional) 'true' for visualization (default: true)\n"; + std::cerr << " save_video: (optional) 'true' to save video (default: false)\n"; std::cerr << " output_video: (optional) Output video path (default: output.avi)\n\n"; - std::cerr << "Rerun Logging (optional):\n"; - std::cerr << " --rerun : Enable Rerun live viewer\n"; - std::cerr << " --rerun-save [path] : Save to .rrd file (default: autosteer.rrd)\n\n"; - std::cerr << "Examples:\n"; - std::cerr << " # Camera with live Rerun viewer:\n"; - std::cerr << " " << argv[0] << " camera model.onnx tensorrt fp16 --rerun\n\n"; - std::cerr << " # Camera saving to file:\n"; - std::cerr << " " << argv[0] << " camera model.onnx tensorrt fp16 --rerun-save recording.rrd\n\n"; - std::cerr << " # Video without Rerun:\n"; - std::cerr << " " << argv[0] << " video test.mp4 model.onnx cpu fp32\n"; + std::cerr << "Rerun Logging (optional):\n"; + std::cerr << " --rerun : Enable Rerun live viewer\n"; + std::cerr << " --rerun-save [path] : Save to .rrd file (default: autosteer.rrd)\n\n"; + std::cerr << "PathFinder (optional):\n"; + std::cerr << " --path-planner : Enable PathFinder (Bayes filter + polynomial fitting)\n"; + std::cerr << " --pathfinder : (alias)\n\n"; + std::cerr << "Steering Control (optional, requires --path-planner):\n"; + std::cerr << " --steering-control : Enable steering controller\n"; + std::cerr << " --Ks [val] : Proportionality constant for curvature feedforward (default: " + << SteeringControllerDefaults::K_S << ")\n"; + std::cerr << " --Kp [val] : Proportional gain (default: " + << SteeringControllerDefaults::K_P << ")\n"; + std::cerr << " --Ki [val] : Integral gain (default: " + << SteeringControllerDefaults::K_I << ")\n"; + std::cerr << " --Kd [val] : Derivative gain (default: " + << 
SteeringControllerDefaults::K_D << ")\n"; + std::cerr << " --csv-log [path] : CSV log file path (default: ./assets/curve_params_metrics.csv)\n"; + std::cerr << "Examples:\n"; + std::cerr << " # Camera with live Rerun viewer:\n"; + std::cerr << " " << argv[0] << " camera model.onnx tensorrt fp16 --rerun\n\n"; + std::cerr << " # Video with path planning:\n"; + std::cerr << " " << argv[0] << " video test.mp4 model.onnx cpu fp32 --path-planner\n\n"; + std::cerr << " # Camera with path planning + Rerun:\n"; + std::cerr << " " << argv[0] << " camera model.onnx tensorrt fp16 --path-planner --rerun\n\n"; + std::cerr << " # Video without extras:\n"; + std::cerr << " " << argv[0] << " video test.mp4 model.onnx cpu fp32\n"; return 1; } @@ -627,8 +784,8 @@ } else { std::cerr << "Unknown mode: " << mode << std::endl; std::cerr << "Use 'camera' or 'video'" << std::endl; - return 1; - } + return 1; + } // Parse optional arguments (different offset for camera vs video) int base_idx = is_camera ? 5 : 6; @@ -639,131 +796,186 @@ bool enable_viz = (argc >= base_idx + 5) ? (std::string(argv[base_idx + 4]) == "true") : true; bool save_video = (argc >= base_idx + 6) ? (std::string(argv[base_idx + 5]) == "true") : false; std::string output_video_path = (argc >= base_idx + 7) ? 
argv[base_idx + 6] : "output.avi"; + + // Parse Rerun flags, PathFinder flags, and Steering Control flags (check all remaining arguments) + bool enable_rerun = false; + bool spawn_rerun_viewer = true; + std::string rerun_save_path = ""; + bool enable_path_planner = false; + bool enable_steering_control = false; + + // Steering controller parameters (defaults from steering_controller.hpp) + double K_p = SteeringControllerDefaults::K_P; + double K_i = SteeringControllerDefaults::K_I; + double K_d = SteeringControllerDefaults::K_D; + double K_S = SteeringControllerDefaults::K_S; + std::string csv_log_path = "./assets/curve_params_metrics.csv"; + + for (int i = base_idx + 7; i < argc; ++i) { + std::string arg = argv[i]; + if (arg == "--rerun") { + enable_rerun = true; + } else if (arg == "--rerun-save") { + enable_rerun = true; + spawn_rerun_viewer = false; + if (i + 1 < argc && argv[i + 1][0] != '-') { + rerun_save_path = argv[++i]; + } else { + rerun_save_path = "autosteer.rrd"; + } + } else if (arg == "--path-planner" || arg == "--pathfinder") { + enable_path_planner = true; + } else if (arg == "--steering-control") { + enable_steering_control = true; + } else if (arg == "--Ks" && i + 1 < argc) { + K_S = std::atof(argv[++i]); + } else if (arg == "--Kp" && i + 1 < argc) { + K_p = std::atof(argv[++i]); + } else if (arg == "--Ki" && i + 1 < argc) { + K_i = std::atof(argv[++i]); + } else if (arg == "--Kd" && i + 1 < argc) { + K_d = std::atof(argv[++i]); + } else if (arg == "--csv-log" && i + 1 < argc) { + csv_log_path = argv[++i]; + } + } + + // Steering control requires PathFinder + if (enable_steering_control && !enable_path_planner) { + std::cerr << "Warning: --steering-control requires --path-planner. Enabling PathFinder." << std::endl; + enable_path_planner = true; + } + + if (save_video && !enable_viz) { + std::cerr << "Warning: save_video requires enable_viz=true. Enabling visualization." 
<< std::endl; + enable_viz = true; + } + + // Initialize inference backend + std::cout << "Loading model: " << model_path << std::endl; + std::cout << "Provider: " << provider << " | Precision: " << precision << std::endl; + + if (provider == "tensorrt") { + std::cout << "Device ID: " << device_id << " | Cache dir: " << cache_dir << std::endl; + std::cout << "\nNote: TensorRT engine build may take 20-30 seconds on first run..." << std::endl; + } + + AutoSteerOnnxEngine engine(model_path, provider, precision, device_id, cache_dir); + std::cout << "Backend initialized!\n" << std::endl; + + // Warm-up inference (builds TensorRT engine on first run) + if (provider == "tensorrt") { + std::cout << "Running warm-up inference to build TensorRT engine..." << std::endl; + std::cout << "This may take 20-60 seconds on first run. Please wait...\n" << std::endl; + + cv::Mat dummy_frame(720, 1280, CV_8UC3, cv::Scalar(128, 128, 128)); + auto warmup_start = std::chrono::steady_clock::now(); + + // Run warm-up inference + LaneSegmentation warmup_result = engine.inference(dummy_frame, threshold); + + auto warmup_end = std::chrono::steady_clock::now(); + double warmup_time = std::chrono::duration_cast( + warmup_end - warmup_start).count() / 1000.0; + + std::cout << "Warm-up complete! 
(took " << std::fixed << std::setprecision(1) + << warmup_time << "s)" << std::endl; + std::cout << "TensorRT engine is now cached and ready.\n" << std::endl; + } + + std::cout << "Backend ready!\n" << std::endl; + +#ifdef ENABLE_RERUN + // Initialize Rerun logger (optional) + std::unique_ptr rerun_logger; + if (enable_rerun) { + rerun_logger = std::make_unique( + "AutoSteer", spawn_rerun_viewer, rerun_save_path); + } +#endif - // Parse Rerun flags (check all remaining arguments) - bool enable_rerun = false; - bool spawn_rerun_viewer = true; - std::string rerun_save_path = ""; - - for (int i = base_idx + 7; i < argc; ++i) { - std::string arg = argv[i]; - if (arg == "--rerun") { - enable_rerun = true; - } else if (arg == "--rerun-save") { - enable_rerun = true; - spawn_rerun_viewer = false; - if (i + 1 < argc && argv[i + 1][0] != '-') { - rerun_save_path = argv[++i]; - } else { - rerun_save_path = "autosteer.rrd"; - } - } - } - - if (save_video && !enable_viz) { - std::cerr << "Warning: save_video requires enable_viz=true. Enabling visualization." << std::endl; - enable_viz = true; - } - - // Initialize inference backend - std::cout << "Loading model: " << model_path << std::endl; - std::cout << "Provider: " << provider << " | Precision: " << precision << std::endl; - - if (provider == "tensorrt") { - std::cout << "Device ID: " << device_id << " | Cache dir: " << cache_dir << std::endl; - std::cout << "\nNote: TensorRT engine build may take 20-30 seconds on first run..." << std::endl; - } - - AutoSteerOnnxEngine engine(model_path, provider, precision, device_id, cache_dir); - std::cout << "Backend initialized!\n" << std::endl; - - // Warm-up inference (builds TensorRT engine on first run) - if (provider == "tensorrt") { - std::cout << "Running warm-up inference to build TensorRT engine..." << std::endl; - std::cout << "This may take 20-60 seconds on first run. 
Please wait...\n" << std::endl; - - cv::Mat dummy_frame(720, 1280, CV_8UC3, cv::Scalar(128, 128, 128)); - auto warmup_start = std::chrono::steady_clock::now(); - - // Run warm-up inference - LaneSegmentation warmup_result = engine.inference(dummy_frame, threshold); - - auto warmup_end = std::chrono::steady_clock::now(); - double warmup_time = std::chrono::duration_cast( - warmup_end - warmup_start).count() / 1000.0; - - std::cout << "Warm-up complete! (took " << std::fixed << std::setprecision(1) - << warmup_time << "s)" << std::endl; - std::cout << "TensorRT engine is now cached and ready.\n" << std::endl; - } - - std::cout << "Backend ready!\n" << std::endl; - - #ifdef ENABLE_RERUN - // Initialize Rerun logger (optional) - std::unique_ptr rerun_logger; - if (enable_rerun) { - rerun_logger = std::make_unique( - "AutoSteer", spawn_rerun_viewer, rerun_save_path); - } - #endif - - // Thread-safe queues with bounded size (prevents memory overflow) - ThreadSafeQueue capture_queue(5); // Max 5 frames waiting for inference - ThreadSafeQueue display_queue(5); // Max 5 frames waiting for display - - // Performance metrics - PerformanceMetrics metrics; - metrics.measure_latency = measure_latency; - std::atomic running{true}; - - // Launch threads - std::cout << "========================================" << std::endl; - std::cout << "Starting multi-threaded inference pipeline" << std::endl; - std::cout << "========================================" << std::endl; - std::cout << "Source: " << (is_camera ? "Camera" : "Video") << std::endl; - std::cout << "Mode: " << (enable_viz ? 
"Visualization" : "Headless") << std::endl; - std::cout << "Threshold: " << threshold << std::endl; - #ifdef ENABLE_RERUN - if (enable_rerun && rerun_logger && rerun_logger->isEnabled()) { - std::cout << "Rerun logging: ENABLED" << std::endl; - } - #endif - if (measure_latency) { - std::cout << "Latency measurement: ENABLED (metrics every 30 frames)" << std::endl; - } - if (save_video && enable_viz) { - std::cout << "Video saving: ENABLED -> " << output_video_path << std::endl; - } - if (enable_viz) { - std::cout << "Press 'q' in the video window to quit" << std::endl; - } else { - std::cout << "Running in headless mode" << std::endl; - std::cout << "Press Ctrl+C to quit" << std::endl; - } - std::cout << "========================================\n" << std::endl; - - std::thread t_capture(captureThread, source, is_camera, std::ref(capture_queue), - std::ref(metrics), std::ref(running)); - #ifdef ENABLE_RERUN - std::thread t_inference(inferenceThread, std::ref(engine), - std::ref(capture_queue), std::ref(display_queue), - std::ref(metrics), std::ref(running), threshold, - rerun_logger.get()); - #else - std::thread t_inference(inferenceThread, std::ref(engine), - std::ref(capture_queue), std::ref(display_queue), - std::ref(metrics), std::ref(running), threshold); - #endif - std::thread t_display(displayThread, std::ref(display_queue), std::ref(metrics), - std::ref(running), enable_viz, save_video, output_video_path); + // Initialize PathFinder (optional - uses LaneTracker's BEV output) + std::unique_ptr path_finder; + if (enable_path_planner) { + path_finder = std::make_unique(4.0); // 4.0m default lane width + std::cout << "PathFinder initialized (Bayes filter + polynomial fitting)" << std::endl; + std::cout << " - Using BEV points from LaneTracker" << std::endl; + std::cout << " - Transform: BEV pixels → meters (TODO: calibrate)" << "\n" << std::endl; + } + + // Initialize Steering Controller (optional - requires PathFinder) + std::unique_ptr steering_controller; + 
if (enable_steering_control) { + steering_controller = std::make_unique(K_p, K_i, K_d, K_S); + std::cout << "Steering Controller initialized" << std::endl; + } + + // Thread-safe queues with bounded size (prevents memory overflow) + ThreadSafeQueue capture_queue(5); // Max 5 frames waiting for inference + ThreadSafeQueue display_queue(5); // Max 5 frames waiting for display + + // Performance metrics + PerformanceMetrics metrics; + metrics.measure_latency = measure_latency; + std::atomic running{true}; + + // Launch threads + std::cout << "========================================" << std::endl; + std::cout << "Starting multi-threaded inference pipeline" << std::endl; + std::cout << "========================================" << std::endl; + std::cout << "Source: " << (is_camera ? "Camera" : "Video") << std::endl; + std::cout << "Mode: " << (enable_viz ? "Visualization" : "Headless") << std::endl; + std::cout << "Threshold: " << threshold << std::endl; +#ifdef ENABLE_RERUN + if (enable_rerun && rerun_logger && rerun_logger->isEnabled()) { + std::cout << "Rerun logging: ENABLED" << std::endl; + } +#endif + if (path_finder) { + std::cout << "PathFinder: ENABLED (polynomial fitting + Bayes filter)" << std::endl; + } + if (steering_controller) { + std::cout << "Steering Control: ENABLED" << std::endl; + } + if (measure_latency) { + std::cout << "Latency measurement: ENABLED (metrics every 30 frames)" << std::endl; + } + if (save_video && enable_viz) { + std::cout << "Video saving: ENABLED -> " << output_video_path << std::endl; + } + if (enable_viz) { + std::cout << "Press 'q' in the video window to quit" << std::endl; + } else { + std::cout << "Running in headless mode" << std::endl; + std::cout << "Press Ctrl+C to quit" << std::endl; + } + std::cout << "========================================\n" << std::endl; + + std::thread t_capture(captureThread, source, is_camera, std::ref(capture_queue), + std::ref(metrics), std::ref(running)); +#ifdef ENABLE_RERUN + std::thread 
t_inference(inferenceThread, std::ref(engine), + std::ref(capture_queue), std::ref(display_queue), + std::ref(metrics), std::ref(running), threshold, + path_finder.get(), + steering_controller.get(), + rerun_logger.get()); +#else + std::thread t_inference(inferenceThread, std::ref(engine), + std::ref(capture_queue), std::ref(display_queue), + std::ref(metrics), std::ref(running), threshold, + path_finder.get(), + steering_controller.get()); +#endif + std::thread t_display(displayThread, std::ref(display_queue), std::ref(metrics), + std::ref(running), enable_viz, save_video, output_video_path, csv_log_path); - // Wait for threads - t_capture.join(); - t_inference.join(); - t_display.join(); + // Wait for threads + t_capture.join(); + t_inference.join(); + t_display.join(); - std::cout << "\nInference pipeline stopped." << std::endl; + std::cout << "\nInference pipeline stopped." << std::endl; - return 0; - } + return 0; +} diff --git a/VisionPilot/Production_Releases/0.5/src/lane_tracking/lane_tracking.cpp b/VisionPilot/Production_Releases/0.5/src/lane_tracking/lane_tracking.cpp index bcb2bf4f5..7110d1438 100644 --- a/VisionPilot/Production_Releases/0.5/src/lane_tracking/lane_tracking.cpp +++ b/VisionPilot/Production_Releases/0.5/src/lane_tracking/lane_tracking.cpp @@ -284,6 +284,8 @@ std::pair LaneTracker::update( output_lanes.curvature = metrics.orig_curvature; // Populate BEV vis data + metrics.bev_visuals.bev_left_pts = left_pts_bev; // Store BEV points for PathFinder + metrics.bev_visuals.bev_right_pts = right_pts_bev; // Store BEV points for PathFinder metrics.bev_visuals.last_valid_width_pixels = last_valid_bev_width; metrics.bev_visuals.valid = true; } diff --git a/VisionPilot/Production_Releases/0.5/src/path_planning/estimator.cpp b/VisionPilot/Production_Releases/0.5/src/path_planning/estimator.cpp new file mode 100644 index 000000000..d1efe7647 --- /dev/null +++ b/VisionPilot/Production_Releases/0.5/src/path_planning/estimator.cpp @@ -0,0 +1,85 @@ +/** + 
* @file estimator.cpp + * @brief Bayes filter implementation + * + * Ported from PATHFINDER ROS2 package + */ + +#include "path_planning/estimator.hpp" + +void Estimator::initialize(const std::array &init_state) +{ + state = init_state; +} + +void Estimator::predict(const std::array &process) +{ + for (size_t i = 0; i < STATE_DIM; ++i) + { + state[i].mean += process[i].mean; // Apply delta (e.g., motion model) + state[i].variance += process[i].variance; // Add process noise + } +} + +void Estimator::update(const std::array &measurement) +{ + // Update step (product of Gaussians) + for (size_t i = 0; i < STATE_DIM; ++i) + { + double v0 = state[i].variance; + double m0 = state[i].mean; + + double v1, m1; + if (std::isnan(measurement[i].mean)) + { + state[i].variance *= 1.25; + continue; + } + else + { + v1 = measurement[i].variance; + m1 = measurement[i].mean; + } + + double v2 = (v0 * v1) / (v0 + v1); + double m2 = (m0 * v1 + m1 * v0) / (v0 + v1); + + state[i] = {m2, v2}; + } + + // Perform fusion + for (const auto &[start_idx, end_idx] : fusion_rules) + { + double inv_var_sum = 0.0; + double weighted_mean_sum = 0.0; + + for (size_t i = start_idx; i < end_idx; ++i) + { + const auto &g = state[i]; + if (g.variance <= 0.0) + continue; + + inv_var_sum += 1.0 / g.variance; + weighted_mean_sum += g.mean / g.variance; + } + + if (inv_var_sum > 0.0) + { + double fused_var = 1.0 / inv_var_sum; + double fused_mean = fused_var * weighted_mean_sum; + if (end_idx < STATE_DIM) + state[end_idx] = {fused_mean, fused_var}; + } + } +} + +void Estimator::configureFusionGroups(const std::vector> &rules) +{ + fusion_rules = rules; +} + +const std::array &Estimator::getState() const +{ + return state; +} + diff --git a/VisionPilot/Production_Releases/0.5/src/path_planning/path_finder.cpp b/VisionPilot/Production_Releases/0.5/src/path_planning/path_finder.cpp new file mode 100644 index 000000000..09dfa47c7 --- /dev/null +++ 
b/VisionPilot/Production_Releases/0.5/src/path_planning/path_finder.cpp @@ -0,0 +1,205 @@ +/** + * @file path_finder.cpp + * @brief PathFinder implementation + * + * Adapted from PATHFINDER's cb_drivCorr() and timer_callback() + */ + +#include "path_planning/path_finder.hpp" +#include +#include +#include +#include + +namespace autoware_pov::vision::path_planning { + +PathFinder::PathFinder(double default_lane_width) + : default_lane_width_(default_lane_width) +{ + initializeBayesFilter(); +} + +void PathFinder::initializeBayesFilter() +{ + // Configure fusion groups (from PATHFINDER pathfinder_node constructor) + // Fusion rules: {start_idx, end_idx} → fuse indices in [start, end) → result at end_idx + bayes_filter_.configureFusionGroups({ + {0, 3}, // CTE: fuse [0,1,2] (path,left,right) → [3] (fused) + {5, 7}, // Yaw: fuse [5,6] (left,right) → [7] (fused) + {9, 11} // Curvature: fuse [9,10] (left,right) → [11] (fused) + }); + + // Initialize state (large variance = uncertain) + Gaussian default_state = {0.0, 1e3}; + std::array init_state; + init_state.fill(default_state); + + // Initialize lane width with reasonable prior + init_state[12].mean = default_lane_width_; + init_state[12].variance = 0.5 * 0.5; + + bayes_filter_.initialize(init_state); + + std::cout << "[PathFinder] Initialized with default lane width: " + << default_lane_width_ << " m" << std::endl; +} + + +PathFinderOutput PathFinder::update( + const std::vector& left_pts_bev, + const std::vector& right_pts_bev) +{ + PathFinderOutput output; + output.left_valid = false; + output.right_valid = false; + output.fused_valid = false; + + // 1. 
Predict step (add process noise, like timer_callback in PATHFINDER) + std::array process; + std::random_device rd; + std::default_random_engine generator(rd()); + const double epsilon = 0.00001; + std::uniform_real_distribution dist(-epsilon, epsilon); + + for (size_t i = 0; i < STATE_DIM; ++i) { + process[i].mean = dist(generator); + process[i].variance = PROC_SD * PROC_SD; + } + bayes_filter_.predict(process); + + // 2. BEV points already provided (no transformation needed) + + // 3. Fit polynomials to BEV points (in meters) + auto left_coeff = fitQuadPoly(left_pts_bev); + auto right_coeff = fitQuadPoly(right_pts_bev); + + FittedCurve left_curve(left_coeff); + FittedCurve right_curve(right_coeff); + + output.left_coeff = left_coeff; + output.right_coeff = right_coeff; + output.left_valid = !std::isnan(left_curve.cte); + output.right_valid = !std::isnan(right_curve.cte); + + // Store individual metrics + output.left_cte = left_curve.cte; + output.left_yaw_error = left_curve.yaw_error; + output.left_curvature = left_curve.curvature; // Pass in AutoSteer units + output.right_cte = right_curve.cte; + output.right_yaw_error = right_curve.yaw_error; + output.right_curvature = right_curve.curvature; // Pass in AutoSteer units + + // 4. 
Create measurement (adapted from cb_drivCorr) + std::array measurement; + + // Set measurement variances + measurement[0].variance = STD_M_CTE * STD_M_CTE; + measurement[1].variance = STD_M_CTE * STD_M_CTE; + measurement[2].variance = STD_M_CTE * STD_M_CTE; + measurement[3].variance = STD_M_CTE * STD_M_CTE; + + measurement[4].variance = STD_M_YAW * STD_M_YAW; + measurement[5].variance = STD_M_YAW * STD_M_YAW; + measurement[6].variance = STD_M_YAW * STD_M_YAW; + measurement[7].variance = STD_M_YAW * STD_M_YAW; + + measurement[8].variance = STD_M_CURV * STD_M_CURV; + measurement[9].variance = STD_M_CURV * STD_M_CURV; + measurement[10].variance = STD_M_CURV * STD_M_CURV; + measurement[11].variance = STD_M_CURV * STD_M_CURV; + + measurement[12].variance = STD_M_WIDTH * STD_M_WIDTH; + measurement[13].variance = STD_M_WIDTH * STD_M_WIDTH; + + // Get current tracked width for CTE offset + auto width = bayes_filter_.getState()[12].mean; + + // Set measurement means + // [0,4,8] = ego path (we don't have it, set to NaN) + measurement[0].mean = std::numeric_limits::quiet_NaN(); + measurement[4].mean = std::numeric_limits::quiet_NaN(); + measurement[8].mean = std::numeric_limits::quiet_NaN(); + + // [1,5,9] = left lane (offset CTE to lane center) + measurement[1].mean = left_curve.cte + width / 2.0; + measurement[5].mean = left_curve.yaw_error; + measurement[9].mean = left_curve.curvature; + + // [2,6,10] = right lane (offset CTE to lane center) + measurement[2].mean = right_curve.cte - width / 2.0; + measurement[6].mean = right_curve.yaw_error; + measurement[10].mean = right_curve.curvature; + + // [3,7,11] = fused (computed by Bayes filter) + measurement[3].mean = std::numeric_limits::quiet_NaN(); + measurement[7].mean = std::numeric_limits::quiet_NaN(); + measurement[11].mean = std::numeric_limits::quiet_NaN(); + + // Lane width measurement (adapted from cb_drivCorr logic) + if (std::isnan(left_curve.cte) && std::isnan(right_curve.cte)) { + // Both lanes missing → use 
default + measurement[12].mean = default_lane_width_; + } else if (std::isnan(left_curve.cte)) { + // Left missing → keep current tracked width + measurement[12].mean = width; + } else if (std::isnan(right_curve.cte)) { + // Right missing → keep current tracked width + measurement[12].mean = width; + } else { + // Both present → direct measurement + measurement[12].mean = right_curve.cte - left_curve.cte; + } + + measurement[13].mean = std::numeric_limits::quiet_NaN(); + + // 5. Update Bayes filter + bayes_filter_.update(measurement); + + // 6. Extract fused state + const auto& state = bayes_filter_.getState(); + + output.cte = state[3].mean; + output.yaw_error = state[7].mean; + output.curvature = state[11].mean; + output.lane_width = state[12].mean; + + output.cte_variance = state[3].variance; + output.yaw_variance = state[7].variance; + output.curv_variance = state[11].variance; + output.lane_width_variance = state[12].variance; + + output.fused_valid = !std::isnan(output.cte) && + !std::isnan(output.yaw_error) && + !std::isnan(output.curvature); + + // 7. 
Calculate center polynomial coefficients (average left/right) + if (output.left_valid && output.right_valid) { + FittedCurve center = calculateEgoPath(left_curve, right_curve); + output.center_coeff = center.coeff; + } else if (output.left_valid) { + output.center_coeff = left_coeff; + } else if (output.right_valid) { + output.center_coeff = right_coeff; + } else { + output.center_coeff = { + std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN() + }; + } + + return output; +} + +const std::array& PathFinder::getState() const +{ + return bayes_filter_.getState(); +} + +void PathFinder::reset() +{ + initializeBayesFilter(); +} + +} // namespace autoware_pov::vision::path_planning + diff --git a/VisionPilot/Production_Releases/0.5/src/path_planning/poly_fit.cpp b/VisionPilot/Production_Releases/0.5/src/path_planning/poly_fit.cpp new file mode 100644 index 000000000..5786feb3a --- /dev/null +++ b/VisionPilot/Production_Releases/0.5/src/path_planning/poly_fit.cpp @@ -0,0 +1,87 @@ +/** + * @file poly_fit.cpp + * @brief Polynomial fitting implementation + * + * Extracted from PATHFINDER package's path_finder.cpp + */ + +#include "path_planning/poly_fit.hpp" +#include +#include +#include + +namespace autoware_pov::vision::path_planning { + +FittedCurve::FittedCurve() + : coeff{std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()}, + cte(std::numeric_limits::quiet_NaN()), + yaw_error(std::numeric_limits::quiet_NaN()), + curvature(std::numeric_limits::quiet_NaN()) +{ +} + +FittedCurve::FittedCurve(const std::array &coeff) : coeff(coeff) +{ + // Derived metrics at y=0 (vehicle position) + cte = -coeff[2]; // Lateral offset + yaw_error = -std::atan2(coeff[1], 1.0); // Heading angle + + // Curvature formula: κ = 2*c0 / (1 + c1²)^(3/2) + curvature = 2 * coeff[0] / std::pow(1 + (coeff[1] * coeff[1]), 1.5); +} + +std::array fitQuadPoly(const std::vector &points) +{ + const int
degree = 2; + const size_t N = points.size(); + + // Need at least 3 points for quadratic fit + if (N <= 2) + { + return {std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN(), + std::numeric_limits::quiet_NaN()}; + } + + // Setup least squares: A * [c0, c1, c2]^T = b + // where x = c0*y² + c1*y + c2 + Eigen::MatrixXd A(N, degree + 1); + Eigen::VectorXd b(N); + + for (size_t i = 0; i < N; ++i) + { + double x = points[i].x; // Lateral position + double y = points[i].y; // Longitudinal position + + A(i, 0) = y * y; // y² term + A(i, 1) = y; // y term + A(i, 2) = 1.0; // constant term + b(i) = x; // Target + } + + // Solve using QR decomposition + Eigen::VectorXd res = A.colPivHouseholderQr().solve(b); + + std::array coeffs; + for (int i = 0; i <= degree; ++i) + { + coeffs[i] = res(i); + } + + return coeffs; +} + +FittedCurve calculateEgoPath(const FittedCurve &left_lane, const FittedCurve &right_lane) +{ + // Average the polynomial coefficients + return FittedCurve({ + (left_lane.coeff[0] + right_lane.coeff[0]) / 2.0, + (left_lane.coeff[1] + right_lane.coeff[1]) / 2.0, + (left_lane.coeff[2] + right_lane.coeff[2]) / 2.0 + }); +} + +} // namespace autoware_pov::vision::path_planning + diff --git a/VisionPilot/Production_Releases/0.5/src/steering_control/steering_controller.cpp b/VisionPilot/Production_Releases/0.5/src/steering_control/steering_controller.cpp new file mode 100644 index 000000000..d5dc50bd9 --- /dev/null +++ b/VisionPilot/Production_Releases/0.5/src/steering_control/steering_controller.cpp @@ -0,0 +1,41 @@ +/** + * @file steering_controller.cpp + * @brief Implementation of steering controller + */ + +#include "steering_control/steering_controller.hpp" + +namespace autoware_pov::vision::steering_control { + +SteeringController::SteeringController( + double K_p, + double K_i, + double K_d, + double K_S) + : K_p(K_p), K_i(K_i), K_d(K_d), K_S(K_S) +{ + std::cout << "Steering controller initialized with parameters:\n" + << " K_p: " << K_p << 
"\n" + << " K_i: " << K_i << "\n" + << " K_d: " << K_d << "\n" + << " K_S: " << K_S << std::endl; + prev_yaw_error = 0.0; +} + +double SteeringController::computeSteering(double cte, double yaw_error, double feed_forward_steering_estimate) +{ + // Combined controller: + // - Derivative term: K_d * (yaw_error - prev_yaw_error) + // - Stanley controller: atan(K_i * cte) + // - Proportional term: K_p * yaw_error + // - Curvature feedforward: -atan(curvature) * K_S + double steering_angle = K_d * (yaw_error - prev_yaw_error) + + std::atan(K_i * cte) + + K_p * yaw_error + + (feed_forward_steering_estimate) * K_S; + prev_yaw_error = yaw_error; + return steering_angle; +} + +} // namespace autoware_pov::vision::steering_control + diff --git a/VisionPilot/Production_Releases/0.5/src/visualization/draw_lanes.cpp b/VisionPilot/Production_Releases/0.5/src/visualization/draw_lanes.cpp index 8388510df..c96caf588 100644 --- a/VisionPilot/Production_Releases/0.5/src/visualization/draw_lanes.cpp +++ b/VisionPilot/Production_Releases/0.5/src/visualization/draw_lanes.cpp @@ -786,5 +786,67 @@ void drawBEVVis( } -} // namespace autoware_pov::vision::autosteer +// Helper func to draw metric curves projected to pixel space +void drawMetricVerification( + cv::Mat& bev_image, + const std::vector& left_metric_coeffs, + const std::vector& right_metric_coeffs +) +{ + // Constants from main.cpp (MUST MATCH EXACTLY) + const double bev_range_m = 40.0; + const double bev_height_px = 640.0; + const double bev_width_px = 640.0; + const double scale = bev_range_m / bev_height_px; // 0.0625 m/px + const double center_x = bev_width_px / 2.0; // 320.0 + const double origin_y = bev_height_px; // 640.0 + + auto drawCurve = [&](const std::vector& coeffs, cv::Scalar color) { + if (coeffs.size() < 3) return; + + // coeffs are [c0, c1, c2] for x = c0*y^2 + c1*y + c2 (in METERS) + double c0 = coeffs[0]; + double c1 = coeffs[1]; + double c2 = coeffs[2]; + + std::vector points; + + for (int y_pix = 0; y_pix < 
640; ++y_pix) { + // 1. Pixel Y -> Meter Y (Longitudinal distance) + // origin_y is bottom (0m), decreases upwards + double y_meter = (origin_y - y_pix) * scale; + + // 2. Evaluate Polynomial in Meters + double x_meter = c0 * y_meter * y_meter + c1 * y_meter + c2; + + // 3. Meter X -> Pixel X (Lateral offset) + // x_meter = (x_pix - center_x) * scale + // x_pix = x_meter / scale + center_x + double x_pix = (x_meter / scale) + center_x; + + if (x_pix >= 0 && x_pix < 640) { + points.push_back(cv::Point(static_cast(x_pix), y_pix)); + } + } + + if (points.size() > 1) { + // Draw thicker line for visibility (5px) and add outline + cv::polylines(bev_image, points, false, cv::Scalar(255, 255, 255), 7, cv::LINE_AA); // White outline + cv::polylines(bev_image, points, false, color, 5, cv::LINE_AA); // Colored line + } + }; + + // Draw Left in Orange + if (!left_metric_coeffs.empty()) { + drawCurve(left_metric_coeffs, cv::Scalar(0, 165, 255)); + cv::putText(bev_image, "Metric L (Orange)", cv::Point(20, 580), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 165, 255), 2); + } + // Draw Right in Red + if (!right_metric_coeffs.empty()) { + drawCurve(right_metric_coeffs, cv::Scalar(0, 0, 255)); + cv::putText(bev_image, "Metric R (Red)", cv::Point(20, 610), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 0, 255), 2); + } +} + +} // namespace autoware_pov::vision::autosteer