diff --git a/applications/camera_calibration/CMakeLists.txt b/applications/camera_calibration/CMakeLists.txt index c2b52b4..ada8ce7 100644 --- a/applications/camera_calibration/CMakeLists.txt +++ b/applications/camera_calibration/CMakeLists.txt @@ -220,7 +220,25 @@ if (CMAKE_CUDA_COMPILER) set_target_properties(camera_calibration PROPERTIES POSITION_INDEPENDENT_CODE ON ) - + + # Tool for generating visualizations from reprojection error data provided by other software + add_executable(report_from_data + src/camera_calibration/report_from_data.cc + ) + target_include_directories(report_from_data PRIVATE + src + third_party/yaml-cpp-0.6.0/include + ) + target_link_libraries(report_from_data PRIVATE + camera_calibration_baselib + ${Boost_LIBRARIES} + yaml-cpp2 + libvis_external_io + ) + set_target_properties(report_from_data PROPERTIES + POSITION_INDEPENDENT_CODE ON + ) + # Resource files for unit tests ADD_CUSTOM_TARGET(camera_calibration_test_resources ALL diff --git a/applications/camera_calibration/src/camera_calibration/calibration.cc b/applications/camera_calibration/src/camera_calibration/calibration.cc index ecebef7..202b220 100644 --- a/applications/camera_calibration/src/camera_calibration/calibration.cc +++ b/applications/camera_calibration/src/camera_calibration/calibration.cc @@ -242,6 +242,7 @@ void RunBundleAdjustment( // Save the optimization state if (state_output_path) { SaveBAState(state_output_path, *state); + SaveDatasetAndState(state_output_path, *dataset, *state); } // Beautify all camera orientations @@ -1115,7 +1116,11 @@ bool Calibrate( } if (dataset_output_path) { - SaveDataset(dataset_output_path, *dataset); + SaveDatasetAndState(dataset_output_path, *dataset, *state); + LOG(INFO) << "Saved dataset at " << dataset_output_path; + } + else { + LOG(INFO) << "Didn't save dataset"; } } @@ -1322,10 +1327,10 @@ void CalibrateBatch( // Save the resulting calibration. if (!state_output_directory.empty()) { - SaveBAState(state_output_directory.c_str(), calibration); + SaveDatasetAndState(state_output_directory.c_str(), dataset, calibration); } if (!pruned_dataset_output_path.empty()) { - SaveDataset(pruned_dataset_output_path.c_str(), dataset); + SaveDatasetAndState(pruned_dataset_output_path.c_str(), dataset, calibration); } // Create the calibration error report. 
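The new report_from_data tool consumes a JSON dump of per-feature reprojection errors rather than a captured dataset. Below is a minimal sketch of how external software could produce such a dump, assuming rapidjson is available (report_from_data.cc itself uses it); the field names width, height, errors and features mirror the writer added to calibration_report.cc and the reader in report_from_data.cc further down in this patch.

// Sketch only, not part of the patch: write a *_reprojection_errors.json file
// in the layout expected by report_from_data.
#include <fstream>
#include <string>
#include <vector>
#include <rapidjson/document.h>
#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>

struct Sample { double feature_x, feature_y, error_x, error_y; };

void WriteReprojectionErrorJson(const std::string& path, int width, int height,
                                const std::vector<Sample>& samples) {
  rapidjson::Document d;
  d.SetObject();
  rapidjson::Document::AllocatorType& alloc = d.GetAllocator();
  d.AddMember("width", width, alloc);
  d.AddMember("height", height, alloc);

  rapidjson::Value errors(rapidjson::kArrayType);
  rapidjson::Value features(rapidjson::kArrayType);
  for (const Sample& s : samples) {
    rapidjson::Value e(rapidjson::kArrayType);
    e.PushBack(s.error_x, alloc).PushBack(s.error_y, alloc);
    errors.PushBack(e, alloc);

    rapidjson::Value f(rapidjson::kArrayType);
    f.PushBack(s.feature_x, alloc).PushBack(s.feature_y, alloc);
    features.PushBack(f, alloc);
  }
  d.AddMember("errors", errors, alloc);
  d.AddMember("features", features, alloc);

  rapidjson::StringBuffer buffer;
  rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
  d.Accept(writer);

  std::ofstream out(path);
  out << buffer.GetString() << "\n";
}

The resulting file can then be passed as a command-line argument to report_from_data, which writes its visualizations next to the input (see runReport() further down in this patch).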
diff --git a/applications/camera_calibration/src/camera_calibration/calibration_report.cc b/applications/camera_calibration/src/camera_calibration/calibration_report.cc index 45d09aa..9cbf594 100644 --- a/applications/camera_calibration/src/camera_calibration/calibration_report.cc +++ b/applications/camera_calibration/src/camera_calibration/calibration_report.cc @@ -48,6 +48,10 @@ #include "camera_calibration/models/noncentral_generic.h" #include "camera_calibration/util.h" +#include +#include +#include + namespace vis { using boost::polygon::voronoi_builder; @@ -352,7 +356,8 @@ double ComputeBiasedness( void CreateVoronoiDiagram( - const CameraModel* cam, + const int width, + const int height, const vector& reprojection_errors, const vector& features, float* voronoi_integer_inv_scaling, @@ -363,8 +368,8 @@ void CreateVoronoiDiagram( *voronoi_integer_inv_scaling = 1.f / kVoronoiIntegerScaling; v_reprojection_errors->reserve(32000); v_points->reserve(32000); - Image v_point_image(kVoronoiIntegerScaling * cam->width(), - kVoronoiIntegerScaling * cam->height()); + Image v_point_image(kVoronoiIntegerScaling * width, + kVoronoiIntegerScaling * height); v_point_image.SetTo(static_cast(0)); for (usize i = 0; i < reprojection_errors.size(); ++ i) { @@ -390,6 +395,25 @@ void CreateVoronoiDiagram( construct_voronoi(v_points->begin(), v_points->end(), vd); } +void CreateVoronoiDiagram( + const CameraModel * cam, + const vector& reprojection_errors, + const vector& features, + float* voronoi_integer_inv_scaling, + voronoi_diagram* vd, + vector* v_reprojection_errors, + vector>* v_points) { + CreateVoronoiDiagram( + cam->width(), + cam->height(), + reprojection_errors, + features, + voronoi_integer_inv_scaling, + vd, + v_reprojection_errors, + v_points); +} + void ClipVoronoiEdge( const vector>& v_points, const voronoi_diagram::edge_type* edge, @@ -476,14 +500,14 @@ void RenderVoronoiTriangle( } }; -template +template void RenderVoronoiDiagram( int width, int height, float voronoi_integer_inv_scaling, const voronoi_diagram& vd, const vector>& v_points, const ColorComputer& color_computer, - Image* rendering) { + Image* rendering) { Image reprojection_error_direction_rendering(width, height); reprojection_error_direction_rendering.SetTo(Vec3f::Zero()); @@ -539,7 +563,7 @@ void RenderVoronoiDiagram( rendering->SetSize(reprojection_error_direction_rendering.size()); for (int y = 0; y < rendering->height(); ++ y) { for (int x = 0; x < rendering->width(); ++ x) { - (*rendering)(x, y) = (reprojection_error_direction_rendering(x, y) + Vec3f::Constant(0.5f)).cwiseMax(Vec3f::Zero()).cwiseMin(Vec3f::Constant(255.99f)).cast(); + (*rendering)(x, y) = color_computer.cast(reprojection_error_direction_rendering(x, y)); } } } @@ -553,24 +577,70 @@ struct ReprojectionDirectionColorComputer { 127 + 127 * cos(dir), 127); } + static Vec3u8 cast(Vec3f const& val) { + return (val + Vec3f::Constant(0.5f)).cwiseMax(Vec3f::Zero()).cwiseMin(Vec3f::Constant(255.99f)).cast(); + } const vector& v_reprojection_errors; }; +struct ReprojectionVectorComputer { + Vec3f operator() (int point_index) const { + return Vec3f( + v_reprojection_errors[point_index].x(), + v_reprojection_errors[point_index].y(), + 0.0); + } + + static Vec2f cast(Vec3f const& val) { + return Vec2f(val.x(), val.y()); + } + + const vector& v_reprojection_errors; +}; + void VisualizeReprojectionErrorDirections( - const CameraModel* cam, + int const width, + int const height, float voronoi_integer_inv_scaling, const voronoi_diagram& vd, const vector& 
v_reprojection_errors, const vector>& v_points, - Image* direction_image) { + Image* direction_image, + Image* direction_raw) { RenderVoronoiDiagram( - cam->width(), cam->height(), + width, height, voronoi_integer_inv_scaling, vd, v_points, ReprojectionDirectionColorComputer{v_reprojection_errors}, direction_image); + RenderVoronoiDiagram( + width, height, + voronoi_integer_inv_scaling, + vd, + v_points, + ReprojectionVectorComputer{v_reprojection_errors}, + direction_raw); +} + +void VisualizeReprojectionErrorDirections( + const CameraModel* cam, + float voronoi_integer_inv_scaling, + const voronoi_diagram& vd, + const vector& v_reprojection_errors, + const vector>& v_points, + Image* direction_image, + Image* direction_raw) { + VisualizeReprojectionErrorDirections( + cam->width(), + cam->height(), + voronoi_integer_inv_scaling, + vd, + v_reprojection_errors, + v_points, + direction_image, + direction_raw); } struct ReprojectionMagnitudeColorComputer { @@ -580,26 +650,74 @@ struct ReprojectionMagnitudeColorComputer { double factor = std::min(1., reprojection_error.norm() / max_error); return Vec3f(255.99f * factor, 255.99f * (1 - factor), 0); } + static Vec3u8 cast(Vec3f const& val) { + return (val + Vec3f::Constant(0.5f)).cwiseMax(Vec3f::Zero()).cwiseMin(Vec3f::Constant(255.99f)).cast(); + } double max_error; const vector& v_reprojection_errors; }; +struct ReprojectionMagnitudeComputer { + Vec3f operator() (int point_index) const { + float const length = v_reprojection_errors[point_index].norm(); + return Vec3f(length, length, length); + } + + static float cast(Vec3f const& val) { + return val.x(); + } + + const vector& v_reprojection_errors; +}; + + + void VisualizeReprojectionErrorMagnitudes( - const CameraModel* cam, + int const width, + int const height, float voronoi_integer_inv_scaling, const voronoi_diagram& vd, const vector& v_reprojection_errors, const vector>& v_points, double max_error, - Image* magnitude_image) { + Image* magnitude_image, + Image* raw_image) { RenderVoronoiDiagram( - cam->width(), cam->height(), + width, height, voronoi_integer_inv_scaling, vd, v_points, ReprojectionMagnitudeColorComputer{max_error, v_reprojection_errors}, magnitude_image); + RenderVoronoiDiagram( + width, height, + voronoi_integer_inv_scaling, + vd, + v_points, + ReprojectionMagnitudeComputer{v_reprojection_errors}, + raw_image); +} + +void VisualizeReprojectionErrorMagnitudes( + const CameraModel* cam, + float voronoi_integer_inv_scaling, + const voronoi_diagram& vd, + const vector& v_reprojection_errors, + const vector>& v_points, + double max_error, + Image* magnitude_image, + Image* raw_image) { + VisualizeReprojectionErrorMagnitudes( + cam->width(), + cam->height(), + voronoi_integer_inv_scaling, + vd, + v_reprojection_errors, + v_points, + max_error, + magnitude_image, + raw_image); } @@ -734,6 +852,42 @@ bool CreateCalibrationReportForCamera( ComputeAllReprojectionErrors( camera_index, dataset, calibration, &reprojection_error_count, &reprojection_error_sum, &reprojection_error_max, &reprojection_errors, &features); + + { + namespace rs = rapidjson; + rs::Document d; + d.SetObject(); + rs::Document::AllocatorType& allocator = d.GetAllocator(); + d.AddMember("width", rs::Value().SetInt(cam->width()), allocator); + d.AddMember("height", rs::Value().SetInt(cam->height()), allocator); + + d.AddMember("errors", rs::Value().SetArray(), allocator); + d.AddMember("features", rs::Value().SetArray(), allocator); + + rs::Value errors_j(rs::kArrayType); + for (size_t ii = 0; ii < 
reprojection_errors.size(); ++ii) { + rs::Value err; + err.SetArray(); + err.PushBack(rs::Value().SetDouble(reprojection_errors[ii].x()), allocator); + err.PushBack(rs::Value().SetDouble(reprojection_errors[ii].y()), allocator); + d["errors"].PushBack(err, allocator); + + rs::Value feat; + feat.SetArray(); + feat.PushBack(rs::Value().SetDouble(features[ii].x()), allocator); + feat.PushBack(rs::Value().SetDouble(features[ii].y()), allocator); + d["features"].PushBack(feat, allocator); + } + + // 3. Stringify the DOM + rs::StringBuffer buffer; + rs::Writer writer(buffer); + d.Accept(writer); + + // Output + std::ofstream out(string(base_path) + "_reprojection_errors.json"); + out << buffer.GetString() << std::endl; + } // Visualize the distribution of reprojection errors constexpr int kHistResolution = 50; // resolution of the visualization image @@ -764,18 +918,22 @@ bool CreateCalibrationReportForCamera( // Visualize the reprojection error directions Image error_direction_image; + Image error_direction_raw; VisualizeReprojectionErrorDirections( cam, voronoi_integer_inv_scaling, vd, v_reprojection_errors, v_points, - &error_direction_image); + &error_direction_image, + &error_direction_raw); error_direction_image.Write(string(base_path) + "_error_directions.png"); - + error_direction_raw.Write(string(base_path) + "_error_directions.flo"); + // Visualize the reprojection error magnitudes constexpr double max_error_in_px = 0.5; // maximum error that can be distinguished in this visualization, in pixels Image error_magnitude_image; + Image error_magnitude_raw; VisualizeReprojectionErrorMagnitudes( cam, voronoi_integer_inv_scaling, @@ -783,9 +941,11 @@ bool CreateCalibrationReportForCamera( v_reprojection_errors, v_points, max_error_in_px, - &error_magnitude_image); + &error_magnitude_image, + &error_magnitude_raw); error_magnitude_image.Write(string(base_path) + "_error_magnitudes.png"); - + error_magnitude_raw.Write(string(base_path) + "_error_magnitudes"); + // Compute the biasedness measure double biasedness = ComputeBiasedness(cam, reprojection_errors, features); @@ -984,6 +1144,284 @@ bool CreateCalibrationReportForCamera( return true; } +bool CreateCalibrationReportForData(const char* base_path, + const int camera_index, + int const width, + int const height, + const vector &reprojection_errors, + const vector &features) { + // Create the report directory if it does not exist yet + QFileInfo(base_path).dir().mkpath("."); + + // Compute all reprojection errors as input for the corresponding visualizations + usize reprojection_error_count = reprojection_errors.size(); + double reprojection_error_sum = 0.; + double reprojection_error_max = 0; + for (Vec2d const& e : reprojection_errors) { + double const length = e.norm(); + reprojection_error_max = std::max(length, reprojection_error_max); + reprojection_error_sum += length; + } + + // Visualize the distribution of reprojection errors + constexpr int kHistResolution = 50; // resolution of the visualization image + constexpr double kHistExtent = 0.2f; // visualized reprojection error extent in pixels + Image hist_image; + ComputeReprojectionErrorHistogram( + kHistResolution, kHistExtent, reprojection_errors, &hist_image); + double max_hist_entry = 0; + for (u32 y = 0; y < hist_image.height(); ++ y) { + for (u32 x = 0; x < hist_image.width(); ++ x) { + max_hist_entry = std::max(max_hist_entry, hist_image(x, y)); + } + } + Image hist_image_u8(hist_image.width(), hist_image.height()); + for (u32 y = 0; y < hist_image.height(); ++ y) { + for 
(u32 x = 0; x < hist_image.width(); ++ x) { + hist_image_u8(x, y) = hist_image(x, y) * 255.99f / max_hist_entry; + } + } + hist_image_u8.Write(string(base_path) + "_errors_histogram.png"); + + // Create a Voronoi diagram of the feature locations for the following visualizations + float voronoi_integer_inv_scaling; + voronoi_diagram vd; + vector v_reprojection_errors; + vector> v_points; + CreateVoronoiDiagram(width, height, reprojection_errors, features, &voronoi_integer_inv_scaling, &vd, &v_reprojection_errors, &v_points); + + // Visualize the reprojection error directions + Image error_direction_image; + Image error_direction_raw; + VisualizeReprojectionErrorDirections( + width, + height, + voronoi_integer_inv_scaling, + vd, + v_reprojection_errors, + v_points, + &error_direction_image, + &error_direction_raw); + error_direction_image.Write(string(base_path) + "_error_directions.png"); + error_direction_raw.Write(string(base_path) + "_error_directions.flo"); + + // Visualize the reprojection error magnitudes + constexpr double max_error_in_px = 0.5; // maximum error that can be distinguished in this visualization, in pixels + Image error_magnitude_image; + Image error_magnitude_raw; + VisualizeReprojectionErrorMagnitudes( + width, + height, + voronoi_integer_inv_scaling, + vd, + v_reprojection_errors, + v_points, + max_error_in_px, + &error_magnitude_image, + &error_magnitude_raw); + error_magnitude_image.Write(string(base_path) + "_error_magnitudes.png"); + error_magnitude_raw.Write(string(base_path) + "_error_magnitudes"); + +#if 0 + // Compute the biasedness measure + double biasedness = ComputeBiasedness(cam, reprojection_errors, features); + + // Compute the (approximate) field-of-view + double horizontal_fov; + double vertical_fov; + ComputeApproximateFOV(cam, &horizontal_fov, &vertical_fov); + + // Write the info file + int num_localized_images = 0; + for (usize i = 0; i < calibration.image_used.size(); ++ i) { + if (calibration.image_used[i]) { + ++ num_localized_images; + } + } + WriteReportInfoFile( + string(base_path) + "_info.txt", + cam, + horizontal_fov, + vertical_fov, + dataset.ImagesetCount(), + num_localized_images, + reprojection_errors, + reprojection_error_count, + reprojection_error_sum, + reprojection_error_max, + biasedness, + kHistExtent, + max_error_in_px); + + // Additional model-specific visualizations + if (const CentralGenericModel* cgb_model = dynamic_cast(cam)) { + // TODO: Implement this in a generic way for all other grid-based cameras as well + Image grid_point_image(cam->width(), cam->height()); + grid_point_image.SetTo(Vec3u8(0, 0, 0)); + + for (u32 y = 0; y < cgb_model->grid().height(); ++ y) { + for (u32 x = 0; x < cgb_model->grid().width(); ++ x) { + Vec2d pixel = cgb_model->GridPointToPixelCornerConv(x, y); + int px = pixel.x(); + int py = pixel.y(); + if (px >= 0 && py >= 0 && px < grid_point_image.width() && py < grid_point_image.height()) { + grid_point_image(px, py) = Vec3u8(255, 255, 255); + } + } + } + + std::ostringstream filename6; + filename6 << string(base_path) << "_grid_point_locations.png"; + grid_point_image.Write(filename6.str()); + } else if (const NoncentralGenericModel* ngbsp_model = dynamic_cast(cam)) { + // Find the point that is as close to each of the rays as possible. For + // central cameras, this will be the optical center. 
+ CenterPointCostFunction center_point_cost; + int calibrated_area = (cam->calibration_max_y() - cam->calibration_min_y() + 1) * + (cam->calibration_max_x() - cam->calibration_min_x() + 1); + center_point_cost.lines.reserve(calibrated_area); + center_point_cost.tangents.reserve(calibrated_area); + for (int y = cam->calibration_min_y(); y <= cam->calibration_max_y(); ++ y) { + for (int x = cam->calibration_min_x(); x <= cam->calibration_max_x(); ++ x) { + Line3d line; + if (ngbsp_model->Unproject(x + 0.5f, y + 0.5f, &line)) { + center_point_cost.lines.emplace_back(line); + center_point_cost.tangents.emplace_back(); + ComputeTangentsForDirectionOrLine(line.direction(), ¢er_point_cost.tangents.back()); + } + } + } + + Vec3d center = Vec3d::Zero(); + LMOptimizer optimizer; + optimizer.Optimize( + ¢er, + center_point_cost, + /*max_iteration_count*/ 100, + /*max_lm_attempts*/ 10, + /*init_lambda*/ -1, + /*init_lambda_factor*/ 0.001f, + /*print_progress*/ false); + + // Get statistics on the distances between the center and the rays + Image line_offsets(cam->width(), cam->height()); + line_offsets.SetTo(Vec3d::Constant(numeric_limits::quiet_NaN())); + + double line_distance_sum = 0; + usize line_distance_count = 0; + double line_distance_max = 0; + vector line_distances; + + double max_line_offset_extent = 0; + + for (int y = cam->calibration_min_y(); y <= cam->calibration_max_y(); ++ y) { + for (int x = cam->calibration_min_x(); x <= cam->calibration_max_x(); ++ x) { + Line3d line; + if (ngbsp_model->Unproject(x + 0.5f, y + 0.5f, &line)) { + // Find the closest point on the line to the previously determined center point + double parameter = line.direction().dot(center - line.origin()); + Vec3d closest_point_on_line = line.origin() + parameter * line.direction(); + + Vec3d line_offset = closest_point_on_line - center; + line_offsets(x, y) = line_offset; + + double line_distance = line_offset.norm(); + line_distance_sum += line_distance; + ++ line_distance_count; + line_distance_max = std::max(line_distance_max, line_distance); + line_distances.push_back(line_distance); + + max_line_offset_extent = std::max(max_line_offset_extent, fabs(line_offset.x())); + max_line_offset_extent = std::max(max_line_offset_extent, fabs(line_offset.y())); + max_line_offset_extent = std::max(max_line_offset_extent, fabs(line_offset.z())); + } + } + } + + // stream << "line_distance_count : " << line_distance_count << std::endl; + // if (!line_distances.empty()) { + // std::sort(line_distances.begin(), line_distances.end()); + // stream << "line_distance_median : " << line_distances[line_distances.size() / 2] << std::endl; + // } + // stream << "line_distance_average : " << (line_distance_sum / line_distance_count) << std::endl; + // stream << "line_distance_maximum : " << line_distance_max << std::endl; + // stream << "" << std::endl; + + Image line_offset_visualization(cam->width(), cam->height()); + for (int y = 0; y < cam->height(); ++ y) { + for (int x = 0; x < cam->width(); ++ x) { + const Vec3d& line_offset = line_offsets(x, y); + if (line_offset.hasNaN()) { + line_offset_visualization(x, y) = Vec3u8(0, 0, 0); + } else { + line_offset_visualization(x, y) = Vec3u8( + 127 + 127 * line_offset.x() / max_line_offset_extent, + 127 + 127 * line_offset.y() / max_line_offset_extent, + 127 + 127 * line_offset.z() / max_line_offset_extent); + } + } + } + + std::ostringstream filename5; + filename5 << string(base_path) << "_line_offsets.png"; + line_offset_visualization.Write(filename5.str()); + + // Output a 3D model of 
the camera as an .obj file + constexpr int kStepForOBJ = 20; + ofstream obj_stream(string(base_path) + "_line_visualization.obj", std::ios::out); + ofstream obj_stream_cutoff(string(base_path) + "_line_visualization_cutoff.obj", std::ios::out); + ofstream obj_stream_origins(string(base_path) + "_line_visualization_origins.obj", std::ios::out); + if (!obj_stream || !obj_stream_cutoff || !obj_stream_origins) { + return false; + } + obj_stream << std::setprecision(14); + obj_stream_cutoff << std::setprecision(14); + obj_stream_origins << std::setprecision(14); + + int visualized_line_count = 0; + for (int y = cam->calibration_min_y(); y <= cam->calibration_max_y(); y += kStepForOBJ) { + for (int x = cam->calibration_min_x(); x <= cam->calibration_max_x(); x += kStepForOBJ) { + Line3d line; + if (ngbsp_model->Unproject(x + 0.5f, y + 0.5f, &line)) { + // Show the range of the line that is centered on the point on the line + // that is closest to the center point defined above, and has a certain + // extent in both directions. + double parameter = line.direction().dot(center - line.origin()); + Vec3d closest_point_on_line = line.origin() + parameter * line.direction(); + + // Visualized line segments are at least 2 * 10 meters long + double visualization_line_half_extent = std::max(10, 10 * (closest_point_on_line - center).norm()); + + Vec3d point_a = closest_point_on_line + visualization_line_half_extent * line.direction(); + obj_stream << "v " << point_a.x() << " " << point_a.y() << " " << point_a.z() << std::endl; + Vec3d point_b = closest_point_on_line - visualization_line_half_extent * line.direction(); + obj_stream << "v " << point_b.x() << " " << point_b.y() << " " << point_b.z() << std::endl; + + obj_stream_cutoff << "v " << point_a.x() << " " << point_a.y() << " " << point_a.z() << std::endl; + obj_stream_cutoff << "v " << closest_point_on_line.x() << " " << closest_point_on_line.y() << " " << closest_point_on_line.z() << std::endl; + + obj_stream_origins << "v " << point_a.x() << " " << point_a.y() << " " << point_a.z() << std::endl; + obj_stream_origins << "v " << point_b.x() << " " << point_b.y() << " " << point_b.z() << std::endl; + obj_stream_origins << "v " << line.origin().x() << " " << line.origin().y() << " " << line.origin().z() << std::endl; + + ++ visualized_line_count; + } + } + } + + int vertex_index = 1; // vertex indexing starts at 1 in .obj files + for (int i = 0; i < visualized_line_count; ++ i) { + obj_stream << "l " << vertex_index << " " << (vertex_index + 1) << std::endl; + obj_stream_cutoff << "l " << vertex_index << " " << (vertex_index + 1) << std::endl; + obj_stream_origins << "l " << (3 * (vertex_index / 2) + 1) << " " << (3 * (vertex_index / 2) + 2) << std::endl; + vertex_index += 2; + } + } +#endif + + return true; +} + void CreateReprojectionErrorHistogram( int camera_index, const Dataset& dataset, diff --git a/applications/camera_calibration/src/camera_calibration/calibration_report.h b/applications/camera_calibration/src/camera_calibration/calibration_report.h index 8dd7855..3820cb3 100644 --- a/applications/camera_calibration/src/camera_calibration/calibration_report.h +++ b/applications/camera_calibration/src/camera_calibration/calibration_report.h @@ -48,12 +48,21 @@ int CreateCalibrationReport( const Dataset& dataset, const BAState& calibration, const string& report_base_path); + bool CreateCalibrationReportForCamera( const char* base_path, int camera_index, const Dataset& dataset, const BAState& calibration); +bool CreateCalibrationReportForData( + 
const char* base_path, + int camera_index, + const int width, + const int height, + vector const& reprojection_errors, + vector const& features); + void CreateReprojectionErrorHistogram( int camera_index, const Dataset& dataset, diff --git a/applications/camera_calibration/src/camera_calibration/io/calibration_io.cc b/applications/camera_calibration/src/camera_calibration/io/calibration_io.cc index 7236ba0..8430b40 100644 --- a/applications/camera_calibration/src/camera_calibration/io/calibration_io.cc +++ b/applications/camera_calibration/src/camera_calibration/io/calibration_io.cc @@ -45,6 +45,7 @@ #include "camera_calibration/models/central_radial.h" #include "camera_calibration/models/central_thin_prism_fisheye.h" #include "camera_calibration/models/noncentral_generic.h" +#include "camera_calibration/feature_detection/feature_detector_tagged_pattern.h" namespace vis { @@ -134,6 +135,124 @@ bool SaveDataset(const char* path, const Dataset& dataset) { return true; } +std::string strip_slashes(std::string const& src) { + std::string result; + for (char c : src) { + if (c == '/') { + result += '-'; + } + else { + result += c; + } + } + return result; +} + +bool SaveDatasetAndState(const char* path, const Dataset& dataset, const BAState& state) { + QFileInfo(path).dir().mkpath("."); + + FILE* file = fopen(path, "wb"); + if (!file) { + return false; + } + + // File format identifier + u8 header[10]; + header[0] = 'c'; + header[1] = 'a'; + header[2] = 'l'; + header[3] = 'i'; + header[4] = 'b'; + header[5] = '_'; + header[6] = 'd'; + header[7] = 'a'; + header[8] = 't'; + header[9] = 'a'; + fwrite(header, 1, 10, file); + + // File format version + const u32 version = 0; + write_one(&version, file); + + // Cameras + u32 num_cameras = dataset.num_cameras(); + write_one(&num_cameras, file); + for (int camera_index = 0; camera_index < dataset.num_cameras(); ++ camera_index) { + const Vec2i& image_size = dataset.GetImageSize(camera_index); + u32 width = image_size.x(); + write_one(&width, file); + u32 height = image_size.y(); + write_one(&height, file); + } + + // Imagesets + u32 num_imagesets = dataset.ImagesetCount(); + write_one(&num_imagesets, file); + for (int imageset_index = 0; imageset_index < dataset.ImagesetCount(); ++ imageset_index) { + shared_ptr imageset = dataset.GetImageset(imageset_index); + + const string& filename = imageset->GetFilename(); + u32 filename_len = filename.size(); + write_one(&filename_len, file); + fwrite(filename.data(), 1, filename_len, file); + + for (int camera_index = 0; camera_index < dataset.num_cameras(); ++ camera_index) { + const vector& features = imageset->FeaturesOfCamera(camera_index); + + const SE3d& image_tr_global = state.camera_tr_rig[camera_index] * state.rig_tr_global[imageset_index]; + Quaterniond image_q_global = image_tr_global.unit_quaternion(); + Mat3d image_r_global = image_tr_global.rotationMatrix().cast(); + const Vec3d& image_t_global = image_tr_global.translation(); + + std::string const points_filename = std::string(path) + "-" + strip_slashes(filename) + "-" + std::to_string(camera_index) + ".yaml"; + std::ofstream points_file(points_filename); + points_file << "%YAML:1.0" << std::endl + << "---" << std::endl + << "correspondences:" << std::endl; + + LOG(INFO) << "Saving points at " << points_filename; + + u32 num_features = features.size(); + write_one(&num_features, file); + for (const PointFeature& feature : features) { + write_one(&feature.xy.x(), file); + write_one(&feature.xy.y(), file); + i32 id_32bit = feature.id; + 
write_one(&id_32bit, file); + + const Vec3d& point = state.points[feature.index]; + Vec3d local_point = image_r_global * point + image_t_global; + points_file << " -" << std::endl + << " id: " << feature.id << std::endl + << " point: [" << local_point[0] << ", " << local_point[1] << ", " << local_point[1] << "]" << std::endl; + } + } + } + + // Known geometries + u32 num_known_geometries = dataset.KnownGeometriesCount(); + write_one(&num_known_geometries, file); + for (int geometry_index = 0; geometry_index < dataset.KnownGeometriesCount(); ++ geometry_index) { + const KnownGeometry& geometry = dataset.GetKnownGeometry(geometry_index); + + write_one(&geometry.cell_length_in_meters, file); + + u32 feature_id_to_position_size = geometry.feature_id_to_position.size(); + write_one(&feature_id_to_position_size, file); + for (const std::pair& item : geometry.feature_id_to_position) { + i32 id_32bit = item.first; + write_one(&id_32bit, file); + i32 x_32bit = item.second.x(); + write_one(&x_32bit, file); + i32 y_32bit = item.second.y(); + write_one(&y_32bit, file); + } + } + + fclose(file); + return true; +} + bool LoadDataset(const char* path, Dataset* dataset) { FILE* file = fopen(path, "rb"); if (!file) { @@ -173,6 +292,7 @@ bool LoadDataset(const char* path, Dataset* dataset) { // Cameras u32 num_cameras; read_one(&num_cameras, file); + LOG(INFO) << "Number of cameras: " << num_cameras; dataset->Reset(num_cameras); for (int camera_index = 0; camera_index < num_cameras; ++ camera_index) { u32 width; @@ -180,12 +300,14 @@ bool LoadDataset(const char* path, Dataset* dataset) { u32 height; read_one(&height, file); dataset->SetImageSize(camera_index, Vec2i(width, height)); + LOG(INFO) << "Width and height of camera #: " << camera_index << " " << width << ", " << height; } // Imagesets u32 num_imagesets; u32 total_num_features = 0; read_one(&num_imagesets, file); + LOG(INFO) << "Number of imagesets: " << num_imagesets; for (int imageset_index = 0; imageset_index < num_imagesets; ++ imageset_index) { shared_ptr new_imageset = dataset->NewImageset(); @@ -214,6 +336,7 @@ bool LoadDataset(const char* path, Dataset* dataset) { read_one(&id_32bit, file); feature.id = id_32bit; } + LOG(INFO) << "Number of features in image: " << filename << ": " << num_features; } } @@ -221,6 +344,7 @@ bool LoadDataset(const char* path, Dataset* dataset) { u32 num_known_geometries; read_one(&num_known_geometries, file); dataset->SetKnownGeometriesCount(num_known_geometries); + LOG(INFO) << "Number of known geometries: " << num_known_geometries; for (u32 geometry_index = 0; geometry_index < num_known_geometries; ++ geometry_index) { KnownGeometry& geometry = dataset->GetKnownGeometry(geometry_index); @@ -915,7 +1039,19 @@ bool SavePointsAndIndexMapping( stream << " - feature_id: " << item.first << std::endl; stream << " point_index: " << item.second << std::endl; } - + + unordered_map* feature_id_to_coord; + /* + FeatureDetectorTaggedPattern().GetCorners( + 0, + & feature_id_to_coord); + //*/ + + for (const auto& item : calibration.feature_id_to_points_index) { + stream << " - feature_id: " << item.first << std::endl; + stream << " point_index: " << item.second << std::endl; + } + stream.close(); // TODO: Should be in a separate function. 
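Besides the binary stream, SaveDatasetAndState() above writes one YAML correspondence file per image and camera. The following is a minimal sketch, not part of the patch, of reading such a file back with yaml-cpp (already a dependency of the new report_from_data target). The "%YAML:1.0" directive emitted above is OpenCV-style rather than standard YAML, so the sketch strips that line before parsing as a precaution; whether a given parser requires this is an assumption, not something the patch specifies.

#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <yaml-cpp/yaml.h>

struct Correspondence { int id; double point[3]; };

std::vector<Correspondence> LoadCorrespondences(const std::string& path) {
  // Read the file, dropping the non-standard "%YAML:1.0" directive line.
  std::ifstream in(path);
  std::stringstream filtered;
  std::string line;
  while (std::getline(in, line)) {
    if (line.rfind("%YAML", 0) == 0) continue;
    filtered << line << '\n';
  }

  std::vector<Correspondence> result;
  YAML::Node root = YAML::Load(filtered.str());
  for (const YAML::Node& node : root["correspondences"]) {
    Correspondence c;
    c.id = node["id"].as<int>();
    for (int i = 0; i < 3; ++i) {
      c.point[i] = node["point"][i].as<double>();
    }
    result.push_back(c);
  }
  return result;
}

Each entry carries the pattern point transformed into the camera frame of the respective image (image_r_global * point + image_t_global, as computed in the writer above).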
diff --git a/applications/camera_calibration/src/camera_calibration/io/calibration_io.h b/applications/camera_calibration/src/camera_calibration/io/calibration_io.h index 0e713cc..7ff2483 100644 --- a/applications/camera_calibration/src/camera_calibration/io/calibration_io.h +++ b/applications/camera_calibration/src/camera_calibration/io/calibration_io.h @@ -50,6 +50,9 @@ bool SaveDataset( const char* path, const Dataset& dataset); +bool SaveDatasetAndState(const char* path, const Dataset& dataset, const BAState& state); + + /// Tries to load a dataset from the given path. Returns true if successful. bool LoadDataset( const char* path, diff --git a/applications/camera_calibration/src/camera_calibration/main.cc b/applications/camera_calibration/src/camera_calibration/main.cc index 2f872e7..9a4e229 100644 --- a/applications/camera_calibration/src/camera_calibration/main.cc +++ b/applications/camera_calibration/src/camera_calibration/main.cc @@ -555,6 +555,7 @@ int LIBVIS_QT_MAIN(int argc, char** argv) { dataset_yaml_file << "- camera: \"" << inputs[i]->display_text.toStdString() << "\"" << endl; dataset_yaml_file << " path: \"" << image_record_directories[i].dirName().toStdString() << "\"" << endl; } + LOG(INFO) << "Wrote dataset YAML file at: " << dataset_yaml_path; } else { LOG(ERROR) << "Failed to write dataset YAML file at: " << dataset_yaml_path; } @@ -591,7 +592,9 @@ int LIBVIS_QT_MAIN(int argc, char** argv) { if (settings_window.SaveDatasetOnExit() || settings_window.show_pattern_clicked()) { QDir record_directory = QDir(settings_window.RecordDirectory()); record_directory.mkpath("."); - SaveDataset(record_directory.absoluteFilePath("features.bin").toStdString().c_str(), dataset); + std::string const dataset_path = record_directory.absoluteFilePath("features.bin").toStdString(); + SaveDataset(dataset_path.c_str(), dataset); + LOG(INFO) << "Saved dataset at " << dataset_path; } delete main_window; diff --git a/applications/camera_calibration/src/camera_calibration/report_from_data.cc b/applications/camera_calibration/src/camera_calibration/report_from_data.cc new file mode 100644 index 0000000..d5e83df --- /dev/null +++ b/applications/camera_calibration/src/camera_calibration/report_from_data.cc @@ -0,0 +1,128 @@ +// Copyright 2019 ETH Zürich, Thomas Schöps +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#undef NDEBUG +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "camera_calibration/bundle_adjustment/ba_state.h" +#include "camera_calibration/bundle_adjustment/joint_optimization.h" +#include "camera_calibration/calibration.h" +#include "camera_calibration/calibration_report.h" +#include "camera_calibration/feature_detection/feature_detector_tagged_pattern.h" +#include "camera_calibration/fitting_report.h" +#include "camera_calibration/io/calibration_io.h" +#include "camera_calibration/image_input/image_input_realsense.h" +#include "camera_calibration/image_input/image_input_v4l2.h" +#include "camera_calibration/models/all_models.h" +#include "camera_calibration/tools/tools.h" +#include "camera_calibration/ui/calibration_window.h" +#include "camera_calibration/ui/live_image_consumer.h" +#include "camera_calibration/ui/main_window.h" +#include "camera_calibration/ui/pattern_display.h" +#include "camera_calibration/ui/settings_window.h" +#include "camera_calibration/util.h" + +using namespace vis; + + +Q_DECLARE_METATYPE(QSharedPointer>) + +namespace rs = rapidjson; + +void runReport(std::string const& input_filename) { + ifstream ifs(input_filename); + rapidjson::IStreamWrapper isw(ifs); + + rapidjson::Document d; + d.ParseStream(isw); + + int const width = d["width"].GetInt(); + int const height = d["height"].GetInt(); + + vector errors; + vector features; + + assert(d["errors"].IsArray()); + assert(d["features"].IsArray()); + + rs::Value errors_j = d["errors"].GetArray(); + + rs::Value features_j = d["features"].GetArray(); + + LOG(INFO) << "File " << input_filename << " has width/height " << width << ", " << height << " and " << errors_j.Size() << " errors"; + + assert(errors_j.Size() > 0); + assert(errors_j.Size() == features_j.Size()); + + for (size_t ii = 0; ii < errors_j.Size(); ++ii) { + assert(errors_j[ii].IsArray()); + assert(features_j[ii].IsArray()); + assert(errors_j[ii].Size() == 2); + assert(features_j[ii].Size() == 2); + + errors.push_back(Vec2d(errors_j[ii][0].GetDouble(), errors_j[ii][1].GetDouble())); + features.push_back(Vec2f(features_j[ii][0].GetDouble(), features_j[ii][1].GetDouble())); + } + + LOG(INFO) << "Read all errors and features"; + + CreateCalibrationReportForData((input_filename + "_report").c_str(), 0, width, height, errors, features); + LOG(INFO) << "Finished writing report"; +} + +int LIBVIS_QT_MAIN(int argc, char** argv) { + qRegisterMetaType>>(); + srand(0); + + for (int ii = 1; ii < argc; ++ii) { + runReport(argv[ii]); + } + + return EXIT_SUCCESS; +} diff --git a/applications/camera_calibration/src/camera_calibration/tools/bundle_adjustment.cc b/applications/camera_calibration/src/camera_calibration/tools/bundle_adjustment.cc index d26cca9..404a789 100644 --- a/applications/camera_calibration/src/camera_calibration/tools/bundle_adjustment.cc +++ 
b/applications/camera_calibration/src/camera_calibration/tools/bundle_adjustment.cc @@ -203,7 +203,8 @@ int BundleAdjustment(const string& state_directory, const string& model_input_di // Save the optimization state after each iteration SaveBAState(model_output_directory.c_str(), state); - + SaveDatasetAndState(model_output_directory.c_str(), dataset, state); + // Save the final cost std::ofstream cost_stream((boost::filesystem::path(model_output_directory) / "cost.txt").string().c_str(), std::ios::out); if (cost_stream) { diff --git a/libvis/src/libvis/image.cc b/libvis/src/libvis/image.cc index b052747..6ab0023 100644 --- a/libvis/src/libvis/image.cc +++ b/libvis/src/libvis/image.cc @@ -26,6 +26,7 @@ // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. +#include #include "libvis/image.h" diff --git a/libvis/src/libvis/image.h b/libvis/src/libvis/image.h index 5fa6cbf..f507e06 100644 --- a/libvis/src/libvis/image.h +++ b/libvis/src/libvis/image.h @@ -35,6 +35,7 @@ #include #endif +#include #include "libvis/logging.h" #include "libvis/eigen.h" @@ -1769,6 +1770,10 @@ bool Image::Write(const string& image_file_name) const; template<> bool Image::Write(const string& image_file_name) const; template<> +bool Image::Write(const string& image_file_name) const; +template<> +bool Image::Write(const string& image_file_name) const; +template<> bool Image::Read(const string& image_file_name); template<> bool Image::Read(const string& image_file_name); diff --git a/libvis/src/libvis/image_template_specializations.h b/libvis/src/libvis/image_template_specializations.h index 4c2e45d..2e52f91 100644 --- a/libvis/src/libvis/image_template_specializations.h +++ b/libvis/src/libvis/image_template_specializations.h @@ -40,6 +40,66 @@ bool Image::Write(const string& image_file_name) const { return io->Write(image_file_name, *this); } +template<> +bool Image::Write(const string& image_file_name) const { + std::ofstream out(image_file_name + ".data"); + if (!out.good()) { + return false; + } + for(int xx = 0; xx < width(); ++xx) { + for (int yy = 0; yy < height(); ++yy) { + out << xx << "\t" << yy << "\t" << operator()(xx,yy) << std::endl; + if (!out.good()) { + return false; + } + } + out << std::endl; + } + std::ofstream gnuplot(image_file_name + ".gpl"); + gnuplot << "set term svg enhanced background rgb 'white';\n" + << "set output '" << image_file_name << ".svg';\n" + << "set view equal xy;\n" + << "set xrange [0:" << width() << "];\n" + << "set yrange [0:" << height() << "];\n" + << "set xtics out;\n" + << "set ytics out;\n" + << "set title 'Reprojection error magnitude [px]';\n" + << "plot '" << image_file_name << ".data' with image notitle;\n"; + + return true; +} + +template<> +bool Image::Write(const string& image_file_name) const { + std::ofstream out(image_file_name, std::ofstream::binary); + if (!out.good()) + return false; + + int const width = this->height(); + int const height = this->width(); + + out.write("PIEH", 4); + out.write(reinterpret_cast(&height), sizeof(height)); + out.write(reinterpret_cast(&width), sizeof(width)); + if (!out.good()) + return false; + + for (int row = 0; row < width; row++ ) { + for (int col = 0; col < height; ++col) { + float const x = operator()(col, row).x(); + float const y = operator()(col, row).y(); + out.write(reinterpret_cast(&x), sizeof x); + out.write(reinterpret_cast(&y), sizeof y); + if (!out.good()) { + return false; + } + } + } + out.close(); + return true; +} + + template<> bool Image::Read(const 
string& image_file_name) { ImageIO* io = ImageIORegistry::Instance()->Get(image_file_name);
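The Image::Write specialization for two-channel float images added above stores the raw reprojection-error vectors in Middlebury .flo layout: the 4-byte magic "PIEH", two 32-bit dimensions (width, then height), and row-major (x, y) float pairs. A minimal, self-contained reader sketch follows (not part of the patch); it assumes the file is read on a machine with the same endianness and 32-bit int as the writer, since the writer dumps native ints and floats.

#include <cstdint>
#include <cstring>
#include <fstream>
#include <string>
#include <vector>

struct Flow { float x, y; };

bool ReadFlo(const std::string& path, std::int32_t* width, std::int32_t* height,
             std::vector<Flow>* data) {
  std::ifstream in(path, std::ios::binary);
  char magic[4];
  if (!in.read(magic, 4) || std::memcmp(magic, "PIEH", 4) != 0) {
    return false;
  }
  if (!in.read(reinterpret_cast<char*>(width), sizeof(*width)) ||
      !in.read(reinterpret_cast<char*>(height), sizeof(*height))) {
    return false;
  }
  // Row-major storage: pixel (x, y) ends up at data[y * width + x].
  data->resize(static_cast<std::size_t>(*width) * static_cast<std::size_t>(*height));
  return static_cast<bool>(in.read(
      reinterpret_cast<char*>(data->data()),
      static_cast<std::streamsize>(data->size() * sizeof(Flow))));
}

The scalar-float specialization instead writes a <name>.data/<name>.gpl pair; running gnuplot on the generated .gpl script should render the magnitude map to <name>.svg, as configured in that script.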