diff --git a/CMakeLists.txt b/CMakeLists.txt index 93e76e1..9f0dcd6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,8 @@ set(Boost_USE_MULTITHREADED ON) set(Boost_USE_STATIC_RUNTIME OFF) set(Boost_NO_BOOST_CMAKE ON) set(Boost_ADDITIONAL_VERSIONS "1.48" "1.48.0") +set(Boost_INCLUDE_DIR $ENV{Boost_INCLUDE_DIR}) + find_package( Boost COMPONENTS thread chrono random date_time system REQUIRED ) message(STATUS "Boost information: BOOST_ROOT = $ENV{BOOST_ROOT}") message(STATUS "Boost_INCLUDE_DIRS: ${Boost_INCLUDE_DIRS}") @@ -42,7 +44,7 @@ message(STATUS "Boost_LIBRARY_DIRS: ${Boost_LIBRARY_DIRS}") if (WIN32) find_package( NP_CAMERASDK ) - add_definitions(${Boost_LIB_DIAGNOSTIC_DEFINITIONS}) + #add_definitions(${Boost_LIB_DIAGNOSTIC_DEFINITIONS}) include_directories($ENV{MSMPI_INC}) endif() diff --git a/apps/lptOptitrack/optitrackcams.cpp b/apps/lptOptitrack/optitrackcams.cpp index 85b10f3..bbc5262 100644 --- a/apps/lptOptitrack/optitrackcams.cpp +++ b/apps/lptOptitrack/optitrackcams.cpp @@ -28,8 +28,8 @@ int main(int argc, char** argv) { string input = (argc > 1 ? argv[1] : "../../../data/input/"); string output = (argc > 2 ? argv[2] : "../../../data/output/"); - string cameras_file = input + "cameras.yaml"; //"../../../data/pivstd/4cams/cameras.yaml"; // - string camera_pairs_file = input + "camera_pairs.yaml"; //"../../../data/pivstd/4cams/camera_pairs.yaml"; // + string cameras_file = input + "8_cameras.yaml"; //"../../../data/pivstd/4cams/cameras.yaml"; // + string camera_pairs_file = input + "8_pairs.yaml"; //"../../../data/pivstd/4cams/camera_pairs.yaml"; // lpt::StreamingPipeline pipeline; pipeline.setQueueCapacity(1000); @@ -46,11 +46,11 @@ int main(int argc, char** argv) { auto matcher = lpt::PointMatcher::create(); matcher->params.match_threshold = 2.0; - matcher->params.match_threshold = 20; + // matcher->params.match_thresh_level = 20; auto matcher_cuda = lpt::PointMatcherCUDA::create(); - matcher_cuda->params.match_threshold = 2.0; //pixels - matcher_cuda->params.match_thresh_level = 20; + // matcher_cuda->params.match_threshold = 5.0; //pixels + // matcher_cuda->params.match_thresh_level = 20; auto tracker = lpt::Tracker::create(); tracker->setCostCalculator(lpt::CostMinimumAcceleration::create()); @@ -58,24 +58,23 @@ int main(int argc, char** argv) { tracker->params.min_radius_level = 4; tracker->params.max_radius = 25.0; //mm tracker->params.max_radius_level = 25; - tracker->params.KF_sigma_a = 1E-5; - tracker->params.KF_sigma_z = 1E-1; + tracker->params.KF_sigma_a = 2.75E-4; + tracker->params.KF_sigma_z = 1E-2; bool KalmanFilter = false; auto visualizer = lpt::Visualizer::create(); - double grid_side_length = 400; // mm - double grid_width = 150; // mm - int cell_counts[3] = {40, 15, 15}; - visualizer->getVolumeGrid()->setGridOrigin(-145, 33, -105); // nozzle exit: (X, Y, Z) = (105, -165, -100); - //visualizer->getVolumeGrid()->setGridOrigin(14, -23, -80); + double grid_size[3] = {2032, 1270, 1270}; // mm + double grid_width = 105; // mm + int cell_counts[3] = {21,11,11};//{51, 31, 31}; + visualizer->getVolumeGrid()->setGridOrigin(-grid_size[0]/2, 584, -grid_size[2]/2); + //visualizer->getVolumeGrid()->setGridOrigin(150,0,0); visualizer->getVolumeGrid()->setGridCellCounts(cell_counts[0], cell_counts[1], cell_counts[2]); - //visualizer->getVolumeGrid()->setGridDimensions( cell_counts[0]*grid_side_length/cell_counts[2], grid_side_length, grid_side_length); // mm - visualizer->getVolumeGrid()->setGridDimensions(grid_side_length, grid_width, grid_width); - 
//visualizer->getVolumeGrid()->setGridDimensions(200, 200, 100); + visualizer->getVolumeGrid()->setGridDimensions(grid_size[0], grid_size[1], grid_size[2]); pipeline.setInputDataPath(input); pipeline.setOutputDataPath(output); + pipeline.load_Rotation_Matrix(); pipeline.setKalmanFilter(KalmanFilter); pipeline.attachCameraSystem(camera_system); pipeline.attachImageProcessor(processor); diff --git a/apps/lptVirtual/virtualcams.cpp b/apps/lptVirtual/virtualcams.cpp index c366d8b..91d8197 100644 --- a/apps/lptVirtual/virtualcams.cpp +++ b/apps/lptVirtual/virtualcams.cpp @@ -51,7 +51,7 @@ int main(int argc, char** argv) { tracker->params.KF_sigma_a = 1E-5; tracker->params.KF_sigma_z = 1E-1; - bool KalmanFilter = true; + bool KalmanFilter = false; lpt::Visualizer::Ptr visualizer = lpt::Visualizer::create(); visualizer->getVolumeGrid()->setGridOrigin(0,0,0); diff --git a/data/input/camera_pairs.yaml b/data/input/camera_pairs.yaml deleted file mode 100644 index febd1cf..0000000 --- a/data/input/camera_pairs.yaml +++ /dev/null @@ -1,105 +0,0 @@ ---- -Camera_A_id: 0 -Camera_B_id: 1 -FundamentalMatrix: [1.89030405539283e-006, -1.04093196814789e-006, -0.00664678681710053, 8.61822894659134e-007, -1.63405914225239e-006, 0.00640004306461817, -0.00664128588984384, 0.00640540340926311, 1] -reprojection_error: 0 -epipolar_error: 0.260057216679508 -... ---- -Camera_A_id: 0 -Camera_B_id: 2 -FundamentalMatrix: [-6.0656236586958e-009, 5.1820556334126e-006, -0.00128470793187639, 5.53168908944493e-006, -2.89333652794565e-007, -0.0235391346045362, -0.00132212307257798, 0.0183604160275295, 1] -reprojection_error: 0 -epipolar_error: 0.308008367365057 -... ---- -Camera_A_id: 0 -Camera_B_id: 3 -FundamentalMatrix: [3.9709527872069e-007, 1.25999420132836e-007, -0.000966335176201363, 1.44784980512211e-007, -3.28362728993861e-007, -0.0010026046745834, -0.000898118308273448, -0.000967410557792806, 1] -reprojection_error: 0 -epipolar_error: 0.521354328502308 -... ---- -Camera_A_id: 0 -Camera_B_id: 4 -FundamentalMatrix: [4.7603240750977e-006, -2.89981776430465e-006, -0.0163731568266823, -4.11269066503218e-006, -4.04851598846788e-006, -0.00467891044404947, 0.013162450250859, 0.00809175937620534, 1] -reprojection_error: 0 -epipolar_error: 0.281445589932528 -... ---- -Camera_A_id: 0 -Camera_B_id: 5 -FundamentalMatrix: [3.59175896256722e-008, -2.65943244469773e-007, -0.00152209006221425, -1.99312256858501e-007, -2.42378266282531e-008, -5.66143684377367e-005, -0.00152143705623287, 6.76292726820549e-005, 1] -reprojection_error: 0 -epipolar_error: 0.222297668457031 -... ---- -Camera_A_id: 1 -Camera_B_id: 2 -FundamentalMatrix: [-2.02900039421668e-007, 2.46797218863872e-007, -0.00104275572911341, -8.14881604542648e-008, 1.91383412308923e-007, -0.000682747219010039, -0.000900660117430427, -0.000871976065070461, 1] -reprojection_error: 0 -epipolar_error: 0.294350363991477 -... ---- -Camera_A_id: 1 -Camera_B_id: 3 -FundamentalMatrix: [1.5745751944869e-005, -2.6756841752219e-005, 0.0793817274512043, -1.45769033562185e-005, -1.33560027145896e-005, 0.0248948281769208, -0.0776991259887609, -0.0201550665481825, 1] -reprojection_error: 0 -epipolar_error: 0.146085565740412 -... ---- -Camera_A_id: 1 -Camera_B_id: 4 -FundamentalMatrix: [-8.38426116038709e-009, 6.72526448025799e-007, -0.00153506585900633, 6.08103305837923e-007, 2.35704164419011e-008, -0.000103086872923331, -0.00147216225549918, -0.000385689374383281, 1] -reprojection_error: 0 -epipolar_error: 0.205157540061257 -... 
---- -Camera_A_id: 1 -Camera_B_id: 5 -FundamentalMatrix: [6.46639600062174e-006, 7.52739256195743e-006, -0.0252145047027859, 3.50376006693084e-006, -5.22393333331646e-006, 0.0084021994018159, 0.0201566346982471, -0.0100949614248565, 1] -reprojection_error: 0 -epipolar_error: 0.662028399380771 -... ---- -Camera_A_id: 2 -Camera_B_id: 3 -FundamentalMatrix: [1.07932360342697e-008, -2.06918103396111e-007, -0.00147230795211467, -2.36431321172487e-007, -1.82892905602609e-008, 0.000128304298357369, -0.00151043479227269, -9.12929171398234e-005, 1] -reprojection_error: 0 -epipolar_error: 0.16512055830522 -... ---- -Camera_A_id: 2 -Camera_B_id: 4 -FundamentalMatrix: [1.57819667517063e-006, 1.76671985404789e-006, 0.0070040913044465, 2.70496429640547e-006, -1.12111871789569e-006, -0.00356866789954817, -0.00931342295225723, 0.000852862134226396, 1] -reprojection_error: 0 -epipolar_error: 0.23666693947532 -... ---- -Camera_A_id: 2 -Camera_B_id: 5 -FundamentalMatrix: [-5.8534842344974e-006, 1.57279706687389e-006, -0.00943140271360856, 1.75895366198001e-006, 4.51530772659845e-006, 0.0125271025015841, -0.0113847603022365, 0.0130130863515346, 1] -reprojection_error: 0 -epipolar_error: 1.00065844167363 -... ---- -Camera_A_id: 3 -Camera_B_id: 4 -FundamentalMatrix: [5.15263409003426e-007, -6.62603922077916e-008, -0.00276172922908678, 4.80784854992422e-007, -4.08084011971119e-007, 0.00176139197931136, -0.00291540024808448, 0.00154226743638125, 1] -reprojection_error: 0 -epipolar_error: 0.149839574640447 -... ---- -Camera_A_id: 3 -Camera_B_id: 5 -FundamentalMatrix: [-1.15290822784659e-007, 3.95174516170361e-006, -0.000902492834704301, 4.18111873154461e-006, -4.14284850967108e-008, -0.0177123337709557, -0.00155142429931453, 0.0158423949027182, 1] -reprojection_error: 0 -epipolar_error: 0.38120512528853 -... ---- -Camera_A_id: 4 -Camera_B_id: 5 -FundamentalMatrix: [-2.61769790161057e-007, 8.82681031326334e-008, -0.00076355663740375, -9.68776383777269e-008, 2.46102966728505e-007, -0.000842868085354389, -0.000810349268502695, -0.00100812886643075, 1] -reprojection_error: 0 -epipolar_error: 0.213086214932528 -... diff --git a/data/input/cameras.yaml b/data/input/cameras.yaml deleted file mode 100644 index c6cfaea..0000000 --- a/data/input/cameras.yaml +++ /dev/null @@ -1,126 +0,0 @@ ---- -CameraID: 0 -Name: "OptiTrack V120:SLIM #139494" -path: ~ -R: [-0.911489074551581, -0.0325639398692805, -0.410033238644495, -0.0522365978525739, -0.979625669728031, 0.193919790260546, -0.407993878407375, 0.198174511555083, 0.891217065675929] -r_vec_u: [0.00739567738761859, 0.00601688105371291, 0.0023495036656014] -T: [-34.568194316511, 117.812207061767, 1186.26285658949] -T_u: [0.144840926928849, 0.0767025980983719, 1.13354906466067] -f: [1296.17461562046, 1296.17461562046] -f_u: [4.28273112038165, 4.28273112038165] -c: [461.061058504711, 225.550754929627] -c_u: [6.74900376570603e-014, 6.92381107270621e-014] -distortion: [-0.30895955665299, 0.118025224039585, -0.00347342661517302, -0.00488269391608535] -distortion_uncertainty: [0.00485657762132341, 0.0204466338633594, 0.000259932365213406, 0.000555166218282362] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... 
---- -CameraID: 1 -Name: "OptiTrack V120:SLIM #139492" -path: ~ -R: [0.996977423838771, -0.0646772138721925, 0.0430450271406423, 0.0776423639873358, 0.849202578444955, -0.522328100026121, -0.00277122180022112, 0.524091441227741, 0.851657490756445] -r_vec_u: [0.00314748726970439, 0.00316363503569309, 0.000967864092467003] -T: [-39.4869178357686, -143.549102337611, 1223.16391257074] -T_u: [0.0564095967285568, 0.243330921980148, 1.38377622754807] -f: [1303.67778186604, 1303.67778186604] -f_u: [4.72914111725791, 4.72914111725791] -c: [255.91342424968, 293.788278432772] -c_u: [3.37450188285301e-014, 5.4816987704878e-014] -distortion: [-0.350835394546469, 0.38374932809296, 0.00930616521683692, 0.00603352900248404] -distortion_uncertainty: [0.00550844789571186, 0.0461771359172639, 0.000962865442567619, 0.000184495203506963] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... ---- -CameraID: 2 -Name: "OptiTrack V120:SLIM #139489" -path: ~ -R: [-0.97638264418804, 0.0801221743137013, 0.200642391611585, -0.0440670889037619, -0.983024020503966, 0.178106335619389, 0.211506557354423, 0.16505820880969, 0.963338343418664] -r_vec_u: [0.00638682261412757, 0.0074272747773353, 0.00091704165214316] -T: [59.2805205871831, 96.2460725667738, 1172.75656522753] -T_u: [0.116844346256214, 0.145961637423674, 1.25874876908725] -f: [1236.34630486102, 1236.34630486102] -f_u: [1.01638353040408, 1.01638353040408] -c: [372.728373145766, 235.802347339284] -c_u: [6.80077199459246e-014, 6.79915716095375e-014] -distortion: [-0.336746069866366, 0.306094306804162, -0.00674347261859362, -0.00757846304032797] -distortion_uncertainty: [0.00363670350454599, 0.038572847131464, 0.000182700699200881, 0.000135952753938655] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... ---- -CameraID: 3 -Name: "OptiTrack V120:SLIM #139493" -path: ~ -R: [0.977039228935687, 0.0388682272013567, -0.209484142681451, 0.00295646691543979, 0.980651060241075, 0.195741557548295, 0.213038973955792, -0.191866513394804, 0.958019643125121] -r_vec_u: [0.00406256090910044, 0.0049195128863513, 0.000748430566070412] -T: [-68.0164390403498, -146.553477799792, 1222.23160904058] -T_u: [0.114938851570939, 0.128379417064062, 0.671153443354092] -f: [1292.29107947029, 1292.29107947029] -f_u: [3.59223020399041, 3.59223020399041] -c: [276.204184798948, 271.389840398] -c_u: [5.29500335025427e-014, 5.28987362502342e-014] -distortion: [-0.324414753268459, -0.0384589986621321, 0.00310626119123319, 0.00358531054211017] -distortion_uncertainty: [0.00572924879761254, 0.0460462570157496, 0.000292437171944061, 0.000259581750205132] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... 
---- -CameraID: 4 -Name: "OptiTrack V120:SLIM #139490" -path: ~ -R: [-0.998576730705372, -0.0363812518824134, -0.0389989410784261, 0.0513344792497644, -0.853973547404762, -0.517777897918675, -0.014466655934447, -0.519042950866684, 0.854625725696155] -r_vec_u: [0.00552297776473109, 0.00538417047172695, 0.00255412058437853] -T: [27.6061534077227, 115.49496356, 1334.96857935969] -T_u: [0.139155852326009, 0.304261208300855, 2.03984613376198] -f: [1260.34107649065, 1260.34107649065] -f_u: [3.77155100749509, 3.77155100749509] -c: [418.356700393795, 222.082864071685] -c_u: [5.08154503734003e-014, 3.91345248276669e-014] -distortion: [-0.321857531828928, -0.0241415840805814, 0.00324825733276138, -0.00225597341218375] -distortion_uncertainty: [0.0034691852370971, 0.0216572979596887, 0.000412543273345664, 0.000102987773446025] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... ---- -CameraID: 5 -Name: "OptiTrack V120:SLIM #139491" -path: ~ -R: [0.921852922901594, -0.0703674913315067, 0.381097893856025, -0.00445256195061348, 0.981389085840653, 0.191978219819133, -0.387514339386124, -0.178672545055355, 0.904383081671466] -r_vec_u: [0.00502770511376512, 0.00462171067405638, 0.001059493807148] -T: [32.7359040638258, -160.056500669715, 1226.90160692164] -T_u: [0.149736683528924, 0.254609842485249, 1.59608432972071] -f: [1285.9557644009, 1285.9557644009] -f_u: [1.57975040544475, 1.57975040544475] -c: [189.568984742346, 311.469878938279] -c_u: [8.21209617232705e-015, 1.0192841498166e-013] -distortion: [-0.304688096283672, 0.0478186431001436, 0.000872473550124941, 0.00112623904724296] -distortion_uncertainty: [0.00393391328938593, 0.0352647424089961, 0.000346846323867536, 0.000275635615832542] -centriod_loc_uncertainty: 0.1 -sensor_size: [3.84, 2.88] -pixel_size: [0.006, 0.006] -avg_reprojection_error: 0 -imagelist: - [] -... 
diff --git a/modules/calib/include/calib.hpp b/modules/calib/include/calib.hpp index 3539822..4b0edfe 100644 --- a/modules/calib/include/calib.hpp +++ b/modules/calib/include/calib.hpp @@ -63,7 +63,7 @@ class Chessboard : public CalibrationBoard { class CirclesGrid : public CalibrationBoard { public: - CirclesGrid(cv::Size board_size = cv::Size(4, 11), double square_size = 16.62); + CirclesGrid(cv::Size board_size = cv::Size(4, 11), double square_size = 20.0); bool find(lpt::ImageFrame& frame); int find_circlesgrid_flags; }; @@ -91,6 +91,7 @@ class Calibrator { get_global_reference = false; updated = false; object = new lpt::ThreePointLine(); + //board = new lpt::Chessboard(cv::Size(9, 6), 25.6); board = new lpt::CirclesGrid(); calib_flags = CV_CALIB_FIX_ASPECT_RATIO | CV_CALIB_FIX_K3 | CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5; } diff --git a/modules/calib/src/calib.cpp b/modules/calib/src/calib.cpp index 19ba752..ab4468a 100644 --- a/modules/calib/src/calib.cpp +++ b/modules/calib/src/calib.cpp @@ -392,20 +392,27 @@ Chessboard::Chessboard(cv::Size board_size, double square_size) { this->object_type = lpt::CHESSBOARD; for( int i = 0; i < board_size.height; ++i ) { for( int j = 0; j < board_size.width; ++j ) { - object_points.push_back(cv::Point3d(double(j * square_size), - double(i * square_size), 0)); + object_points.push_back(cv::Point3f(float(j * square_size), + float(i * square_size), 0)); } } + find_chessboard_flags = CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE; } bool Chessboard::find(lpt::ImageFrame& frame) { frame.particles.clear(); - vector image_points; + //vector image_points; + image_points.clear(); bool found = cv::findChessboardCorners( frame.image, board_size, image_points, find_chessboard_flags); - if (found) + if (found) { cv::cornerSubPix(frame.image, image_points, cv::Size(11,11), cv::Size(-1,-1), cv::TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 )); + for (int j = 0; j < image_points.size(); ++j) { + ParticleImage::Ptr newparticle = ParticleImage::create(j, image_points[j].x, image_points[j].y); + frame.particles.push_back(newparticle); + } + } return found; } @@ -417,10 +424,11 @@ CirclesGrid::CirclesGrid(cv::Size board_size, double square_size) { cout << "Calib Board: height = " << board_size.height << " , width = " << board_size.width << endl; for( int i = 0; i < board_size.height; i++ ) { for( int j = 0; j < board_size.width; j++ ) { - object_points.push_back(cv::Point3d(double( (2 * j + i % 2) * square_size), + object_points.push_back(cv::Point3f(double( (2 * j + i % 2) * square_size), double(i * square_size), 0)); } } + find_circlesgrid_flags = cv::CALIB_CB_ASYMMETRIC_GRID | cv::CALIB_CB_CLUSTERING; } bool CirclesGrid::find(lpt::ImageFrame& frame) { @@ -440,7 +448,6 @@ void CalibrationBoard::draw(cv::Mat& image) { cv::drawChessboardCorners( image, board_size, cv::Mat(image_points), true ); } - void Calibrator::addControls() { void* calibrator_void_ptr = static_cast ( this ); cv::createButton("Get Int Data", callbackSetIntParamDataCollection, calibrator_void_ptr, CV_CHECKBOX, 0 ); @@ -544,9 +551,9 @@ void Calibrator::calibrateCamera() { //double aperature_width, aperature_height; double fovx, fovy, focal_length, aspect_ratio; cv::Point2d principal_point; - cout << camera_matrix << endl; + cout << "camera_matrix" << endl; cout << cameras[current_camera].getCameraMatrix() << endl; - cout << dist_coeffs << endl; + cout << "dist_coeffs" << endl; cout << cameras[current_camera].getDistCoeffs() << endl; 
cv::calibrationMatrixValues(camera_matrix, image_size, cameras[current_camera].sensor_size[0], cameras[current_camera].sensor_size[1], fovx, fovy, @@ -720,13 +727,12 @@ bool Calibrator::findGlobalReference(lpt::ImageFrameGroup& group) { cv::Mat R = cv::Mat::zeros(3, 3, CV_64F); cv::Rodrigues(r_vec, R); - for (int i = 0; i < 3; ++i) - for (int j = 0; j < 3; ++j) + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { cameras[c].R[i][j] = R.at<double>(i,j); - - cameras[c].T[0] = t_vec.at<double>(0); - cameras[c].T[1] = t_vec.at<double>(1); - cameras[c].T[2] = t_vec.at<double>(2); + } + cameras[c].T[i] = t_vec.at<double>(i); + } vector image_points2; @@ -1030,7 +1036,7 @@ void Calibrator::findFundamentalMatrices() { } if (pointsA.size() > 8 && pointsB.size() > 8 ) { - cv::Mat F = cv::findFundamentalMat(pointsA, pointsB); + cv::Mat F = cv::findFundamentalMat(pointsA, pointsB, CV_FM_LMEDS, 1.0, 1.0-1E-6); F >> pair_it->F; pair_it->epipolar_error = checkStereoCalibration( pointsA, pointsB, F ); } diff --git a/modules/core/include/core.hpp b/modules/core/include/core.hpp index 8c65a76..0fb63f1 100644 --- a/modules/core/include/core.hpp +++ b/modules/core/include/core.hpp @@ -109,7 +109,7 @@ class concurrent_queue } lock.unlock(); - m_condition_variable.notify_one(); + m_condition_variable.notify_one(); return ispushed; } @@ -219,6 +219,7 @@ class ImageFrame { int frame_index; cv::Mat image; vector<ParticleImage::Ptr> particles; + vector<vector<cv::Point>> contours; ImageFrame():frame_index(-1){} ImageFrame(int f):frame_index(f){} @@ -374,7 +375,7 @@ class SharedObjects { typedef std::shared_ptr<SharedObjects> Ptr; static inline SharedObjects::Ptr create() {return std::make_shared<SharedObjects>();} - SharedObjects() : frame_rate(0), input_path("./"), output_path("./"), KF_isOn(false) { } + SharedObjects() : frame_rate(0), input_path("./"), output_path("./"), KF_isOn(false), isRotation_Correction(false) { } vector cameras; vector camera_pairs; @@ -385,6 +386,9 @@ class SharedObjects { string input_path; string output_path; bool KF_isOn; + array<array<double, 3>, 3> S; + array<double, 3> P; + bool isRotation_Correction; }; class Particle { diff --git a/modules/correspond/include/correspond.hpp b/modules/correspond/include/correspond.hpp index 677a315..02b1e62 100644 --- a/modules/correspond/include/correspond.hpp +++ b/modules/correspond/include/correspond.hpp @@ -18,6 +18,9 @@ namespace lpt using namespace std; + +//typedef accumulator_set< double, features > boost_accumulator; + /** * @brief MatchMap * Bookkeeping tool to identify 4-way unique matches from epipolar matches @@ -105,6 +108,15 @@ class Correspondence */ virtual void findUniqueMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector<lpt::Match::Ptr>& matches)=0; + /** + * @brief Find 3-way unique matches + * + * @param frame_group + * @param matchmap + * @param matches The final match list + */ + virtual void find3WayMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector<lpt::Match::Ptr>& matches)=0; + /** * @brief Initialize MatchMap */ @@ -156,6 +168,8 @@ class Correspondence */ void printMatchMap(const lpt::ImageFrameGroup& frame_group, string output_file_name) const; + void printMatchMap(const lpt::ImageFrameGroup& frame_group, const lpt::MatchMap& match_map, string output_file_name) const; + protected: /** * @brief Reset MatchMap */ @@ -197,7 +211,7 @@ class PointMatcher : public Correspondence class Parameters { public: Parameters() : match_threshold(0.5), match_thresh_level(5) {} - double match_threshold; + float match_threshold; int match_thresh_level; } params; @@ -206,14 +220,15 @@ class PointMatcher : public Correspondence virtual void
initialize(); virtual void initializeEpipolarMatchThread(int thread_id){} - virtual void addControls() ; + virtual void addControls(); virtual void findEpipolarMatches(const lpt::ImageFrameGroup& cameragroup, lpt::MatchMap& matchmap); virtual void findUniqueMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& MatchMap, vector& matches); + virtual void find3WayMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& MatchMap, vector& matches); friend void callbackMatchThresh(int state, void* data) { PointMatcher* matcher = static_cast(data); - matcher->params.match_threshold = matcher->params.match_thresh_level / 10.0; + matcher->params.match_threshold = static_cast(matcher->params.match_thresh_level) / 10.0; } private: @@ -249,12 +264,15 @@ class Reconstruct3D */ virtual void draw(lpt::Frame3d& frame); - inline void setSharedObjects( std::shared_ptr new_shared_objects) { shared_objects = new_shared_objects; } + inline void setSharedObjects( std::shared_ptr new_shared_objects) { shared_objects = new_shared_objects; axis.open(shared_objects->output_path + "axis.txt"); } inline std::shared_ptr getSharedObjects() const { return shared_objects; } protected: std::shared_ptr < lpt::SharedObjects > shared_objects; private: lpt::Regression solver; + ofstream axis; + vector> positions; + size_t frame_count; }; class Reconstruct3DwithSVD : public Reconstruct3D diff --git a/modules/correspond/src/correspond.cpp b/modules/correspond/src/correspond.cpp index 34a31d5..ada7952 100644 --- a/modules/correspond/src/correspond.cpp +++ b/modules/correspond/src/correspond.cpp @@ -5,6 +5,7 @@ namespace lpt { using namespace std; + Correspondence::Correspondence() : initial_max_particles_per_image(2048), map_storage_size(500) { @@ -47,7 +48,6 @@ void Correspondence::run( //} } - void Correspondence::stop() { epipolar_match_threads.interrupt_all(); @@ -72,6 +72,7 @@ void Correspondence::runEpipolarMatching(int thread_id, lpt::concurrent_queuesecond; resetMatchMap(current_map); findEpipolarMatches(frame_group, current_map); + //printMatchMap(frame_group, current_map, "matchmap.txt"); iter->first = frame_group; this->full_maps_queue.push(iter); @@ -154,6 +155,7 @@ void Correspondence::testMatches(const ImageFrameGroup &cameragroup, const vecto void Correspondence::printMatchMap(const lpt::ImageFrameGroup& frame_group, string output_file_name) const { + cout << "printing matchmap..." << endl; ofstream fout(output_file_name.c_str()); int p_id = 0; for(int i = 0; i < frame_group.size(); ++i) { @@ -176,6 +178,31 @@ void Correspondence::printMatchMap(const lpt::ImageFrameGroup& frame_group, stri fout.close(); } +void Correspondence::printMatchMap(const lpt::ImageFrameGroup& frame_group, const lpt::MatchMap& match_map, string output_file_name) const +{ + cout << "printing matchmap..." 
<< endl; + ofstream fout(output_file_name.c_str()); + int p_id = 0; + for(int i = 0; i < frame_group.size(); ++i) { + fout << "------------------------------------------" << endl; + for (int p = 0; p < frame_group[i].particles.size(); ++p, ++p_id) { + fout << "Cam " << i << ", " << p << ": " << endl; + for (int c = 0; c < frame_group.size(); ++c) { + fout << "\tC" << c << " -- "; + for (int d = 0; d < NUM_MATCHES; ++d) { + int index = match_map[p_id][c][d]; + if (index >=0 ) + fout << "[" << index << ", " << frame_group[c].particles[index]->id << "] "; + else + fout << index << " "; + } + fout << endl; + } + } + } + fout.close(); +} + void Correspondence::resetMatchMap(lpt::MatchMap& matchmap) { for (int a = 0; a < matchmap.size(); ++a) { @@ -223,7 +250,7 @@ void PointMatcher::initialize() void PointMatcher::addControls() { void* matcher_void_ptr = static_cast (this); - cv::createTrackbar("Match Threshold", string(), ¶ms.match_thresh_level, 50, callbackMatchThresh, matcher_void_ptr); + cv::createTrackbar("Match Threshold", string(), ¶ms.match_thresh_level, 100, callbackMatchThresh, matcher_void_ptr); } void PointMatcher::findEpipolarMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap ) @@ -356,6 +383,71 @@ void PointMatcher::findUniqueMatches(const lpt::ImageFrameGroup& frame_group, lp } } +void PointMatcher::find3WayMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector& matches) { + vector num_particles(frame_group.size()); + num_particles[0] = frame_group[0].particles.size(); + for(int i = 1; i < frame_group.size(); ++i) + num_particles[i] = frame_group[i].particles.size() + num_particles[i-1]; + + int num_cameras = frame_group.size(); + matches.clear(); + + for (int cam_a = 0; cam_a < frame_group.size() - 2; ++cam_a) { + int a_start = (cam_a !=0 ? num_particles[cam_a - 1] : 0); + for (int a = 0; a < frame_group[cam_a].particles.size(); ++a) { + lpt::ParticleImage::Ptr Pa = frame_group[cam_a].particles[a]; + if( ! Pa->is_4way_matched ) { + for (int cam_b = cam_a + 1; cam_b < frame_group.size() - 1; ++cam_b) { + int b_start = (cam_b !=0 ? num_particles[cam_b-1] : 0); + for(int match_ab = 0; match_ab < NUM_MATCHES; ++match_ab) { //loop through all A,B matches + int b = matchmap[a + a_start][cam_b][match_ab]; + if (b < 0) + break; + lpt::ParticleImage::Ptr Pb = frame_group[cam_b].particles[b]; + + if( ! Pb->is_4way_matched ) { + for (int cam_c = cam_b + 1; cam_c < frame_group.size(); ++cam_c) { + int c_start = (cam_c !=0 ? num_particles[cam_c-1] : 0); + for (int match_bc = 0; match_bc < NUM_MATCHES; ++match_bc) { + int c = matchmap[b + b_start][cam_c][match_bc]; + if (c < 0) + break; + + lpt::ParticleImage::Ptr Pc = frame_group[cam_c].particles[c]; + + if( ! 
Pc->is_4way_matched && std::count(matchmap[a + a_start][cam_c].begin(), matchmap[a + a_start][cam_c].end(), c) ) { + lpt::Match::Ptr newmatch = lpt::Match::create(); + newmatch->addParticle(Pa,cam_a); + newmatch->addParticle(Pb,cam_b); + newmatch->addParticle(Pc,cam_c); + + matches.push_back(std::move(newmatch)); + + Pa->is_4way_matched = true; + Pb->is_4way_matched = true; + Pc->is_4way_matched = true; + + match_ab = NUM_MATCHES; + match_bc = NUM_MATCHES; + + cam_b = num_cameras; + cam_c = num_cameras; + + + + + } + + } + } + } + } + } + } + } + } +} + void PointMatcher::removeNonUniqueMatches(vector& matches) { for (int i = 0; i < matches.size(); ++i) { @@ -452,9 +544,19 @@ void PointMatcher::removeNonUniqueMatches(vector& matches) // std::move(matches4way.begin(), matches4way.end(), std::back_inserter(matches) ); //} -Reconstruct3D::Reconstruct3D() {} +Reconstruct3D::Reconstruct3D() : frame_count(0) {} -Reconstruct3D::~Reconstruct3D() {} +Reconstruct3D::~Reconstruct3D() +{ + for (size_t i=0; i& matches, lpt::Frame3d& frame) { @@ -496,9 +598,18 @@ void Reconstruct3D::reconstruct3DFrame(vector& matches, lpt::Fr //array coords = {{X[0], X[1], X[2]}}; lpt::Particle3d_Ptr newparticle = lpt::Particle3d::create(); newparticle->id = id; - newparticle->X[0] = X[0]; - newparticle->X[1] = X[1]; - newparticle->X[2] = X[2]; + if (this->shared_objects->isRotation_Correction) { + auto& S = this->shared_objects->S; + auto& P = this->shared_objects->P; + newparticle->X[0] = S[0][0] * X[0] + S[0][1] * X[1] + S[0][2] * X[2] - P[0]; + newparticle->X[1] = S[1][0] * X[0] + S[1][1] * X[1] + S[1][2] * X[2] - P[1]; + newparticle->X[2] = S[2][0] * X[0] + S[2][1] * X[1] + S[2][2] * X[2] - P[2]; + } + else { + newparticle->X[0] = X[0]; + newparticle->X[1] = X[1]; + newparticle->X[2] = X[2]; + } newparticle->frame_index = frame.frame_index; frame.objects.push_back(std::move(newparticle)); @@ -513,20 +624,41 @@ void Reconstruct3D::reconstruct3DFrame(vector& matches, lpt::Fr } } +struct myclass{ + bool operator() (lpt::Particle3d_Ptr p1, lpt::Particle3d_Ptr p2) + { + return (p1->X[1] < p2->X[1]); + } +} mycomp; + void Reconstruct3D::draw(lpt::Frame3d& frame) { auto& cameras = shared_objects->cameras; - vector object_points(frame.objects.size()); + vector object_points(frame.objects.size()); vector image_points(frame.objects.size()); + + auto particles = frame.objects; + + if (positions.size() != frame.objects.size() ) + positions.resize(frame.objects.size()); + + std::sort(particles.begin(), particles.end(), mycomp); + for (int p = 0; p < frame.objects.size(); ++p) { object_points[p].x = frame.objects[p]->X[0]; object_points[p].y = frame.objects[p]->X[1]; object_points[p].z = frame.objects[p]->X[2]; + + for (int i=0; iX.size(); i++) { + positions[p][i] += particles[p]->X[i]; + } } + + frame_count++; for (int i = 0; i < frame.camera_frames.size(); ++i) { - if (frame.camera_frames[i].image.channels() == 1) - cv::cvtColor(frame.camera_frames[i].image, frame.camera_frames[i].image, CV_GRAY2BGR); + //if (frame.camera_frames[i].image.channels() == 1) + // cv::cvtColor(frame.camera_frames[i].image, frame.camera_frames[i].image, CV_GRAY2BGR); cv::Mat R = cv::Mat(3, 3, CV_64F, cameras[i].R); cv::Mat t_vec = cv::Mat(3, 1, CV_64F, cameras[i].T); cv::Mat r_vec = cv::Mat::zeros(3,1, CV_64F); @@ -535,7 +667,7 @@ void Reconstruct3D::draw(lpt::Frame3d& frame) cv::projectPoints(cv::Mat(object_points), r_vec, t_vec, cameras[i].getCameraMatrix(), cameras[i].getDistCoeffs(), image_points); for (int p = 0; p < image_points.size(); ++p) - 
cv::circle(frame.camera_frames[i].image, image_points[p], 3, cv::Scalar(0,0,255), -1 ); + cv::circle( frame.camera_frames[i].image, image_points[p], 3, 255, -1 ); } } diff --git a/modules/dataaquisition/include/dataaquisition.hpp b/modules/dataaquisition/include/dataaquisition.hpp index 5d82229..839590f 100644 --- a/modules/dataaquisition/include/dataaquisition.hpp +++ b/modules/dataaquisition/include/dataaquisition.hpp @@ -29,13 +29,14 @@ copyright: Douglas Barker 2011 #ifdef USE_NP_CAMERASDK #include -#include +#include "cameralibrary.h" #endif namespace lpt { using namespace std; + using namespace CameraLibrary; class Video; class Recorder; @@ -74,11 +75,11 @@ namespace lpt { class Recorder { public: typedef std::shared_ptr Ptr; - static inline lpt::Recorder::Ptr create(int num_cams, string path = "./", int length = 240) { + static inline lpt::Recorder::Ptr create(int num_cams, string path = "./", int length = 900) { return lpt::Recorder::Ptr( new lpt::Recorder(num_cams, path, length) ); } - Recorder(int num_cams, string path = "./", int length = 240) + Recorder(int num_cams, string path = "./", int length = 900) : number_of_cameras(num_cams), path(path), clip_length(length) { videos.resize( number_of_cameras ); @@ -138,7 +139,7 @@ namespace lpt { public: typedef std::shared_ptr < lpt::CameraSystem > Ptr; - CameraSystem() : collect_particledata_from_camera(false) { } + CameraSystem() : collect_particledata_from_camera(true) { } virtual void addControls()=0; virtual bool initializeCameras()=0; @@ -238,7 +239,7 @@ namespace lpt { friend void callbackSetFrameRate( int value, void* data ); private: - std::shared_ptr sync; + cModuleSync* sync; cv::Size sensor_dim; vector optitrack_cameras; vector optitrack_frames; @@ -366,6 +367,8 @@ namespace lpt { friend void callbackSetReprojectionView( int state, void* data ); friend void callbackSetTrajectoryView( int state, void* data ); + void load_Rotation_Matrix(); + protected: std::shared_ptr < lpt::SharedObjects > shared_objects; diff --git a/modules/dataaquisition/src/dataaquisition.cpp b/modules/dataaquisition/src/dataaquisition.cpp index 05f4f31..b3d928b 100644 --- a/modules/dataaquisition/src/dataaquisition.cpp +++ b/modules/dataaquisition/src/dataaquisition.cpp @@ -10,6 +10,7 @@ copyright: Douglas Barker 2011 namespace lpt { using namespace std; +using namespace CameraLibrary; /*****lpt::Video class implementation*****/ @@ -272,18 +273,22 @@ bool Optitrack::initializeCameras() { //Set some initial camera operating parameters optitrack_cameras[camera_id]->SetFrameRate(100); // Percentage for V120-SLIM Cameras - optitrack_cameras[camera_id]->SetVideoType(CameraLibrary::SegmentMode); + optitrack_cameras[camera_id]->SetVideoType(Core::ObjectMode); optitrack_cameras[camera_id]->SetLateMJPEGDecompression(false); optitrack_cameras[camera_id]->SetAEC(false); optitrack_cameras[camera_id]->SetAGC(true); optitrack_cameras[camera_id]->SetExposure(40); optitrack_cameras[camera_id]->SetThreshold(40); optitrack_cameras[camera_id]->SetIntensity(0); //IR LED intensity if available - optitrack_cameras[camera_id]->SetTextOverlay(true); + optitrack_cameras[camera_id]->SetTextOverlay(false); optitrack_cameras[camera_id]->SetObjectColor(255); + optitrack_cameras[camera_id]->SetIRFilter(true); } + + shared_objects->frame_rate = optitrack_cameras[0]->ActualFrameRate(); cout << "\t Actual Frame Rate = " << this->getFrameRate() << endl; - sync = std::make_shared(); + + sync = CameraLibrary::cModuleSync::Create(); for ( int i = 0; i < optitrack_cameras.size(); ++i) 
sync->AddCamera(optitrack_cameras[i]); @@ -297,8 +302,6 @@ bool Optitrack::initializeCameras() { for ( int i = 0; i < optitrack_cameras.size(); ++i) optitrack_cameras[i]->Start(); - - shared_objects->frame_rate = optitrack_cameras[0]->ActualFrameRate(); return true; } @@ -307,10 +310,11 @@ void Optitrack::shutdown() { cout << "------------Shutting Down Cameras------------" << endl; sync->RemoveAllCameras(); + cModuleSync::Destroy( sync ); for ( int i = 0; i < optitrack_cameras.size(); ++i) { optitrack_cameras[i]->Release(); - cout << i << " "; + //cout << i << " "; } CameraLibrary::CameraManager::X().Shutdown(); cout << "\t---Optitrack Shutdown Complete" << endl; @@ -319,34 +323,34 @@ void Optitrack::shutdown() { bool Optitrack::grabFrameGroup(lpt::ImageFrameGroup& frame_group) { CameraLibrary::FrameGroup* native_frame_group = sync->GetFrameGroup(); - if(native_frame_group) { + if(native_frame_group && native_frame_group->Count() == shared_objects->cameras.size()) { auto& image_type = shared_objects->image_type; for ( int camera_id = 0; camera_id < native_frame_group->Count(); ++camera_id ) { optitrack_frames[camera_id] = native_frame_group->GetFrame(camera_id); frame_group[camera_id].image = image_type.clone(); - optitrack_frames[camera_id]->Rasterize(image_type.size().width, image_type.size().height, - static_cast(image_type.step), 8, frame_group[camera_id].image.data); frame_group[camera_id].frame_index = optitrack_frames[camera_id]->FrameID(); - frame_group[camera_id].particles.clear(); if( this->collect_particledata_from_camera ) { frame_group[camera_id].particles.clear(); for (int object_id = 0; object_id < optitrack_frames[camera_id]->ObjectCount(); object_id++) { + auto object = optitrack_frames[camera_id]->Object(object_id); frame_group[camera_id].particles.push_back ( std::move( - lpt::ParticleImage::create( - object_id, - optitrack_frames[camera_id]->Object(object_id)->X(), - optitrack_frames[camera_id]->Object(object_id)->Y(), - optitrack_frames[camera_id]->Object(object_id)->Radius() - ) + lpt::ParticleImage::create( object_id, object->X(), object->Y(), object->Radius() ) ) ); } } + else { + cv::Mat temp = image_type.clone(); + optitrack_frames[camera_id]->Rasterize(image_type.size().width, image_type.size().height, + static_cast(temp.step), 8, temp.data); + cv::Rect ROI(0, 0, image_type.size().width/2, image_type.size().height/2); + cv::resize(temp(ROI), frame_group[camera_id].image, image_type.size()); + } optitrack_frames[camera_id]->Release(); } native_frame_group->Release(); @@ -363,23 +367,28 @@ bool Optitrack::grabFrameGroup(lpt::ImageFrameGroup& frame_group) { } void Optitrack::addControls() { - string null; - init_video_mode = 1; // {0 = Precision, 1 = Segment, 2 = Object, 3 = MJPEG Mode} - init_threshold = 40; - init_exposure = 40; - init_intensity = 5; - init_framerate_mode = 2; //Mode number: {2 = 100%, 1 = 50%, 0 = 25%} for V120:SLIM cameras - void* optitrack_void_ptr = static_cast ( this ); void* cameras_void_ptr = static_cast ( &optitrack_cameras ); + + string null; + auto camera = optitrack_cameras[0]; + init_video_mode = 2; // {0 = Precision, 1 = Segment, 2 = Object, 3 = MJPEG Mode} + init_threshold = 40; + int max_threshold = camera->MaximumThreshold(); + int min_threshold = camera->MinimumThreshold(); + init_exposure = 40; + int max_exposure = camera->MaximumExposureValue(); + int min_exposure = camera->MinimumExposureValue(); + init_intensity = 5; + init_framerate_mode = 2; //Mode number: {2 = 100%, 1 = 50%, 0 = 25%} for V120:SLIM cameras 
cv::createButton("IR Filter", callbackSetIRFilter, cameras_void_ptr ,CV_CHECKBOX, 1 ); cv::createButton("Automatic Gain Control", callbackSetAGC, cameras_void_ptr, CV_CHECKBOX, 1 ); cv::createButton("Autoamtic Exposure Control", callbackSetAEC, cameras_void_ptr, CV_CHECKBOX, 0 ); - cv::createButton("Text Overlay", callbackSetTextOverlay, cameras_void_ptr, CV_CHECKBOX, 1 ); + cv::createButton("Text Overlay", callbackSetTextOverlay, cameras_void_ptr, CV_CHECKBOX, 0 ); cv::createTrackbar("VideoMode", null , &init_video_mode, 3, callbackSetVideoType, optitrack_void_ptr); - cv::createTrackbar("Threshold", null , &init_threshold, 255, callbackSetThreshold, cameras_void_ptr); - cv::createTrackbar("Exposure", null , &init_exposure, 480, callbackSetExposure, cameras_void_ptr); + cv::createTrackbar("Threshold", null , &init_threshold, max_threshold-min_threshold, callbackSetThreshold, cameras_void_ptr); + cv::createTrackbar("Exposure", null , &init_exposure, max_exposure-min_exposure, callbackSetExposure, cameras_void_ptr); cv::createTrackbar("FrameRateMode", null , &init_framerate_mode, 2, callbackSetFrameRate, optitrack_void_ptr); } @@ -388,36 +397,37 @@ void callbackSetVideoType( int mode, void* data ) { Optitrack* system = static_cast< Optitrack*> (data); vector& optitrack_cameras = system->optitrack_cameras; - CameraLibrary::eVideoMode new_mode; + Core::eVideoMode new_mode; + cout << "Setting Camera Mode: "; switch (mode) { case 0: cout << "Precision" << endl; - new_mode = CameraLibrary::PrecisionMode; - system->setParticleCollectionFromCamera(false); + new_mode = Core::PrecisionMode; + system->setParticleCollectionFromCamera(true); break; case 1: cout << "Segment" << endl; - new_mode = CameraLibrary::SegmentMode; - system->setParticleCollectionFromCamera(false); + new_mode = Core::SegmentMode; + system->setParticleCollectionFromCamera(true); break; case 2: cout << "Object" << endl; - new_mode = CameraLibrary::ObjectMode; + new_mode = Core::ObjectMode; system->setParticleCollectionFromCamera(true); break; case 3: cout << "MJPEG" << endl; - new_mode = CameraLibrary::MJPEGMode; + new_mode = Core::MJPEGMode; system->setParticleCollectionFromCamera(false); break; default: - new_mode = CameraLibrary::MJPEGMode; + new_mode = Core::ObjectMode; break; } for (int id = 0; id < optitrack_cameras.size(); id++) { - if (new_mode == CameraLibrary::MJPEGMode && optitrack_cameras[id]->IsMJPEGAvailable() == false) + if (new_mode == Core::MJPEGMode && optitrack_cameras[id]->IsMJPEGAvailable() == false) cout << "Camera " << id << " does not support MJPEG Mode" << endl; else optitrack_cameras[id]->SetVideoType(new_mode); @@ -426,7 +436,10 @@ void callbackSetVideoType( int mode, void* data ) void callbackSetThreshold( int value, void* data) { vector& optitrack_cameras = *static_cast< vector* > (data); + + value += optitrack_cameras[0]->MinimumThreshold(); cout << "Setting Threshold value: " << value << endl; + for (int id = 0; id < optitrack_cameras.size(); id++) optitrack_cameras[id]->SetThreshold( value ); } @@ -460,7 +473,7 @@ void callbackSetAGC(int state, void* data) { } void callbackSetTextOverlay(int state, void* data) { - // Enable or Disable autmatic gain control AGC + // Enable or Disable text overlay vector& optitrack_cameras = *static_cast< vector* > (data); cout << "Setting Text Overlay: " << (state ? 
"on":"off") << endl; @@ -471,13 +484,15 @@ void callbackSetTextOverlay(int state, void* data) { void callbackSetExposure(int value, void* data) { // Sets Exposure manually when AEC is not activated vector& optitrack_cameras = *static_cast< vector* > (data); + + value += optitrack_cameras[0]->MinimumExposureValue(); cout << "Setting Exposure: Desired = " << value; for (int id = 0; id < optitrack_cameras.size(); id++) optitrack_cameras[id]->SetExposure(value); Sleep(2); - int actual = optitrack_cameras[0]->Exposure(); - cout << ", Actual = "<< actual << "("<< 17.13 * actual << " micro seconds)" << endl; //Only valid for V120:SLIM + //int actual = optitrack_cameras[0]->Exposure(); + //cout << ", Actual = "<< actual << "("<< 17.13 * actual << " micro seconds)" << endl; //Only valid for V120:SLIM } void callbackSetIntensity(int value, void* data) { @@ -494,25 +509,25 @@ void callbackSetIntensity(int value, void* data) { void callbackSetFrameRate(int value, void* data) { lpt::Optitrack* camera_system = static_cast< lpt::Optitrack* > ( data ); if (camera_system) { - int fps_percentage; + int fps; switch(value) { case 2: - fps_percentage = 100; + fps = 120; break; case 1: - fps_percentage = 50; + fps = 60; break; case 0: - fps_percentage = 25; + fps = 30; break; default: - fps_percentage = 100; + fps = 120; break; } - cout << "Setting Frame Rate: Desired % = " << fps_percentage; + cout << "Setting Frame Rate: Desired = " << fps; vector& optitrack_cameras = camera_system->getOptitrackCameras(); for (int id = 0; id < optitrack_cameras.size(); id++) - optitrack_cameras[id]->SetFrameRate(fps_percentage); + optitrack_cameras[id]->SetFrameRate(fps); Sleep(2); cout << ", Actual FPS = " << optitrack_cameras[0]->ActualFrameRate() << endl; camera_system->shared_objects->frame_rate = optitrack_cameras[0]->ActualFrameRate(); @@ -723,8 +738,9 @@ void StreamingPipeline::runControlWindow() { calibrator->setGlobalReference(false); } - if( this->showReprojectionView() && ! frame3d->objects.empty() ) + if( this->showReprojectionView() && ! frame3d->objects.empty() ) { reconstructor->draw( *frame3d ); + } if ( count2 >= shared_objects->frame_rate / 20 ) { count2 = 0; @@ -782,8 +798,10 @@ void StreamingPipeline::aquireImageData() { while( camera_system->areCamerasRunning() ) { lpt::ImageFrameGroup frame_group( cameras.size() ); bool good_frames = camera_system->grabFrameGroup( frame_group ); - if ( good_frames && ! frame_queue.full() ) + + if ( good_frames && ! 
frame_queue.full() ) { frame_queue.push( std::move(frame_group) ); + } else boost::this_thread::sleep(sleeptime); } @@ -793,14 +811,14 @@ void StreamingPipeline::processImages(int index) { auto& cameras = shared_objects->cameras; auto& image_type = shared_objects->image_type; - cout << index << " Processing images" << endl; + cout << "Thread #" << index << " processing images" << endl; // boost::chrono::system_clock::time_point start, stop; cv::Mat map1, map2; cv::Mat temp_image; cv::Mat cam_mat = cameras[index].getCameraMatrix(); cv::Mat dist_coeffs = cameras[index].getDistCoeffs(); cv::Size image_size = image_type.size(); - cv::initUndistortRectifyMap(cam_mat, dist_coeffs, cv::Mat(), cv::Mat(), image_size, CV_32FC1, map1, map2); + //cv::initUndistortRectifyMap(cam_mat, dist_coeffs, cv::Mat(), cv::Mat(), image_size, CV_32FC1, map1, map2); bool first = true; while( camera_system->areCamerasRunning() ) { @@ -823,11 +841,14 @@ void StreamingPipeline::processImages(int index) { //start = boost::chrono::system_clock::now(); if ( this->camera_system->getParticleCollectionFromCamera() == false ) { temp_image = imageproc_frames[index].image.clone(); + //cout << "empty? " << temp_image.empty() << endl; processor->processImage( temp_image ); - detector->detectFeatures( temp_image, imageproc_frames[index].particles ); + detector->detectFeatures( temp_image, imageproc_frames[index].particles, imageproc_frames[index].contours ); } - if ( this->showDetectionView() ) + if ( this->showDetectionView() ) { detector->drawResult( imageproc_frames[index] ); + //detector->drawContours( imageproc_frames[index].image, imageproc_frames[index].contours ); + } lpt::undistortPoints(cameras[index], imageproc_frames[index] ); @@ -838,6 +859,7 @@ void StreamingPipeline::processImages(int index) { } void StreamingPipeline::solveCorrespondence() { + cout << "Correspondence Thread started" << endl; // boost::chrono::system_clock::time_point start, stop; // start = boost::chrono::system_clock::now(); matcher->run(&processed_queue, &match_queue, 1,1); @@ -848,7 +870,7 @@ void StreamingPipeline::solveCorrespondence() { } void StreamingPipeline::reconstuct3DObjects() { - cout << "Reconstruct 3D Objects Thread" << endl; + cout << "Reconstruct 3D Objects Thread started" << endl; // boost::chrono::system_clock::time_point start, stop; while( camera_system->areCamerasRunning() ) { @@ -955,6 +977,7 @@ void StreamingPipeline::run() { cout << "need to set thread priorities for Linux systems" << endl; #endif + boost::this_thread::sleep( boost::posix_time::seconds(3) ); this->runControlWindow(); // All threads running...stop called when cameras are stopped this->stop(); @@ -981,4 +1004,21 @@ void StreamingPipeline::stop() { visualizer_thread.join(); } +void StreamingPipeline::load_Rotation_Matrix() +{ + ifstream S_in, P_in; + string S = this->shared_objects->input_path + "S.txt"; + string P = this->shared_objects->input_path + "P.txt"; + S_in.open(S); + P_in.open(P); + + for (int i=0; i<3; i++) { + for (int j=0; j<3; j++) { + S_in >> this->shared_objects->S[i][j]; + } + P_in >> this->shared_objects->P[i]; + } + this->shared_objects->isRotation_Correction = true; +} + } /*NAMESPACE PT*/ diff --git a/modules/gpu/include/correspondcuda.h b/modules/gpu/include/correspondcuda.h index 88fec7d..240a062 100644 --- a/modules/gpu/include/correspondcuda.h +++ b/modules/gpu/include/correspondcuda.h @@ -46,15 +46,10 @@ class PointMatcherCUDA : public lpt::Correspondence { public: typedef std::shared_ptr Ptr; static inline 
lpt::PointMatcherCUDA::Ptr create() { return std::make_shared(); } - - friend void callbackMatchThreshcuda(int state, void* data) { - PointMatcherCUDA* matcher = static_cast(data); - matcher->params.match_threshold = matcher->params.match_thresh_level / static_cast(10.0); - } class Parameters { public: - Parameters() : match_thresh_level(5), match_threshold(0.5){} + Parameters() : match_thresh_level(10), match_threshold(0.5){} float match_threshold; int match_thresh_level; } params; @@ -63,12 +58,15 @@ class PointMatcherCUDA : public lpt::Correspondence { virtual void initialize(); virtual void initializeEpipolarMatchThread(int thread_id); - virtual void addControls() { - void* matcher_void_ptr = static_cast ( this ); - cv::createTrackbar("Match Thresh", string() , ¶ms.match_thresh_level, 50, callbackMatchThreshcuda, matcher_void_ptr); - } + virtual void addControls(); virtual void findUniqueMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector& matches); virtual void findEpipolarMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap); + virtual void find3WayMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector& matches); + + friend void callbackMatchThreshcuda(int state, void* data) { + PointMatcherCUDA* matcher = static_cast(data); + matcher->params.match_threshold = static_cast(matcher->params.match_thresh_level) / 10.0; + } private: void findEpipolarMatchesStreams(lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap); diff --git a/modules/gpu/src/correspondcuda.cu b/modules/gpu/src/correspondcuda.cu index 457cc5d..f6824b5 100644 --- a/modules/gpu/src/correspondcuda.cu +++ b/modules/gpu/src/correspondcuda.cu @@ -211,10 +211,10 @@ void PointMatcherCUDA::initializeEpipolarMatchThread(int thread_id) { streams.clear(); for (int f = 2; f < camera_pairs_h.size(); ++f) { - if ( camera_pairs_h.size() % f == 0 ) { + //if ( camera_pairs_h.size() % f == 0 ) { streams.resize(f); - break; - } + //break; + //} } cout << "Streams size = " << streams.size() << endl; for(int i = 0; i < streams.size(); ++i) @@ -230,6 +230,11 @@ void PointMatcherCUDA::initialize() { this->initializeMatchMap(); } +void PointMatcherCUDA::addControls() { + void* matcher_void_ptr = static_cast ( this ); + cv::createTrackbar("Match Thresh", string() , ¶ms.match_thresh_level, 100, callbackMatchThreshcuda, matcher_void_ptr); +} + void PointMatcherCUDA::findEpipolarMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap) { thrust::fill(matches2way_d.begin(), matches2way_d.end(), match_initializer); @@ -309,15 +314,85 @@ void PointMatcherCUDA::findEpipolarMatches(const lpt::ImageFrameGroup& frame_gro void PointMatcherCUDA::findUniqueMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector& matches) { vector num_particles(frame_group.size()); - num_particles[0] = frame_group[0].particles.size(); - for(int i = 1; i < frame_group.size(); ++i) - num_particles[i] = frame_group[i].particles.size() + num_particles[i-1]; - - for (int cam_a = 0; cam_a < frame_group.size() - 1; ++cam_a) { - int a_start = (cam_a !=0 ? num_particles[cam_a - 1] : 0); - for (int a = 0; a < frame_group[cam_a].particles.size(); ++a) { - lpt::ParticleImage::Ptr Pa = frame_group[cam_a].particles[a]; - if( ! 
Pa->is_4way_matched ) + num_particles[0] = frame_group[0].particles.size(); + for(int i = 1; i < frame_group.size(); ++i) + num_particles[i] = frame_group[i].particles.size() + num_particles[i-1]; + + for (int cam_a = 0; cam_a < frame_group.size() - 3; ++cam_a) { + int a_start = (cam_a !=0 ? num_particles[cam_a - 1] : 0); + for (int a = 0; a < frame_group[cam_a].particles.size(); ++a) { + lpt::ParticleImage::Ptr Pa = frame_group[cam_a].particles[a]; + if( ! Pa->is_4way_matched ) + for (int cam_b = cam_a + 1; cam_b < frame_group.size() - 2; ++cam_b) { + int b_start = (cam_b !=0 ? num_particles[cam_b-1] : 0); + for(int match_ab = 0; match_ab < NUM_MATCHES; ++match_ab) { //loop through all A,B matches + int b = matchmap[a + a_start][cam_b][match_ab]; + if (b < 0) + break; + lpt::ParticleImage::Ptr Pb = frame_group[cam_b].particles[b]; + + if( ! Pb->is_4way_matched ) + for (int cam_c = cam_b + 1; cam_c < frame_group.size() - 1; ++cam_c) { + int c_start = (cam_c !=0 ? num_particles[cam_c-1] : 0); + for (int match_bc = 0; match_bc < NUM_MATCHES; ++match_bc) { + int c = matchmap[b + b_start][cam_c][match_bc]; + if (c < 0) + break; + + lpt::ParticleImage::Ptr Pc = frame_group[cam_c].particles[c]; + + if( ! Pc->is_4way_matched && std::count(matchmap[a + a_start][cam_c].begin(), matchmap[a + a_start][cam_c].end(), c) ) + for (int cam_d = cam_c + 1; cam_d < frame_group.size(); ++cam_d) { + vector matches4way; + int d_start = (cam_d !=0 ? num_particles[cam_d-1] : 0); + for (int match_cd = 0; match_cd < NUM_MATCHES; ++match_cd) { + int d = matchmap[c + c_start][cam_d][match_cd]; + if (d < 0) + break; + lpt::ParticleImage::Ptr Pd = frame_group[cam_d].particles[d]; + if( ! Pd->is_4way_matched && std::count(matchmap[a + a_start][cam_d].begin(), matchmap[a + a_start][cam_d].end(), d) && std::count(matchmap[b + b_start][cam_d].begin(), matchmap[b+b_start][cam_d].end(), d) ) { + if(! Pa->is_4way_matched && ! Pb->is_4way_matched && ! Pc->is_4way_matched && ! Pd->is_4way_matched) { + lpt::Match::Ptr newmatch = lpt::Match::create(); + newmatch->addParticle(Pa,cam_a); + newmatch->addParticle(Pb,cam_b); + newmatch->addParticle(Pc,cam_c); + newmatch->addParticle(Pd,cam_d); + matches4way.push_back(std::move(newmatch)); + Pa->is_4way_matched = true; + Pb->is_4way_matched = true; + Pc->is_4way_matched = true; + Pd->is_4way_matched = true; + match_ab = NUM_MATCHES; + match_bc = NUM_MATCHES; + match_cd = NUM_MATCHES; + + } + } + } + std::move(matches4way.begin(), matches4way.end(), std::back_inserter(matches) ); + } + } + } + } + } + } + } +} + +void PointMatcherCUDA::find3WayMatches(const lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap, vector& matches) { + vector num_particles(frame_group.size()); + num_particles[0] = frame_group[0].particles.size(); + for(int i = 1; i < frame_group.size(); ++i) + num_particles[i] = frame_group[i].particles.size() + num_particles[i-1]; + + int num_cameras = frame_group.size(); + matches.clear(); + + for (int cam_a = 0; cam_a < frame_group.size() - 2; ++cam_a) { + int a_start = (cam_a !=0 ? num_particles[cam_a - 1] : 0); + for (int a = 0; a < frame_group[cam_a].particles.size(); ++a) { + lpt::ParticleImage::Ptr Pa = frame_group[cam_a].particles[a]; + if( ! Pa->is_4way_matched ) { for (int cam_b = cam_a + 1; cam_b < frame_group.size() - 1; ++cam_b) { int b_start = (cam_b !=0 ? 
num_particles[cam_b-1] : 0);
 				for(int match_ab = 0; match_ab < NUM_MATCHES; ++match_ab) {  //loop through all A,B matches
@@ -326,55 +401,45 @@ void PointMatcherCUDA::findUniqueMatches(const lpt::ImageFrameGroup& frame_group
 						break;
 					lpt::ParticleImage::Ptr Pb = frame_group[cam_b].particles[b];
-					if( ! Pb->is_4way_matched )
-						for (int cam_c = cam_b + 1; cam_c < frame_group.size(); ++cam_c) {
-							int c_start = (cam_c !=0 ? num_particles[cam_c-1] : 0);
-							for (int match_bc = 0; match_bc < NUM_MATCHES; ++match_bc) {
-								int c = matchmap[b + b_start][cam_c][match_bc];
-								if (c < 0)
-									break;
+					if( ! Pb->is_4way_matched ) {
+						for (int cam_c = cam_b + 1; cam_c < frame_group.size(); ++cam_c) {
+							int c_start = (cam_c !=0 ? num_particles[cam_c-1] : 0);
+							for (int match_bc = 0; match_bc < NUM_MATCHES; ++match_bc) {
+								int c = matchmap[b + b_start][cam_c][match_bc];
+								if (c < 0)
+									break;
-								lpt::ParticleImage::Ptr Pc = frame_group[cam_c].particles[c];
-
-								if( ! Pc->is_4way_matched && std::count(matchmap[a + a_start][cam_c].begin(), matchmap[a + a_start][cam_c].end(), c) )
-									for (int cam_d = cam_c + 1; cam_d < frame_group.size(); ++cam_d) {
-										vector matches4way;
-										int d_start = (cam_d !=0 ? num_particles[cam_d-1] : 0);
-										for (int match_cd = 0; match_cd < NUM_MATCHES; ++match_cd) {
-											int d = matchmap[c + c_start][cam_d][match_cd];
-											if (d < 0)
-												break;
-											lpt::ParticleImage::Ptr Pd = frame_group[cam_d].particles[d];
-											if( ! Pd->is_4way_matched && std::count(matchmap[a + a_start][cam_d].begin(), matchmap[a + a_start][cam_d].end(), d) && std::count(matchmap[b + b_start][cam_d].begin(), matchmap[b+b_start][cam_d].end(), d) ) {
-												if(! Pa->is_4way_matched && ! Pb->is_4way_matched && ! Pc->is_4way_matched && ! Pd->is_4way_matched) {
-													lpt::Match::Ptr newmatch = lpt::Match::create();
-													newmatch->addParticle(Pa,cam_a);
-													newmatch->addParticle(Pb,cam_b);
-													newmatch->addParticle(Pc,cam_c);
-													newmatch->addParticle(Pd,cam_d);
-													matches4way.push_back(std::move(newmatch));
-													Pa->is_4way_matched = true;
-													Pb->is_4way_matched = true;
-													Pc->is_4way_matched = true;
-													Pd->is_4way_matched = true;
-													match_ab = NUM_MATCHES;
-													match_bc = NUM_MATCHES;
-													match_cd = NUM_MATCHES;
+								lpt::ParticleImage::Ptr Pc = frame_group[cam_c].particles[c];
+
+								if( ! Pc->is_4way_matched && std::count(matchmap[a + a_start][cam_c].begin(), matchmap[a + a_start][cam_c].end(), c) ) {
+									lpt::Match::Ptr newmatch = lpt::Match::create();
+									newmatch->addParticle(Pa,cam_a);
+									newmatch->addParticle(Pb,cam_b);
+									newmatch->addParticle(Pc,cam_c);
+
+									matches.push_back(std::move(newmatch));
+
+									Pa->is_4way_matched = true;
+									Pb->is_4way_matched = true;
+									Pc->is_4way_matched = true;
+
+									match_ab = NUM_MATCHES;
+									match_bc = NUM_MATCHES;
-												}
-											}
-										}
-										std::move(matches4way.begin(), matches4way.end(), std::back_inserter(matches) );
+									cam_b = num_cameras;
+									cam_c = num_cameras;
+								}
 							}
 						}
 					}
 				}
 			}
 		}
-		}
+		}
+	}
+	//cout << matches.size() << endl;
 }
-
 void PointMatcherCUDA::findEpipolarMatchesStreams(lpt::ImageFrameGroup& frame_group, lpt::MatchMap& matchmap) {
 	thrust::fill(matches2way_d.begin(), matches2way_d.end(), match_initializer);
 	num_particles_h[0] = frame_group[0].particles.size();
diff --git a/modules/imageproc/include/imageproc.hpp b/modules/imageproc/include/imageproc.hpp
index 4d7a39d..412373a 100644
--- a/modules/imageproc/include/imageproc.hpp
+++ b/modules/imageproc/include/imageproc.hpp
@@ -352,7 +352,7 @@ class Detector {
 public:
 	typedef std::shared_ptr Ptr;
 
-	virtual void detectFeatures( const cv::Mat& image, vector& features ) = 0;
+	virtual void detectFeatures( const cv::Mat& image, vector& features, vector>& contours ) = 0;
 	virtual void addControls() = 0;
 
 	/**
@@ -362,6 +362,8 @@ class Detector {
 	 */
 	void drawResult(ImageFrame& frame);
 
+	void drawContours( cv::Mat& image, vector> contours );
+
 	/**
 	 * @brief Detector destructor
 	 */
@@ -411,7 +413,9 @@ class FindContoursDetector : public Detector {
 	 * @param image The image to be processed
 	 * @param features The vector holding detected features
 	 */
-	void detectFeatures(const cv::Mat &image, vector& features );
+	void detectFeatures(const cv::Mat &image, vector& features, vector>& contours );
+
+	//void detectFeatures(const cv::Mat &image, vector& features, vector>& contours);
 
 	/**
 	 * @brief addControls
@@ -424,7 +428,7 @@ class FindContoursDetector : public Detector {
 	 * @param result_image The image with contours drawn
 	 * @param contours The contours to be drawn
 	 */
-	void drawContours(cv::Mat& result_image, vector > contours);
+	//void drawContours(cv::Mat& result_image, vector > contours);
 
 	/**
 	 * @brief FindContoursDetector destructor
@@ -480,7 +484,7 @@ class GoodFeaturesToTrackDetector : public Detector {
 	 * @param image The image to be processed
 	 * @param features The vector holding detected features
 	 */
-	void detectFeatures( const cv::Mat& image, vector& features );
+	void detectFeatures( const cv::Mat& image, vector& features, vector>& contours );
 
 	/**
 	 * @brief addControls
diff --git a/modules/imageproc/src/imageproc.cpp b/modules/imageproc/src/imageproc.cpp
index 2cac40b..ac2b257 100644
--- a/modules/imageproc/src/imageproc.cpp
+++ b/modules/imageproc/src/imageproc.cpp
@@ -10,7 +10,7 @@ namespace lpt {
 using namespace std;
 
 void undistortPoints(const lpt::Camera& camera, lpt::ImageFrame& frame) {
-	if (frame.particles.size() > 0) {
+	if (!frame.particles.empty()) {
 		vector image_points(frame.particles.size());
 
 		for (int j = 0; j < frame.particles.size(); ++j) {
@@ -158,9 +158,11 @@ GaussianBlur::~GaussianBlur()
 
 void Detector::drawResult(ImageFrame &frame)
 {
-	cv::cvtColor(frame.image, frame.image, CV_GRAY2BGR);
+	//cv::cvtColor(frame.image, frame.image, CV_GRAY2BGR);
 	for (int i = 0; i < frame.particles.size(); i++) {
-		cv::circle( frame.image, cv::Point( static_cast(frame.particles[i]->x + 0.5),
-			static_cast(frame.particles[i]->y + 0.5)), 5, cv::Scalar(0, 255, 0), 1, 8, 2);
+		auto particle = frame.particles[i];
+		cv::circle( frame.image, cv::Point( static_cast(particle->x), static_cast(particle->y) ),
+			static_cast(particle->radius), 200, 1);
 	}
 }
@@ -174,9 +176,9 @@ FindContoursDetector::FindContoursDetector()
 	cout << "Find Contours detector constructed" << endl;
 }
 
-void FindContoursDetector::detectFeatures(const cv::Mat &image, vector &features)
+void FindContoursDetector::detectFeatures(const cv::Mat &image, vector &features, vector>& contours)
 {
-	vector > contours;
+	//vector > contours;
 	cv::findContours(image, contours, params.mode, params.method);
 	//double area;
 	for(int c = 0; c < contours.size(); ++c) {
@@ -191,6 +193,22 @@ void FindContoursDetector::detectFeatures(const cv::Mat &image, vector &features
 	}
 }
 
+//void FindContoursDetector::detectFeatures(const cv::Mat &image, vector &features, vector>& contours)
+//{
+//	cv::findContours(image, contours, params.mode, params.method);
+//	//double area;
+//	for(int c = 0; c < contours.size(); ++c) {
+//		//area = cv::contourArea( contours[c] );
+//		if( contours[c].size() > (double)params.min_contour_area && contours[c].size() < (double)params.max_contour_area) {
+//		//if( area > (double)params.min_contour_area && area < (double)params.max_contour_area) {
+//			cv::Moments mom = cv::moments( cv::Mat(contours[c]) );
+//			if( mom.m00 > 0 ) {
+//				features.push_back( ParticleImage::create(static_cast(features.size()), mom.m10 / mom.m00, mom.m01 / mom.m00) );
+//			}
+//		}
+//	}
+//}
+
 void FindContoursDetector::addControls()
 {
 	cout << "\t--Adding Find Contours Detector Controls to window: " << endl;
@@ -198,9 +216,9 @@ void FindContoursDetector::addControls()
 	cv::createTrackbar("Max Area", string(), &params.max_contour_area, 1000, 0, 0 );
 }
 
-void FindContoursDetector::drawContours(cv::Mat &result_image, vector > contours)
+void Detector::drawContours(cv::Mat &image, vector > contours)
 {
-	cv::drawContours( result_image, contours, -1, cv::Scalar(0), 1 );
+	cv::drawContours( image, contours, -1, cv::Scalar(0, 255, 0), 1 );
 }
 
 FindContoursDetector::~FindContoursDetector()
@@ -213,7 +231,7 @@ GoodFeaturesToTrackDetector::GoodFeaturesToTrackDetector()
 	cout << "Good Features To Track (GFTT) detector constructed" << endl;
 }
 
-void GoodFeaturesToTrackDetector::detectFeatures(const cv::Mat &image, vector &features)
+void GoodFeaturesToTrackDetector::detectFeatures(const cv::Mat &image, vector &features, vector>& contours)
 {
 	//CV_Assert(image.depth() != sizeof(uchar));
 	if (!image.isContinuous())
@@ -293,7 +311,7 @@ void processImages( Camera& camera, ImageProcessor& processor, Detector& detecto
 		cv::Mat temp_image = camera.frames[i].image.clone();
 		processor.processImage( temp_image );
 		cv::imshow("processed image", temp_image );
-		detector.detectFeatures( temp_image, camera.frames[i].particles);
+		detector.detectFeatures( temp_image, camera.frames[i].particles, camera.frames[i].contours);
 		detector.drawResult( camera.frames[i] );
 		cv::imshow(result_window.str(), camera.frames[i].image);
 	}
diff --git a/modules/tracking/include/tracking.hpp b/modules/tracking/include/tracking.hpp
index 2c59cbf..a812292 100644
--- a/modules/tracking/include/tracking.hpp
+++ b/modules/tracking/include/tracking.hpp
@@ -176,6 +176,8 @@ class Tracker {
 	vector trajectory_views;
 	std::shared_ptr shared_objects;
 	bool isSave;
+	size_t particle_count;
+	size_t traj_count;
 };
 
 /**
diff --git a/modules/tracking/src/tracking.cpp b/modules/tracking/src/tracking.cpp
index 4298ed1..922e272 100644
--- a/modules/tracking/src/tracking.cpp
+++ b/modules/tracking/src/tracking.cpp
@@ -72,7 +72,7 @@ double CostMinimumAcceleration::calculate(const Trajectory3d &traj, const Partic
 	return lpt::calculateDistance(traj.extrap_object, particle);
 }
 
-Tracker::Tracker() : clear_drawings(false)
+Tracker::Tracker() : clear_drawings(false), particle_count(0), traj_count(10)
 {
 	cout << "Tracker constructed" << endl;
 }
@@ -80,23 +80,38 @@ Tracker::Tracker() : clear_drawings(false)
 Tracker::~Tracker()
 {
 	cout << "Tracker destructed" << endl;
+	cout << "particles detected: " << particle_count << endl;
+	cout << "Save trajectories? (1 for yes, 0 for no)" << endl;
 	cin >> isSave;
+	//isSave = false;
 
 	if (isSave) {
 		stringstream file_name;
 		file_name << shared_objects->output_path << "Trajectories.txt";
 		ofstream output(file_name.str());
-		output << trajectories.size() << endl;
+		output << trajectories.size() + active_trajs.size() << endl;
 		output << endl;
 		int id = 0;
+		size_t link_count = 0;
 		for (auto traj_it = trajectories.begin(); traj_it != trajectories.end(); traj_it++) {
+			if ( (*traj_it)->objects.size() >= 10 ) {
+				link_count += (*traj_it)->objects.size();
 				saveTrajectory(output, *traj_it, id);
 				id++;
 			}
 		}
+		for (auto traj_it = active_trajs.begin(); traj_it != active_trajs.end(); traj_it++) {
+
+			if ( (*traj_it)->objects.size() >= 10 ) {
+				link_count += (*traj_it)->objects.size();
+				saveTrajectory(output, *traj_it, id);
+				id++;
+			}
+		}
 		cout << id << endl;
+		cout << "links tracked: " << link_count << endl;
 	}
 }
@@ -108,7 +123,10 @@ void Tracker::saveTrajectory(ofstream& output, Trajectory3d_Ptr traj, int id)
 	for (size_t i=0; i<traj->objects.size(); i++) {
 		output << traj->objects[i].first->frame_index << " ";
-		for (size_t j=0; j<traj->objects[i].second.size(); j++) {
+		for (size_t j=0; j<traj->objects[i].first->X.size(); j++) {
+			output << traj->objects[i].first->X[j] << " ";
+		}
+		for (size_t j=traj->objects[i].first->X.size(); j<traj->objects[i].second.size(); j++) {
 			output << traj->objects[i].second[j] << " ";
 		}
 		output << endl;
@@ -118,6 +136,7 @@ void Tracker::saveTrajectory(ofstream& output, Trajectory3d_Ptr traj, int id)
 
 void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 {
+	particle_count += frame1.objects.size() + frame2.objects.size();
 	list next_unmatched_particles;
 	list new_trajs;
@@ -127,7 +146,7 @@ void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 	{
 		std::move(frame1.objects.begin(), frame1.objects.end(), std::back_inserter(unmatched_particles));
 	}
-
+	// TODO: Maybe try parallelizing this loop for multi-core system
 	// Extrapolate all active trajectories
 	for (list::iterator traj_iter = active_trajs.begin(); traj_iter != active_trajs.end(); ++traj_iter)
@@ -135,12 +154,13 @@ void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 		(*traj_iter)->candidates_costs.clear();
 		lpt::extrapolatePosition( *(*traj_iter) );
 	}
-
+	// Loop over all particles in frame2
 	for (int p2 = 0; p2 < frame2.objects.size(); ++p2) {
 		lpt::Particle3d& particle2 = *frame2.objects[p2];
 		double lowest_cost = std::numeric_limits::max();
 		lpt::Trajectory3d* traj_match = 0;
+		// Search among active trajectories
 		for (list::iterator traj_iter = active_trajs.begin(); traj_iter != active_trajs.end(); ++traj_iter) {
 			lpt::Trajectory3d& traj = *(*traj_iter);
@@ -154,7 +174,7 @@ void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 					traj_match = &traj;
 				}
 			}
-			}
+		}
 		// Candidate exists within search radius among active trajectories
 		if ( traj_match ) {
@@ -184,16 +204,10 @@ void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 
 			if (this->shared_objects->KF_isOn) {
 				// Create Kalman filter
-				cv::Mat stateIni(9, 1, CV_64F);
-
-				for (int i=0; i<9; i++) {
-					if (i < 3) {
-						stateIni.at(i) = particle1.get()->X[i];
-					}
-					else {
-						stateIni.at(i) = 0;
-					}
+				cv::Mat stateIni = cv::Mat::zeros(9, 1, CV_64F);
+				for (int i=0; i<3; i++) {
+					stateIni.at(i) = particle1.get()->X[i];
 				}
 
 				newtraj->initializeKF(stateIni, params.KF_sigma_a, params.KF_sigma_z);
@@ -284,6 +298,11 @@ void Tracker::trackFrames(lpt::Frame3d& frame1, lpt::Frame3d& frame2 )
 	}
 	// Add newly created trajectories to the active list
 	std::move( new_trajs.begin(), new_trajs.end(), std::back_inserter(active_trajs) );
+
+	if (trajectories.size() / traj_count) {
+		cout << "Tracked " << traj_count << " trajectories" << endl;
+		traj_count *= 10;
+	}
 }
 
 void Tracker::addControls()
diff --git a/modules/visualization/src/visualization.cpp b/modules/visualization/src/visualization.cpp
index c0e1355..6da6454 100644
--- a/modules/visualization/src/visualization.cpp
+++ b/modules/visualization/src/visualization.cpp
@@ -1321,7 +1321,7 @@ void FiniteVolumeGrid::updateGrid()
 	for(vtkIdType i = 0; i < grid->GetNumberOfCells(); i++) {
 
-		size_t total_count = extract_result( acceleration_accumulators[(int)i][0] );
+		size_t total_count = extract_result( velocity_accumulators[(int)i][0] );
 		if ( total_count > 0)
 			arrow_source_ids->SetTuple1(i, 0);
 		vec[0] = extract_result( velocity_accumulators[(int)i][0] );
@@ -1843,6 +1843,7 @@ void FiniteVolumeGrid::savePlaneData()
 	ofstream P_e_out(this->shared_objects->output_path + "P_e_XY.txt");
 	ofstream P_l_out(this->shared_objects->output_path + "P_l_XY.txt");
 	ofstream all_data_out(this->shared_objects->output_path + "all_data.txt");
+	ofstream axis(this->shared_objects->output_path + "axis.txt");
 
 	U_out << header.str();
 	U_out << "XY Plane Z = \t" << k << endl;
@@ -2093,6 +2094,25 @@ void FiniteVolumeGrid::savePlaneData()
 		all_data_out << endl;
 	}
 	all_data_out.close();
+
+	array ijk;
+	for (int i=0; i<this->params.grid_cell_counts[0]; i++) {
+		double max = std::numeric_limits::lowest();
+		for (int j=0; j<this->params.grid_cell_counts[1]; j++) {
+			for (int k=0; k<this->params.grid_cell_counts[2]; k++) {
+				int cell_id = this->getGridIndex(i,j,k);
+				if (velocities[cell_id][0] > max) {
+					ijk[0] = i;
+					ijk[1] = j;
+					ijk[2] = k;
+					max = velocities[cell_id][0];
+				}
+			}
+		}
+		axis << ijk[0] << "\t" << ijk[1] << "\t" << ijk[2] << endl;
+	}
+	axis.close();
+
 	cout << "Done Saving data" << endl;
 }
@@ -2902,10 +2922,10 @@ void Visualizer::manageQueue()
 			X2 = iter_next->first->X;   // next particle, index 2
 
 			for (int d = 0; d < X0.size(); ++d) {
-				object_iter->second.at(d) = X1[d];
+				//object_iter->second.at(d) = X1[d];
 				U1[d] = ( ( X2[d] - X0[d] ) * frame_rate ) / 2.0 / 1000.0;   // Velocity
 				object_iter->second.at(d+3) = U1[d] * 1000.0;
-				A[d] = ( X2[d] - 2.0 * X1[d] + X0[d] ) * frame_rate * frame_rate / 1000.0;   // Acceleration
+				A[d] = ( X2[d] - 2*X1[d] + X0[d] ) * frame_rate * frame_rate / 1000.0;   // Acceleration
 				object_iter->second.at(d+6) = A[d] * 1000.0;
 			}
@@ -2915,7 +2935,9 @@ void Visualizer::manageQueue()
 			for (int d = 0; d < X0.size(); ++d) {
 				U0[d] = ( X1[d] - Xm1[d] ) * frame_rate / 2.0 / 1000.0;
+				//iter_previous->second.at(d+3) = U0[d] * 1000.0;
 				U2[d] = ( X3[d] - X1[d] ) * frame_rate / 2.0 / 1000.0;
+				//iter_next->second.at(d+3) = U2[d] * 1000.0;
 			}
 		}
diff --git a/test/testImageprocessor/imageprocessortest.cpp b/test/testImageprocessor/imageprocessortest.cpp
index 2669bd3..493cdf8 100644
--- a/test/testImageprocessor/imageprocessortest.cpp
+++ b/test/testImageprocessor/imageprocessortest.cpp
@@ -79,7 +79,7 @@ int main(int argc, char** argv)
 		for (size_t f = 0; f < true_frames.size(); ++f) {
 			cout << "Frame " << f << ":";
 			processor->processImage(cameras[c].frames[f].image);
-			detector->detectFeatures(cameras[c].frames[f].image, cameras[c].frames[f].particles);
+			detector->detectFeatures(cameras[c].frames[f].image, cameras[c].frames[f].particles, cameras[c].frames[f].contours);
 			lpt::testDetectedParticles(true_frames[f].particles, cameras[c].frames[f].particles);
 		}
 	}
diff --git a/test/testTracking/trackingtest.cpp b/test/testTracking/trackingtest.cpp
index ddc2d47..2ac961a 100644
--- a/test/testTracking/trackingtest.cpp
+++ b/test/testTracking/trackingtest.cpp
@@ -13,14 +13,14 @@ int main(int argc, char** argv) {
 		input_file = argv[1];
 	}
 	else{
-		input_file = "../../../data/tests/scaling/256p512f";
+		input_file = "../../../data/tests/synthetic.txt";
 	}
 
 	if (argc > 2) {
 		output_file = argv[2];
 	}
 	else{
-		output_file = "./output/out0";
+		output_file = "../../../data/output/";
 	}
 
 	cout << input_file << endl;
@@ -44,6 +44,14 @@ int main(int argc, char** argv) {
 	cout << "--Tracking Started:" << endl;
 	lpt::Tracker tracker;
+	auto shared_objects = lpt::SharedObjects::create();
+	shared_objects->frame_rate = 60;
+	shared_objects->KF_isOn = true;
+	shared_objects->output_path = output_file;
+	tracker.setSharedObjects(shared_objects);
+	tracker.setCostCalculator(lpt::CostMinimumAcceleration::create());
+	tracker.params.KF_sigma_a = 1E-5;
+	tracker.params.KF_sigma_z = 1E-2;
 	tracker.params.alpha = 1.0;   //0.8 for pivSTD352  //0.8 for CFD 1000p4096f100fps  //1.0 for scaling data sets
 	tracker.params.max_radius = 50.0;
@@ -51,9 +59,10 @@ int main(int argc, char** argv) {
 
 	//--Initialize Timing function
 	start = boost::chrono::system_clock::now();
-
-	for (int f = 0; f < frames.size() - 1; ++f)
+
+	for (int f = 0; f < frames.size() - 1; ++f) {
 		tracker.trackFrames(*frames[f], *frames[f+1]);
+	}
 
 	stop = boost::chrono::system_clock::now();