diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp index fac4dee81d4..3213c595089 100644 --- a/modules/aruco/include/opencv2/aruco.hpp +++ b/modules/aruco/include/opencv2/aruco.hpp @@ -1,683 +1,34 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_ARUCO_HPP__ #define __OPENCV_ARUCO_HPP__ -#include -#include -#include -#include "opencv2/aruco/dictionary.hpp" - -/** - * @defgroup aruco ArUco Marker Detection - * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers) - * These markers are useful for easy, fast and robust camera pose estimation.ç - * - * The main functionalities are: - * - Detection of markers in an image - * - Pose estimation from a single marker or from a board/set of markers - * - Detection of ChArUco board for high subpixel accuracy - * - Camera calibration from both, ArUco boards and ChArUco boards. - * - Detection of ChArUco diamond markers - * The samples directory includes easy examples of how to use the module. - * - * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014. - * - * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method. - * - * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014. - * "Automatic generation and detection of highly reliable fiducial markers under occlusion". - * Pattern Recogn. 47, 6 (June 2014), 2280-2292. 
DOI=10.1016/j.patcog.2014.01.005 - * - * @sa http://www.uco.es/investiga/grupos/ava/node/26 - * - * This module has been originally developed by Sergio Garrido-Jurado as a project - * for Google Summer of Code 2015 (GSoC 15). - * - * -*/ +#include "opencv2/aruco_detector.hpp" +#include "opencv2/aruco/aruco_calib_pose.hpp" namespace cv { namespace aruco { -//! @addtogroup aruco -//! @{ - -enum CornerRefineMethod{ - CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach - CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy - CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting - CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros -}; - -/** - * @brief Parameters for the detectMarker process: - * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding - * contours (default 3). - * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding - * contours (default 23). - * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax - * during the thresholding (default 10). - * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7) - * - minMarkerPerimeterRate: determine minimum perimeter for marker contour to be detected. This - * is defined as a rate respect to the maximum dimension of the input image (default 0.03). - * - maxMarkerPerimeterRate: determine maximum perimeter for marker contour to be detected. This - * is defined as a rate respect to the maximum dimension of the input image (default 4.0). - * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to - * determine which contours are squares. (default 0.03) - * - minCornerDistanceRate: minimum distance between corners for detected markers relative to its - * perimeter (default 0.05) - * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers - * (in pixels) (default 3) - * - minMarkerDistanceRate: minimum mean distance beetween two marker corners to be considered - * similar, so that the smaller one is removed. The rate is relative to the smaller perimeter - * of the two markers (default 0.05). - * - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement. - * CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points, - * CORNER_REFINE_APRILTAG use the AprilTag2 approach). (default CORNER_REFINE_NONE) - * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5). - * - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner - * refinement process (default 30). - * - cornerRefinementMinAccuracy: minimum error for the stop cristeria of the corner refinement - * process (default: 0.1) - * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1). - * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker - * when removing the perspective (default 4). - * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not - * considered for the determination of the cell bit. Represents the rate respect to the total - * size of the cell, i.e. 
perspectiveRemovePixelPerCell (default 0.13) - * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e. - * number of allowed white bits in the border). Represented as a rate respect to the total - * number of bits per marker (default 0.35). - * - minOtsuStdDev: minimun standard deviation in pixels values during the decodification step to - * apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher - * than 128 or not) (default 5.0) - * - errorCorrectionRate error correction rate respect to the maximun error correction capability - * for each dictionary. (default 0.6). - * - aprilTagMinClusterPixels: reject quads containing too few pixels. (default 5) - * - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad. (default 10) - * - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to - * 180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180) - * - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error - * allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting - * these quads "early" saves expensive decoding processing. (default 10.0) - * - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that - * the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). (default 5) - * - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images. (default 0) - * - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a - * cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still - * done at full resolution. (default 0.0) - * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?) - * Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). (default 0.0) - * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just - * invert a normal marker by using a tilde, ~markerImage. (default false) - * - useAruco3Detection: to enable the new and faster Aruco detection strategy. The most important observation from the authors of - * Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) is, that the binary - * code of a marker can be reliably detected if the canonical image (that is used to extract the binary code) - * has a size of minSideLengthCanonicalImg (in practice tau_c=16-32 pixels). - * Link to article: https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers - * In addition, very small markers are barely useful for pose estimation and thus a we can define a minimum marker size that we - * still want to be able to detect (e.g. 50x50 pixel). - * To decouple this from the initial image size they propose to resize the input image - * to (I_w_r, I_h_r) = (tau_c / tau_dot_i) * (I_w, I_h), with tau_dot_i = tau_c + max(I_w,I_h) * tau_i. - * Here tau_i (parameter: minMarkerLengthRatioOriginalImg) is a ratio in the range [0,1]. - * If we set this to 0, the smallest marker we can detect - * has a side length of tau_c. If we set it to 1 the marker would fill the entire image. 
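The resize rule quoted above can be made concrete with a short worked sketch. The helper below is illustrative only (the function name is not part of the API); tau_c stands for minSideLengthCanonicalImg and tau_i for minMarkerLengthRatioOriginalImg, matching the parameters documented in this block.

```cpp
#include <algorithm>
#include <opencv2/core.hpp>

// tau_c = minSideLengthCanonicalImg, tau_i = minMarkerLengthRatioOriginalImg.
// Name and structure are illustrative, not taken from the library.
cv::Size aruco3ResizedSize(cv::Size inputSize, int tau_c, float tau_i)
{
    // tau_dot_i = tau_c + max(I_w, I_h) * tau_i
    const float tau_dot_i = tau_c + std::max(inputSize.width, inputSize.height) * tau_i;
    const float scale = tau_c / tau_dot_i;
    // (I_w_r, I_h_r) = (tau_c / tau_dot_i) * (I_w, I_h)
    return cv::Size(cvRound(inputSize.width * scale), cvRound(inputSize.height * scale));
}
// e.g. a 1920x1080 frame with tau_c = 32 and tau_i = 0.1 gives
// tau_dot_i = 32 + 192 = 224, so the detection image is scaled by roughly 0.14.
```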
- * For a FullHD video a good value to start with is 0.1. - * - minSideLengthCanonicalImg: minimum side length of a marker in the canonical image. - * Latter is the binarized image in which contours are searched. - * So all contours with a size smaller than minSideLengthCanonicalImg*minSideLengthCanonicalImg will omitted from the search. - * - minMarkerLengthRatioOriginalImg: range [0,1], eq (2) from paper - * The parameter tau_i has a direct influence on the processing speed. - */ -struct CV_EXPORTS_W DetectorParameters { - - DetectorParameters(); - CV_WRAP static Ptr create(); - CV_WRAP bool readDetectorParameters(const FileNode& fn); - - CV_PROP_RW int adaptiveThreshWinSizeMin; - CV_PROP_RW int adaptiveThreshWinSizeMax; - CV_PROP_RW int adaptiveThreshWinSizeStep; - CV_PROP_RW double adaptiveThreshConstant; - CV_PROP_RW double minMarkerPerimeterRate; - CV_PROP_RW double maxMarkerPerimeterRate; - CV_PROP_RW double polygonalApproxAccuracyRate; - CV_PROP_RW double minCornerDistanceRate; - CV_PROP_RW int minDistanceToBorder; - CV_PROP_RW double minMarkerDistanceRate; - CV_PROP_RW int cornerRefinementMethod; - CV_PROP_RW int cornerRefinementWinSize; - CV_PROP_RW int cornerRefinementMaxIterations; - CV_PROP_RW double cornerRefinementMinAccuracy; - CV_PROP_RW int markerBorderBits; - CV_PROP_RW int perspectiveRemovePixelPerCell; - CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell; - CV_PROP_RW double maxErroneousBitsInBorderRate; - CV_PROP_RW double minOtsuStdDev; - CV_PROP_RW double errorCorrectionRate; - - // April :: User-configurable parameters. - CV_PROP_RW float aprilTagQuadDecimate; - CV_PROP_RW float aprilTagQuadSigma; - - // April :: Internal variables - CV_PROP_RW int aprilTagMinClusterPixels; - CV_PROP_RW int aprilTagMaxNmaxima; - CV_PROP_RW float aprilTagCriticalRad; - CV_PROP_RW float aprilTagMaxLineFitMse; - CV_PROP_RW int aprilTagMinWhiteBlackDiff; - CV_PROP_RW int aprilTagDeglitch; - - // to detect white (inverted) markers - CV_PROP_RW bool detectInvertedMarker; - - // New Aruco functionality proposed in the paper: - // Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) - CV_PROP_RW bool useAruco3Detection; - CV_PROP_RW int minSideLengthCanonicalImg; - CV_PROP_RW float minMarkerLengthRatioOriginalImg; -}; - - /** - * @brief Basic marker detection - * - * @param image input image - * @param dictionary indicates the type of markers that will be searched - * @param corners vector of detected marker corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, - * the dimensions of this array is Nx4. The order of the corners is clockwise. - * @param ids vector of identifiers of the detected markers. The identifier is of type int - * (e.g. std::vector). For N detected markers, the size of ids is also N. - * The identifiers have the same order than the markers in the imgPoints array. - * @param parameters marker detection parameters - * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a - * correct codification. Useful for debugging purposes. - * - * Performs marker detection in the input image. Only markers included in the specific dictionary - * are searched. For each detected marker, it returns the 2D position of its corner in the image - * and its corresponding identifier. - * Note that this function does not perform pose estimation. - * @note The function does not correct lens distortion or takes it into account. 
It's recommended to undistort - * input image with corresponging camera model, if camera parameters are known - * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard - * - */ +@deprecated Use class ArucoDetector +*/ CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr &dictionary, OutputArrayOfArrays corners, OutputArray ids, const Ptr ¶meters = DetectorParameters::create(), OutputArrayOfArrays rejectedImgPoints = noArray()); -/** @brief - * rvec/tvec define the right handed coordinate system of the marker. - * PatternPos defines center this system and axes direction. - * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, - * axis Z (blue color) - third coordinate. - * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection - */ -enum PatternPos { - /** @brief The marker coordinate system is centered on the middle of the marker. - * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: - * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), - * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes.jpg) - */ - CCW_center, - /** @brief The marker coordinate system is centered on the top-left corner of the marker. - * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: - * (0, 0, 0), (markerLength, 0, 0), - * (markerLength, markerLength, 0), (0, markerLength, 0). - * - * These pattern points define this coordinate system: - * ![Image with axes drawn](images/singlemarkersaxes2.jpg) - */ - CW_top_left_corner -}; - -/** @brief - * Pose estimation parameters - * @param pattern Defines center this system and axes direction (default PatternPos::CCW_center). - * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided - * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further - * optimizes them (default false). - * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE). - * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection - */ -struct CV_EXPORTS_W EstimateParameters { - CV_PROP_RW PatternPos pattern; - CV_PROP_RW bool useExtrinsicGuess; - CV_PROP_RW SolvePnPMethod solvePnPMethod; - - EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false), - solvePnPMethod(SOLVEPNP_ITERATIVE) {} - - CV_WRAP static Ptr create() { - return makePtr(); - } -}; - - -/** - * @brief Pose estimation for single markers - * - * @param corners vector of already detected markers corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, - * the dimensions of this array should be Nx4. The order of the corners should be clockwise. - * @sa detectMarkers - * @param markerLength the length of the markers' side. The returning translation vectors will - * be in the same unit. Normally, unit is meters. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector). 
- * Each element in rvecs corresponds to the specific marker in imgPoints. - * @param tvecs array of output translation vectors (e.g. std::vector). - * Each element in tvecs corresponds to the specific marker in imgPoints. - * @param _objPoints array of object points of all the marker corners - * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker - * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false, - * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). - * - * This function receives the detected markers and returns their pose estimation respect to - * the camera individually. So for each marker, one rotation and translation vector is returned. - * The returned transformation is the one that transforms points from each marker coordinate system - * to the camera coordinate system. - * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker, - * with the Z axis perpendicular to the marker plane. - * estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system (by default) are: - * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), - * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0) - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - * @sa @ref tutorial_aruco_detection - * @sa EstimateParameters - * @sa PatternPos - */ -CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, - InputArray cameraMatrix, InputArray distCoeffs, - OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(), - Ptr estimateParameters = EstimateParameters::create()); - - - -/** - * @brief Board of markers - * - * A board is a set of markers in the 3D space with a common coordinate system. - * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used. - * A Board object is composed by: - * - The object points of the marker corners, i.e. their coordinates respect to the board system. - * - The dictionary which indicates the type of markers of the board - * - The identifier of all the markers in the board. - */ -class CV_EXPORTS_W Board { - - public: - /** - * @brief Provide way to create Board by passing necessary data. Specially needed in Python. - * - * @param objPoints array of object points of all the marker corners in the board - * @param dictionary the dictionary of markers employed for this board - * @param ids vector of the identifiers of the markers in the board - * - */ - CV_WRAP static Ptr create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids); - - /** - * @brief Set ids vector - * - * @param ids vector of the identifiers of the markers in the board (should be the same size - * as objPoints) - * - * Recommended way to set ids vector, which will fail if the size of ids does not match size - * of objPoints. - */ - CV_WRAP void setIds(InputArray ids); - - /// array of object points of all the marker corners in the board - /// each marker include its 4 corners in this order: - ///- objPoints[i][0] - left-top point of i-th marker - ///- objPoints[i][1] - right-top point of i-th marker - ///- objPoints[i][2] - right-bottom point of i-th marker - ///- objPoints[i][3] - left-bottom point of i-th marker - /// - /// Markers are placed in a certain order - row by row, left to right in every row. 
- /// For M markers, the size is Mx4. - CV_PROP std::vector< std::vector< Point3f > > objPoints; - - /// the dictionary of markers employed for this board - CV_PROP Ptr dictionary; - - /// vector of the identifiers of the markers in the board (same size than objPoints) - /// The identifiers refers to the board dictionary - CV_PROP_RW std::vector< int > ids; - - /// coordinate of the bottom right corner of the board, is set when calling the function create() - CV_PROP Point3f rightBottomBorder; -}; - - /** - * @brief Planar board with grid arrangement of markers - * More common type of board. All markers are placed in the same plane in a grid arrangement. - * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard) - */ -class CV_EXPORTS_W GridBoard : public Board { - - public: - /** - * @brief Draw a GridBoard - * - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. - * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of the GridBoard, ready to be printed. - */ - CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); - - - /** - * @brief Create a GridBoard object - * - * @param markersX number of markers in X direction - * @param markersY number of markers in Y direction - * @param markerLength marker side length (normally in meters) - * @param markerSeparation separation between two markers (same unit as markerLength) - * @param dictionary dictionary of markers indicating the type of markers - * @param firstMarker id of first marker in dictionary to use on board. - * @return the output GridBoard object - * - * This functions creates a GridBoard object given the number of markers in each direction and - * the marker size and marker separation. - */ - CV_WRAP static Ptr create(int markersX, int markersY, float markerLength, - float markerSeparation, const Ptr &dictionary, int firstMarker = 0); - - /** - * - */ - CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); } - - /** - * - */ - CV_WRAP float getMarkerLength() const { return _markerLength; } - - /** - * - */ - CV_WRAP float getMarkerSeparation() const { return _markerSeparation; } - - - private: - // number of markers in X and Y directions - int _markersX, _markersY; - - // marker side length (normally in meters) - float _markerLength; - - // separation between markers in the grid - float _markerSeparation; -}; - - - -/** - * @brief Pose estimation for a board of markers - * - * @param corners vector of already detected markers corners. For each marker, its four corners - * are provided, (e.g std::vector > ). For N detected markers, the - * dimensions of this array should be Nx4. The order of the corners should be clockwise. - * @param ids list of identifiers for each marker in corners - * @param board layout of markers in the board. The layout is composed by the marker identifiers - * and the positions of each marker corner in the board reference system. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvec Output vector (e.g. 
cv::Mat) corresponding to the rotation vector of the board - * (see cv::Rodrigues). Used as initial guess if not empty. - * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. - * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. - * Used as initial guess if not empty. - * - * This function receives the detected markers and returns the pose of a marker board composed - * by those markers. - * A Board of marker has a single world coordinate system which is defined by the board layout. - * The returned transformation is the one that transforms points from the board coordinate system - * to the camera coordinate system. - * Input markers that are not included in the board layout are ignored. - * The function returns the number of markers from the input employed for the board pose estimation. - * Note that returning a 0 means the pose has not been estimated. - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - */ -CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr &board, - InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec, - InputOutputArray tvec, bool useExtrinsicGuess = false); - - - - -/** - * @brief Refind not detected markers based on the already detected and the board layout - * - * @param image input image - * @param board layout of markers in the board. - * @param detectedCorners vector of already detected marker corners. - * @param detectedIds vector of already detected marker identifiers. - * @param rejectedCorners vector of rejected candidates during the marker detection process. - * @param cameraMatrix optional input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs optional vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param minRepDistance minimum distance between the corners of the rejected candidate and the - * reprojected marker in order to consider it as a correspondence. - * @param errorCorrectionRate rate of allowed erroneous bits respect to the error correction - * capability of the used dictionary. -1 ignores the error correction step. - * @param checkAllOrders Consider the four posible corner orders in the rejectedCorners array. - * If it set to false, only the provided corner order is considered (default true). - * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the - * original rejectedCorners array. - * @param parameters marker detection parameters - * - * This function tries to find markers that were not detected in the basic detecMarkers function. - * First, based on the current detected marker and the board layout, the function interpolates - * the position of the missing markers. Then it tries to find correspondence between the reprojected - * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate - * parameters. - * If camera parameters and distortion coefficients are provided, missing markers are reprojected - * using projectPoint function. If not, missing marker projections are interpolated using global - * homography, and all the marker corners in the board must have the same Z coordinate. 
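A minimal sketch of the detect / refine / estimate-board-pose flow described above, using the free functions declared in this header. The board geometry, dictionary and camera parameters are placeholder example values, not values mandated by the API.

```cpp
#include <vector>
#include <opencv2/aruco.hpp>

void boardPoseSketch(const cv::Mat& image, const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    // Example layout: 5x7 markers, 4 cm markers with 1 cm separation.
    cv::Ptr<cv::aruco::GridBoard> board = cv::aruco::GridBoard::create(5, 7, 0.04f, 0.01f, dict);

    std::vector<std::vector<cv::Point2f>> corners, rejected;
    std::vector<int> ids;
    cv::aruco::detectMarkers(image, dict, corners, ids,
                             cv::aruco::DetectorParameters::create(), rejected);

    // Recover markers missed by the basic detection, using the board layout
    // and (optionally) the camera parameters for reprojection.
    cv::aruco::refineDetectedMarkers(image, board, corners, ids, rejected,
                                     cameraMatrix, distCoeffs);

    cv::Vec3d rvec, tvec;
    int used = cv::aruco::estimatePoseBoard(corners, ids, board,
                                            cameraMatrix, distCoeffs, rvec, tvec);
    // used == 0 means the board pose could not be estimated.
    (void)used;
}
```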
- */ -CV_EXPORTS_W void refineDetectedMarkers( - InputArray image,const Ptr &board, InputOutputArrayOfArrays detectedCorners, - InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, - InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), - float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true, - OutputArray recoveredIdxs = noArray(), const Ptr ¶meters = DetectorParameters::create()); - - - -/** - * @brief Draw detected markers in image - * - * @param image input/output image. It must have 1 or 3 channels. The number of channels is not - * altered. - * @param corners positions of marker corners on input image. - * (e.g std::vector > ). For N detected markers, the dimensions of - * this array should be Nx4. The order of the corners should be clockwise. - * @param ids vector of identifiers for markers in markersCorners . - * Optional, if not provided, ids are not painted. - * @param borderColor color of marker borders. Rest of colors (text color and first corner color) - * are calculated based on this one to improve visualization. - * - * Given an array of detected marker corners and its corresponding ids, this functions draws - * the markers in the image. The marker borders are painted and the markers identifiers if provided. - * Useful for debugging purposes. - * - */ -CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners, - InputArray ids = noArray(), - Scalar borderColor = Scalar(0, 255, 0)); - - - -/** - * @brief Draw a canonical marker image - * - * @param dictionary dictionary of markers indicating the type of markers - * @param id identifier of the marker that will be returned. It has to be a valid id - * in the specified dictionary. - * @param sidePixels size of the image in pixels - * @param img output image with the marker - * @param borderBits width of the marker border. - * - * This function returns a marker image in its canonical form (i.e. ready to be printed) - */ -CV_EXPORTS_W void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray img, - int borderBits = 1); - - - -/** - * @brief Draw a planar board - * @sa _drawPlanarBoardImpl - * - * @param board layout of the board that will be drawn. The board should be planar, - * z coordinate is ignored - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. - * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of a planar board, ready to be printed. It assumes - * the Board layout specified is planar by ignoring the z coordinates of the object points. - */ -CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputArray img, - int marginSize = 0, int borderBits = 1); - - - -/** - * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. - */ -void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img, - int marginSize = 0, int borderBits = 1); - - - -/** - * @brief Calibrate a camera using aruco markers - * - * @param corners vector of detected marker corners in all frames. - * The corners should have the same format returned by detectMarkers (see #detectMarkers). 
- * @param ids list of identifiers for each marker in corners - * @param counter number of markers in each frame so that corners and ids can be split - * @param board Marker Board layout - * @param imageSize Size of the image used only to initialize the intrinsic camera matrix. - * @param cameraMatrix Output 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS - * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be - * initialized before calling the function. - * @param distCoeffs Output vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view - * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding - * k-th translation vector (see the next output parameter description) brings the board pattern - * from the model coordinate space (in which object points are specified) to the world coordinate - * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). - * @param tvecs Output vector of translation vectors estimated for each pattern view. - * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. - * Order of deviations values: - * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, - * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. - * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. - * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, - * \f$R_i, T_i\f$ are concatenated 1x3 vectors. - * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. - * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). - * @param criteria Termination criteria for the iterative optimization algorithm. - * - * This function calibrates a camera using an Aruco Board. The function receives a list of - * detected markers from several views of the Board. The process is similar to the chessboard - * calibration in calibrateCamera(). The function returns the final re-projection error. - */ -CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco( - InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, - OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, - OutputArray perViewErrors, int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - -/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. 
- */ -CV_EXPORTS_W double calibrateCameraAruco( - InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - -/** - * @brief Given a board configuration and a set of detected markers, returns the corresponding - * image points and object points to call solvePnP - * - * @param board Marker board layout. - * @param detectedCorners List of detected marker corners of the board. - * @param detectedIds List of identifiers for each marker. - * @param objPoints Vector of vectors of board marker points in the board coordinate space. - * @param imgPoints Vector of vectors of the projections of board marker corner points. +@deprecated Use class ArucoDetector */ -CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, - InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints); - +CV_EXPORTS_W void refineDetectedMarkers(InputArray image,const Ptr &board, + InputOutputArrayOfArrays detectedCorners, + InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, + InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), + float minRepDistance = 10.f, float errorCorrectionRate = 3.f, + bool checkAllOrders = true, OutputArray recoveredIdxs = noArray(), + const Ptr ¶meters = DetectorParameters::create()); -//! @} } } diff --git a/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp new file mode 100644 index 00000000000..7f6624ccde4 --- /dev/null +++ b/modules/aruco/include/opencv2/aruco/aruco_calib_pose.hpp @@ -0,0 +1,286 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_CALIB_POSE_HPP__ +#define __OPENCV_ARUCO_CALIB_POSE_HPP__ +#include +#include + +namespace cv { +namespace aruco { + +//! @addtogroup aruco +//! @{ + +/** @brief rvec/tvec define the right handed coordinate system of the marker. + * PatternPos defines center this system and axes direction. + * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate, + * axis Z (blue color) - third coordinate. + * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection + */ +enum PatternPos { + /** @brief The marker coordinate system is centered on the middle of the marker. + * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0). + * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes.jpg) + */ + ARUCO_CCW_CENTER, + /** @brief The marker coordinate system is centered on the top-left corner of the marker. + * The coordinates of the four corners (CW order) of the marker in its own coordinate system are: + * (0, 0, 0), (markerLength, 0, 0), + * (markerLength, markerLength, 0), (0, markerLength, 0). 
+ * + * These pattern points define this coordinate system: + * ![Image with axes drawn](images/singlemarkersaxes2.jpg) + * + * These pattern dots are convenient to use with a chessboard/ChArUco board. + */ + ARUCO_CW_TOP_LEFT_CORNER +}; + +/** @brief Pose estimation parameters + * @param pattern Defines center this system and axes direction (default PatternPos::ARUCO_CCW_CENTER). + * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided + * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further + * optimizes them (default false). + * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE). + * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection + */ +struct CV_EXPORTS_W EstimateParameters { + CV_PROP_RW PatternPos pattern; + CV_PROP_RW bool useExtrinsicGuess; + CV_PROP_RW SolvePnPMethod solvePnPMethod; + + EstimateParameters(): pattern(ARUCO_CCW_CENTER), useExtrinsicGuess(false), + solvePnPMethod(SOLVEPNP_ITERATIVE) {} + + CV_WRAP static Ptr create() { + return makePtr(); + } +}; + + +/** + * @brief Pose estimation for single markers + * + * @param corners vector of already detected markers corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, + * the dimensions of this array should be Nx4. The order of the corners should be clockwise. + * @sa detectMarkers + * @param markerLength the length of the markers' side. The returning translation vectors will + * be in the same unit. Normally, unit is meters. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector). + * Each element in rvecs corresponds to the specific marker in imgPoints. + * @param tvecs array of output translation vectors (e.g. std::vector). + * Each element in tvecs corresponds to the specific marker in imgPoints. + * @param objPoints array of object points of all the marker corners + * @param estimateParameters set the origin of coordinate system and the coordinates of the four corners of the marker + * (default estimateParameters.pattern = PatternPos::ARUCO_CCW_CENTER, estimateParameters.useExtrinsicGuess = false, + * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE). + * + * This function receives the detected markers and returns their pose estimation respect to + * the camera individually. So for each marker, one rotation and translation vector is returned. + * The returned transformation is the one that transforms points from each marker coordinate system + * to the camera coordinate system. + * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker, + * with the Z axis perpendicular to the marker plane. 
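A brief sketch of the single-marker pose path just described; the dictionary, marker length and axis length are example values, and ARUCO_CW_TOP_LEFT_CORNER is shown only to illustrate the PatternPos option.

```cpp
#include <vector>
#include <opencv2/aruco.hpp>
#include <opencv2/calib3d.hpp>

void singleMarkerPoseSketch(cv::Mat& image, const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    std::vector<std::vector<cv::Point2f>> corners;
    std::vector<int> ids;
    cv::aruco::detectMarkers(image, dict, corners, ids);

    // Put the marker frame on the top-left corner instead of the default center.
    cv::Ptr<cv::aruco::EstimateParameters> ep = cv::aruco::EstimateParameters::create();
    ep->pattern = cv::aruco::ARUCO_CW_TOP_LEFT_CORNER;

    std::vector<cv::Vec3d> rvecs, tvecs;
    cv::aruco::estimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs,
                                         rvecs, tvecs, cv::noArray(), ep);

    // Visualize each marker's coordinate system.
    for (size_t i = 0; i < rvecs.size(); i++)
        cv::drawFrameAxes(image, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.025f);
}
```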
+ * estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system (by default) are: + * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), + * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0) + * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + * @sa @ref tutorial_aruco_detection + * @sa EstimateParameters + * @sa PatternPos + */ +CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvecs, OutputArray tvecs, OutputArray objPoints = noArray(), + const Ptr& estimateParameters = EstimateParameters::create()); + +/** + * @brief Pose estimation for a board of markers + * + * @param corners vector of already detected markers corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, the + * dimensions of this array should be Nx4. The order of the corners should be clockwise. + * @param ids list of identifiers for each marker in corners + * @param board layout of markers in the board. The layout is composed by the marker identifiers + * and the positions of each marker corner in the board reference system. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board + * (see cv::Rodrigues). Used as initial guess if not empty. + * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. + * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. + * Used as initial guess if not empty. + * + * This function receives the detected markers and returns the pose of a marker board composed + * by those markers. + * A Board of marker has a single world coordinate system which is defined by the board layout. + * The returned transformation is the one that transforms points from the board coordinate system + * to the camera coordinate system. + * Input markers that are not included in the board layout are ignored. + * The function returns the number of markers from the input employed for the board pose estimation. + * Note that returning a 0 means the pose has not been estimated. + * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + */ +CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr &board, + InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec, + InputOutputArray tvec, bool useExtrinsicGuess = false); + +/** + * @brief Given a board configuration and a set of detected markers, returns the corresponding + * image points and object points to call solvePnP + * + * @param board Marker board layout. + * @param detectedCorners List of detected marker corners of the board. + * @param detectedIds List of identifiers for each marker. + * @param objPoints Vector of vectors of board marker points in the board coordinate space. + * @param imgPoints Vector of vectors of the projections of board marker corner points. 
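Since getBoardObjectAndImagePoints is documented as the helper for feeding solvePnP, a short sketch of that pairing may help; the function below is a hypothetical wrapper, and the camera parameters are assumed to come from a prior calibration.

```cpp
#include <vector>
#include <opencv2/aruco.hpp>
#include <opencv2/calib3d.hpp>

bool boardPoseViaSolvePnP(const std::vector<std::vector<cv::Point2f>>& detectedCorners,
                          const std::vector<int>& detectedIds,
                          const cv::Ptr<cv::aruco::Board>& board,
                          const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                          cv::Vec3d& rvec, cv::Vec3d& tvec)
{
    cv::Mat objPoints, imgPoints;
    cv::aruco::getBoardObjectAndImagePoints(board, detectedCorners, detectedIds,
                                            objPoints, imgPoints);
    if (objPoints.total() < 4)  // solvePnP needs at least 4 correspondences
        return false;
    return cv::solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec);
}
```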
+*/ +CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, + InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints); + +/** + * @brief Calibrate a camera using aruco markers + * + * @param corners vector of detected marker corners in all frames. + * The corners should have the same format returned by detectMarkers (see #detectMarkers). + * @param ids list of identifiers for each marker in corners + * @param counter number of markers in each frame so that corners and ids can be split + * @param board Marker Board layout + * @param imageSize Size of the image used only to initialize the intrinsic camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS + * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be + * initialized before calling the function. + * @param distCoeffs Output vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view + * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding + * k-th translation vector (see the next output parameter description) brings the board pattern + * from the model coordinate space (in which object points are specified) to the world coordinate + * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern view. + * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. + * Order of deviations values: + * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, + * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. + * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. + * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, + * \f$R_i, T_i\f$ are concatenated 1x3 vectors. + * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. + * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). + * @param criteria Termination criteria for the iterative optimization algorithm. + * + * This function calibrates a camera using an Aruco Board. The function receives a list of + * detected markers from several views of the Board. The process is similar to the chessboard + * calibration in calibrateCamera(). The function returns the final re-projection error. + */ +CV_EXPORTS_AS(calibrateCameraArucoExtended) +double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr &board, + Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics, + OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors, int flags = 0, + const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); + +/** @overload + * @brief It's the same function as #calibrateCameraAruco but without calibration error estimation. 
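A sketch of how the concatenated corners and ids, together with the per-frame counter described above, are assembled before calling calibrateCameraAruco. The frame source and board are placeholders; only the concatenation pattern is the point.

```cpp
#include <vector>
#include <opencv2/aruco.hpp>

double calibrateFromFramesSketch(const std::vector<cv::Mat>& frames,
                                 const cv::Ptr<cv::aruco::Board>& board,
                                 const cv::Ptr<cv::aruco::Dictionary>& dict,
                                 cv::Size imageSize,
                                 cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    std::vector<std::vector<cv::Point2f>> allCorners;  // corners of all frames, concatenated
    std::vector<int> allIds;                           // ids of all frames, concatenated
    std::vector<int> markerCounterPerFrame;            // how many markers each frame contributed

    for (const cv::Mat& frame : frames)
    {
        std::vector<std::vector<cv::Point2f>> corners;
        std::vector<int> ids;
        cv::aruco::detectMarkers(frame, dict, corners, ids);
        allCorners.insert(allCorners.end(), corners.begin(), corners.end());
        allIds.insert(allIds.end(), ids.begin(), ids.end());
        markerCounterPerFrame.push_back((int)ids.size());
    }

    // Returns the final re-projection error.
    return cv::aruco::calibrateCameraAruco(allCorners, allIds, markerCounterPerFrame, board,
                                           imageSize, cameraMatrix, distCoeffs);
}
```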
+ */ +CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, + const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, + InputOutputArray distCoeffs, OutputArrayOfArrays rvecs = noArray(), + OutputArrayOfArrays tvecs = noArray(), int flags = 0, + const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, + 30, DBL_EPSILON)); + +/** + * @brief Pose estimation for a ChArUco board given some of their corners + * @param charucoCorners vector of detected charuco corners + * @param charucoIds list of identifiers for each corner in charucoCorners + * @param board layout of ChArUco board. + * @param cameraMatrix input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board + * (see cv::Rodrigues). + * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. + * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. + * + * This function estimates a Charuco board pose from some detected corners. + * The function checks if the input corners are enough and valid to perform pose estimation. + * If pose estimation is valid, returns true, else returns false. + * @sa use cv::drawFrameAxes to get world coordinate system axis for object points + */ +CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds, + const Ptr &board, InputArray cameraMatrix, + InputArray distCoeffs, InputOutputArray rvec, + InputOutputArray tvec, bool useExtrinsicGuess = false); + +/** + * @brief Calibrate a camera using Charuco corners + * + * @param charucoCorners vector of detected charuco corners per frame + * @param charucoIds list of identifiers for each corner in charucoCorners per frame + * @param board Marker Board layout + * @param imageSize input image size + * @param cameraMatrix Output 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS + * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be + * initialized before calling the function. + * @param distCoeffs Output vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view + * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding + * k-th translation vector (see the next output parameter description) brings the board pattern + * from the model coordinate space (in which object points are specified) to the world coordinate + * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern view. + * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. + * Order of deviations values: + * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, + * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. 
+ * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. + * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, + * \f$R_i, T_i\f$ are concatenated 1x3 vectors. + * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. + * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). + * @param criteria Termination criteria for the iterative optimization algorithm. + * + * This function calibrates a camera using a set of corners of a Charuco Board. The function + * receives a list of detected corners and its identifiers from several views of the Board. + * The function returns the final re-projection error. + */ +CV_EXPORTS_AS(calibrateCameraCharucoExtended) +double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, + const Ptr &board, Size imageSize, InputOutputArray cameraMatrix, + InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, + OutputArray perViewErrors, int flags = 0, const TermCriteria& criteria = TermCriteria( + TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); + +/** + * @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. + */ +CV_EXPORTS_W double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, + const Ptr &board, Size imageSize, + InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs = noArray(), + OutputArrayOfArrays tvecs = noArray(), int flags = 0, + const TermCriteria& criteria=TermCriteria(TermCriteria::COUNT + + TermCriteria::EPS, 30, DBL_EPSILON)); +//! @} + +} +} +#endif diff --git a/modules/aruco/include/opencv2/aruco/board.hpp b/modules/aruco/include/opencv2/aruco/board.hpp new file mode 100644 index 00000000000..85c8369e91f --- /dev/null +++ b/modules/aruco/include/opencv2/aruco/board.hpp @@ -0,0 +1,243 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_BOARD_HPP__ +#define __OPENCV_ARUCO_BOARD_HPP__ + +#include +#include + +namespace cv { +namespace aruco { +//! @addtogroup aruco +//! @{ + +class Dictionary; + +/** + * @brief Board of markers + * + * A board is a set of markers in the 3D space with a common coordinate system. + * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used. + * A Board object is composed by: + * - The object points of the marker corners, i.e. their coordinates respect to the board system. + * - The dictionary which indicates the type of markers of the board + * - The identifier of all the markers in the board. + */ +class CV_EXPORTS_W Board { +public: + CV_WRAP Board(); + + /** @brief Provide way to create Board by passing necessary data. Specially needed in Python. 
+ * @param objPoints array of object points of all the marker corners in the board + * @param dictionary the dictionary of markers employed for this board + * @param ids vector of the identifiers of the markers in the board + */ + CV_WRAP static Ptr create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids); + + /** @brief Set ids vector + * @param ids vector of the identifiers of the markers in the board (should be the same size + * as objPoints) + * + * Recommended way to set ids vector, which will fail if the size of ids does not match size + * of objPoints. + */ + CV_WRAP void setIds(InputArray ids); + + /** @brief change id for ids[index] + * @param index - element index in ids + * @param newId - new value for ids[index], should be less than Dictionary size + */ + CV_WRAP void changeId(int index, int newId); + /** @brief return ids + */ + CV_WRAP const std::vector& getIds() const; + + /** @brief set dictionary + */ + CV_WRAP void setDictionary(const Ptr &dictionary); + + /** @brief return dictionary + */ + CV_WRAP Ptr getDictionary() const; + + /** @brief set objPoints + */ + CV_WRAP void setObjPoints(const std::vector > &objPoints); + + /** @brief get objPoints + */ + CV_WRAP const std::vector >& getObjPoints() const; + + /** @brief get rightBottomBorder + */ + CV_WRAP const Point3f& getRightBottomBorder() const; + +protected: + /** @brief array of object points of all the marker corners in the board each marker include its 4 corners in this order: + * - objPoints[i][0] - left-top point of i-th marker + * - objPoints[i][1] - right-top point of i-th marker + * - objPoints[i][2] - right-bottom point of i-th marker + * - objPoints[i][3] - left-bottom point of i-th marker + * + * Markers are placed in a certain order - row by row, left to right in every row. + * For M markers, the size is Mx4. + */ + CV_PROP std::vector > objPoints; + + /// the dictionary of markers employed for this board + CV_PROP Ptr dictionary; + + /// coordinate of the bottom right corner of the board, is set when calling the function create() + CV_PROP Point3f rightBottomBorder; + + /** @brief vector of the identifiers of the markers in the board (same size than objPoints) + * The identifiers refers to the board dictionary + */ + CV_PROP_RW std::vector ids; +}; + +/** + * @brief Draw a planar board + * @sa drawPlanarBoard + * + * @param board layout of the board that will be drawn. The board should be planar, + * z coordinate is ignored + * @param outSize size of the output image in pixels. + * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions. + * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of a planar board, ready to be printed. It assumes + * the Board layout specified is planar by ignoring the z coordinates of the object points. + */ +CV_EXPORTS_W void drawPlanarBoard(const Ptr &board, Size outSize, OutputArray img, + int marginSize = 0, int borderBits = 1); + +/** + * @brief Planar board with grid arrangement of markers + * More common type of board. All markers are placed in the same plane in a grid arrangement. + * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard) + */ + +class CV_EXPORTS_W GridBoard : public Board { +public: + CV_WRAP GridBoard(); + /** + * @brief Draw a GridBoard + * + * @param outSize size of the output image in pixels. 
+ * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions. + * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of the GridBoard, ready to be printed. + */ + CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); + + /** + * @brief Create a GridBoard object + * + * @param markersX number of markers in X direction + * @param markersY number of markers in Y direction + * @param markerLength marker side length (normally in meters) + * @param markerSeparation separation between two markers (same unit as markerLength) + * @param dictionary dictionary of markers indicating the type of markers + * @param firstMarker id of first marker in dictionary to use on board. + * @return the output GridBoard object + * + * This functions creates a GridBoard object given the number of markers in each direction and + * the marker size and marker separation. + */ + CV_WRAP static Ptr create(int markersX, int markersY, float markerLength, float markerSeparation, + const Ptr &dictionary, int firstMarker = 0); + + CV_WRAP Size getGridSize() const; + CV_WRAP float getMarkerLength() const; + CV_WRAP float getMarkerSeparation() const; + +protected: + struct GridImpl; + Ptr gridImpl; + friend class CharucoBoard; +}; + +/** + * @brief ChArUco board + * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed + * inside the white squares of a chessboard. The benefits of ChArUco boards is that they provide + * both, ArUco markers versatility and chessboard corner precision, which is important for + * calibration and pose estimation. + * This class also allows the easy creation and drawing of ChArUco boards. + */ +class CV_EXPORTS_W CharucoBoard : public Board { +public: + CV_WRAP CharucoBoard(); + + // vector of chessboard 3D corners precalculated + CV_PROP std::vector chessboardCorners; + + // for each charuco corner, nearest marker id and nearest marker corner id of each marker + CV_PROP std::vector > nearestMarkerIdx; + CV_PROP std::vector > nearestMarkerCorners; + + /** @brief Draw a ChArUco board + * + * @param outSize size of the output image in pixels. + * @param img output image with the board. The size of this image will be outSize + * and the board will be on the center, keeping the board proportions. + * @param marginSize minimum margins (in pixels) of the board in the output image + * @param borderBits width of the marker borders. + * + * This function return the image of the ChArUco board, ready to be printed. + */ + CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); + + + /** @brief Create a CharucoBoard object + * @param squaresX number of chessboard squares in X direction + * @param squaresY number of chessboard squares in Y direction + * @param squareLength chessboard square side length (normally in meters) + * @param markerLength marker side length (same unit than squareLength) + * @param dictionary dictionary of markers indicating the type of markers. + * The first markers in the dictionary are used to fill the white chessboard squares. + * @return the output CharucoBoard object + * + * This functions creates a CharucoBoard object given the number of squares in each direction + * and the size of the markers and chessboard squares. 
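As a quick illustration of the GridBoard factory and draw() declared above, a sketch that generates a printable 5x7 board image; the dictionary choice, physical sizes and output resolution are arbitrary example values.

```cpp
#include <opencv2/aruco.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Ptr<cv::aruco::Dictionary> dictionary =
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    // 5x7 markers, 0.04 m marker side, 0.01 m separation between markers.
    cv::Ptr<cv::aruco::GridBoard> board =
            cv::aruco::GridBoard::create(5, 7, 0.04f, 0.01f, dictionary);

    cv::Mat boardImage;
    board->draw(cv::Size(600, 840), boardImage, /*marginSize=*/10, /*borderBits=*/1);
    cv::imwrite("grid_board.png", boardImage);
    return 0;
}
```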
+ */ + CV_WRAP static Ptr create(int squaresX, int squaresY, float squareLength, + float markerLength, const Ptr &dictionary); + + CV_WRAP Size getChessboardSize() const; + CV_WRAP float getSquareLength() const; + CV_WRAP float getMarkerLength() const; + +protected: + struct CharucoImpl; + Ptr charucoImpl; +}; + +/** + * @brief test whether the ChArUco markers are collinear + * + * @param board layout of ChArUco board. + * @param charucoIds list of identifiers for each corner in charucoCorners per frame. + * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. + * solvePnP, calibration functions will fail if the corners are collinear (true). + * + * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. + * This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). + * Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: + * for number of charucoIDs <= 2,the function returns true. + */ +CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr &board, InputArray charucoIds); + +//! @} + +} +} + +#endif diff --git a/modules/aruco/include/opencv2/aruco/charuco.hpp b/modules/aruco/include/opencv2/aruco/charuco.hpp index 2417b2fd895..d961c7ef00e 100644 --- a/modules/aruco/include/opencv2/aruco/charuco.hpp +++ b/modules/aruco/include/opencv2/aruco/charuco.hpp @@ -1,47 +1,14 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. 
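A short sketch combining the CharucoBoard factory with the collinearity guard declared just above; charucoIds is assumed to come from interpolateCornersCharuco(), and the helper name is illustrative only.

```cpp
#include <opencv2/aruco/charuco.hpp>

// Sketch: reject degenerate ChArUco detections before pose estimation/calibration.
static bool cornersUsableForPose(const cv::Ptr<cv::aruco::Dictionary>& dictionary,
                                 const cv::Mat& charucoIds)  // Nx1 CV_32SC1 from interpolation
{
    // 5x7 squares, 0.04 m square side, 0.02 m marker side.
    cv::Ptr<cv::aruco::CharucoBoard> board =
            cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dictionary);
    // testCharucoCornersCollinear() returns true when the detected corners lie on a
    // single straight line (or when two or fewer ids are present); solvePnP-based
    // pose estimation and calibration would fail in that case.
    return !cv::aruco::testCharucoCornersCollinear(board, charucoIds);
}
```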
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_CHARUCO_HPP__ #define __OPENCV_CHARUCO_HPP__ #include #include #include +#include +#include namespace cv { @@ -50,87 +17,6 @@ namespace aruco { //! @addtogroup aruco //! @{ - -/** - * @brief ChArUco board - * Specific class for ChArUco boards. A ChArUco board is a planar board where the markers are placed - * inside the white squares of a chessboard. The benefits of ChArUco boards is that they provide - * both, ArUco markers versatility and chessboard corner precision, which is important for - * calibration and pose estimation. - * This class also allows the easy creation and drawing of ChArUco boards. - */ -class CV_EXPORTS_W CharucoBoard : public Board { - - public: - // vector of chessboard 3D corners precalculated - CV_PROP std::vector< Point3f > chessboardCorners; - - // for each charuco corner, nearest marker id and nearest marker corner id of each marker - CV_PROP std::vector< std::vector< int > > nearestMarkerIdx; - CV_PROP std::vector< std::vector< int > > nearestMarkerCorners; - - /** - * @brief Draw a ChArUco board - * - * @param outSize size of the output image in pixels. - * @param img output image with the board. The size of this image will be outSize - * and the board will be on the center, keeping the board proportions. - * @param marginSize minimum margins (in pixels) of the board in the output image - * @param borderBits width of the marker borders. - * - * This function return the image of the ChArUco board, ready to be printed. - */ - CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1); - - - /** - * @brief Create a CharucoBoard object - * - * @param squaresX number of chessboard squares in X direction - * @param squaresY number of chessboard squares in Y direction - * @param squareLength chessboard square side length (normally in meters) - * @param markerLength marker side length (same unit than squareLength) - * @param dictionary dictionary of markers indicating the type of markers. - * The first markers in the dictionary are used to fill the white chessboard squares. - * @return the output CharucoBoard object - * - * This functions creates a CharucoBoard object given the number of squares in each direction - * and the size of the markers and chessboard squares. - */ - CV_WRAP static Ptr create(int squaresX, int squaresY, float squareLength, - float markerLength, const Ptr &dictionary); - - /** - * - */ - CV_WRAP Size getChessboardSize() const { return Size(_squaresX, _squaresY); } - - /** - * - */ - CV_WRAP float getSquareLength() const { return _squareLength; } - - /** - * - */ - CV_WRAP float getMarkerLength() const { return _markerLength; } - - private: - void _getNearestMarkerCorners(); - - // number of markers in X and Y directions - int _squaresX, _squaresY; - - // size of chessboard squares side (normally in meters) - float _squareLength; - - // marker side length (normally in meters) - float _markerLength; -}; - - - - /** * @brief Interpolate position of ChArUco board corners * @param markerCorners vector of already detected markers corners. 
For each marker, its four @@ -161,36 +47,6 @@ CV_EXPORTS_W int interpolateCornersCharuco(InputArrayOfArrays markerCorners, Inp InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), int minMarkers = 2); - - - -/** - * @brief Pose estimation for a ChArUco board given some of their corners - * @param charucoCorners vector of detected charuco corners - * @param charucoIds list of identifiers for each corner in charucoCorners - * @param board layout of ChArUco board. - * @param cameraMatrix input 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ - * @param distCoeffs vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board - * (see cv::Rodrigues). - * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board. - * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not. - * - * This function estimates a Charuco board pose from some detected corners. - * The function checks if the input corners are enough and valid to perform pose estimation. - * If pose estimation is valid, returns true, else returns false. - * @sa use cv::drawFrameAxes to get world coordinate system axis for object points - */ -CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds, - const Ptr &board, InputArray cameraMatrix, - InputArray distCoeffs, InputOutputArray rvec, - InputOutputArray tvec, bool useExtrinsicGuess = false); - - - - /** * @brief Draws a set of Charuco corners * @param image input/output image. It must have 1 or 3 channels. The number of channels is not @@ -206,60 +62,6 @@ CV_EXPORTS_W void drawDetectedCornersCharuco(InputOutputArray image, InputArray InputArray charucoIds = noArray(), Scalar cornerColor = Scalar(255, 0, 0)); - - -/** - * @brief Calibrate a camera using Charuco corners - * - * @param charucoCorners vector of detected charuco corners per frame - * @param charucoIds list of identifiers for each corner in charucoCorners per frame - * @param board Marker Board layout - * @param imageSize input image size - * @param cameraMatrix Output 3x3 floating-point camera matrix - * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS - * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be - * initialized before calling the function. - * @param distCoeffs Output vector of distortion coefficients - * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements - * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view - * (e.g. std::vector>). That is, each k-th rotation vector together with the corresponding - * k-th translation vector (see the next output parameter description) brings the board pattern - * from the model coordinate space (in which object points are specified) to the world coordinate - * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1). - * @param tvecs Output vector of translation vectors estimated for each pattern view. - * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. 
- * Order of deviations values: - * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, - * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero. - * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. - * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views, - * \f$R_i, T_i\f$ are concatenated 1x3 vectors. - * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view. - * @param flags flags Different flags for the calibration process (see #calibrateCamera for details). - * @param criteria Termination criteria for the iterative optimization algorithm. - * - * This function calibrates a camera using a set of corners of a Charuco Board. The function - * receives a list of detected corners and its identifiers from several views of the Board. - * The function returns the final re-projection error. - */ -CV_EXPORTS_AS(calibrateCameraCharucoExtended) double calibrateCameraCharuco( - InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, - OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, - OutputArray perViewErrors, int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - -/** @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation. -*/ -CV_EXPORTS_W double calibrateCameraCharuco( - InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds, const Ptr &board, - Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, - OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0, - TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); - - - /** * @brief Detect ChArUco Diamond markers * @@ -335,20 +137,6 @@ CV_EXPORTS_W void drawCharucoDiamond(const Ptr &dictionary, Vec4i id int markerLength, OutputArray img, int marginSize = 0, int borderBits = 1); - -/** - * @brief test whether the ChArUco markers are collinear - * - * @param _board layout of ChArUco board. - * @param _charucoIds list of identifiers for each corner in charucoCorners per frame. - * @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not. - solvePnP, calibration functions will fail if the corners are collinear (true). - * - * The number of ids in charucoIDs should be <= the number of chessboard corners in the board. This functions checks whether the charuco corners are on a straight line (returns true, if so), or not (false). Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases: for number of charucoIDs <= 2, the function returns true. - */ -CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr &_board, - InputArray _charucoIds); - //! @} } } diff --git a/modules/aruco/include/opencv2/aruco/dictionary.hpp b/modules/aruco/include/opencv2/aruco/dictionary.hpp index ca9d0b4a0fa..cc692cf4890 100644 --- a/modules/aruco/include/opencv2/aruco/dictionary.hpp +++ b/modules/aruco/include/opencv2/aruco/dictionary.hpp @@ -1,41 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. 
If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ - +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_DICTIONARY_HPP__ #define __OPENCV_DICTIONARY_HPP__ @@ -66,28 +31,22 @@ class CV_EXPORTS_W Dictionary { CV_PROP_RW int maxCorrectionBits; // maximum number of bits that can be corrected - /** - */ Dictionary(const Mat &_bytesList = Mat(), int _markerSize = 0, int _maxcorr = 0); /** - Dictionary(const Dictionary &_dictionary); + * Dictionary(const Dictionary &_dictionary); */ + Dictionary(const Ptr &dictionary); - /** - */ - Dictionary(const Ptr &_dictionary); - - - /** + /** @brief returns generateCustomDictionary(nMarkers, markerSize, randomSeed) * @see generateCustomDictionary */ CV_WRAP_AS(create) static Ptr create(int nMarkers, int markerSize, int randomSeed=0); - /** + /** @brief returns generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed) * @see generateCustomDictionary */ CV_WRAP_AS(create_from) static Ptr create(int nMarkers, int markerSize, @@ -108,6 +67,7 @@ class CV_EXPORTS_W Dictionary { * @brief Write a dictionary to FileStorage. Format is the same as in readDictionary(). */ CV_WRAP void writeDictionary(Ptr& fs); + /** * @see getPredefinedDictionary */ diff --git a/modules/aruco/include/opencv2/aruco_detector.hpp b/modules/aruco/include/opencv2/aruco_detector.hpp new file mode 100644 index 00000000000..f3342ebe76d --- /dev/null +++ b/modules/aruco/include/opencv2/aruco_detector.hpp @@ -0,0 +1,436 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_DETECTOR_HPP__ +#define __OPENCV_ARUCO_DETECTOR_HPP__ +#include +#include + +/** + * @defgroup aruco ArUco Marker Detection + * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers) + * These markers are useful for easy, fast and robust camera pose estimation. + * + * The main functionality of ArucoDetector class is: + * - Detection of markers in an image + * + * There are even more functionalities implemented in charuco.hpp and aruco_calib_pose.hpp: + * - Pose estimation from a single marker or from a board/set of markers + * - Detection of ChArUco board for high subpixel accuracy + * - Camera calibration from both, ArUco boards and ChArUco boards. + * - Detection of ChArUco diamond markers + * The samples directory includes easy examples of how to use the module. + * + * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014. + * + * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method. + * + * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014. + * "Automatic generation and detection of highly reliable fiducial markers under occlusion". + * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005 + * + * @sa http://www.uco.es/investiga/grupos/ava/node/26 + * + * This module has been originally developed by Sergio Garrido-Jurado as a project + * for Google Summer of Code 2015 (GSoC 15). + * + * +*/ + +namespace cv { +namespace aruco { + +//! @addtogroup aruco +//! @{ + +enum CornerRefineMethod{ + CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach + CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy + CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting + CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros +}; + +/** + * @brief struct DetectorParameters is used by ArucoDetector + */ +struct CV_EXPORTS_W DetectorParameters { + DetectorParameters() { + adaptiveThreshWinSizeMin = 3; + adaptiveThreshWinSizeMax = 23; + adaptiveThreshWinSizeStep = 10; + adaptiveThreshConstant = 7; + minMarkerPerimeterRate = 0.03; + maxMarkerPerimeterRate = 4.; + polygonalApproxAccuracyRate = 0.03; + minCornerDistanceRate = 0.05; + minDistanceToBorder = 3; + minMarkerDistanceRate = 0.05; + cornerRefinementMethod = CORNER_REFINE_NONE; + cornerRefinementWinSize = 5; + cornerRefinementMaxIterations = 30; + cornerRefinementMinAccuracy = 0.1; + markerBorderBits = 1; + perspectiveRemovePixelPerCell = 4; + perspectiveRemoveIgnoredMarginPerCell = 0.13; + maxErroneousBitsInBorderRate = 0.35; + minOtsuStdDev = 5.0; + errorCorrectionRate = 0.6; + aprilTagQuadDecimate = 0.0; + aprilTagQuadSigma = 0.0; + aprilTagMinClusterPixels = 5; + aprilTagMaxNmaxima = 10; + aprilTagCriticalRad = (float)(10* CV_PI /180); + aprilTagMaxLineFitMse = 10.0; + aprilTagMinWhiteBlackDiff = 5; + aprilTagDeglitch = 0; + detectInvertedMarker = false; + useAruco3Detection = false; + minSideLengthCanonicalImg = 32; + minMarkerLengthRatioOriginalImg = 0.0; + }; + + /** @brief Create a new set of DetectorParameters with default values. 
+ */ + CV_WRAP static Ptr create() { + Ptr params = makePtr(); + return params; + } + + /** + * @brief Read a new set of DetectorParameters from FileNode (use FileStorage.root()). + */ + CV_WRAP bool readDetectorParameters(const FileNode& fn); + + /** + * @brief Write a set of DetectorParameters to FileStorage + */ + CV_WRAP bool writeDetectorParameters(const Ptr& fs); + + /// minimum window size for adaptive thresholding before finding contours (default 3). + CV_PROP_RW int adaptiveThreshWinSizeMin; + + /// maximum window size for adaptive thresholding before finding contours (default 23). + CV_PROP_RW int adaptiveThreshWinSizeMax; + + /// increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax during the thresholding (default 10). + CV_PROP_RW int adaptiveThreshWinSizeStep; + + /// constant for adaptive thresholding before finding contours (default 7) + CV_PROP_RW double adaptiveThreshConstant; + + /** @brief determine minimum perimeter for marker contour to be detected. This is defined as a rate respect to the + * maximum dimension of the input image (default 0.03). + */ + CV_PROP_RW double minMarkerPerimeterRate; + + /** @brief determine maximum perimeter for marker contour to be detected. This is defined as a rate respect to + * the maximum dimension of the input image (default 4.0). + */ + CV_PROP_RW double maxMarkerPerimeterRate; + + /// minimum accuracy during the polygonal approximation process to determine which contours are squares. (default 0.03) + CV_PROP_RW double polygonalApproxAccuracyRate; + + /// minimum distance between corners for detected markers relative to its perimeter (default 0.05) + CV_PROP_RW double minCornerDistanceRate; + + /// minimum distance of any corner to the image border for detected markers (in pixels) (default 3) + CV_PROP_RW int minDistanceToBorder; + + /** @brief minimum mean distance beetween two marker corners to be considered imilar, so that the + * smaller one is removed. The rate is relative to the smaller perimeter of the two markers (default 0.05). + */ + CV_PROP_RW double minMarkerDistanceRate; + + /** @brief default CORNER_REFINE_NONE. + * 0:CORNER_REFINE_NONE, no refinement. + * 1: CORNER_REFINE_SUBPIX, do subpixel refinement. + * 2: CORNER_REFINE_CONTOUR use contour-Points, + * 3: CORNER_REFINE_APRILTAG use the AprilTag2 approach). + */ + CV_PROP_RW int cornerRefinementMethod; + + /// window size for the corner refinement process (in pixels) (default 5). + CV_PROP_RW int cornerRefinementWinSize; + + /// maximum number of iterations for stop criteria of the corner refinement process (default 30). + CV_PROP_RW int cornerRefinementMaxIterations; + + /// minimum error for the stop cristeria of the corner refinement process (default: 0.1) + CV_PROP_RW double cornerRefinementMinAccuracy; + + /// number of bits of the marker border, i.e. marker border width (default 1). + CV_PROP_RW int markerBorderBits; + + /// number of bits (per dimension) for each cell of the marker when removing the perspective (default 4). + CV_PROP_RW int perspectiveRemovePixelPerCell; + + /** @brief width of the margin of pixels on each cell not considered for the + * determination of the cell bit. Represents the rate respect to the total size of the cell, i.e. + * perspectiveRemovePixelPerCell (default 0.13) + */ + CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell; + + /** @brief maximum number of accepted erroneous bits in the border (i.e. number of allowed + * white bits in the border). 
Represented as a rate respect to the total number of bits per marker (default 0.35). + */ + CV_PROP_RW double maxErroneousBitsInBorderRate; + + /** @brief minimun standard deviation in pixels values during the decodification step to apply Otsu + * thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0) + */ + CV_PROP_RW double minOtsuStdDev; + + /// error correction rate respect to the maximun error correction capability for each dictionary (default 0.6). + CV_PROP_RW double errorCorrectionRate; + + /** @brief April :: User-configurable parameters. + * detection of quads can be done on a lower-resolution image, improving speed at a cost of + * pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still + */ + CV_PROP_RW float aprilTagQuadDecimate; + + /// what Gaussian blur should be applied to the segmented image (used for quad detection?) + CV_PROP_RW float aprilTagQuadSigma; + + // April :: Internal variables + /// reject quads containing too few pixels (default 5). + CV_PROP_RW int aprilTagMinClusterPixels; + + /// how many corner candidates to consider when segmenting a group of pixels into a quad (default 10). + CV_PROP_RW int aprilTagMaxNmaxima; + + /** @brief reject quads where pairs of edges have angles that are close to straight or close to 180 degrees. + * Zero means that no quads are rejected. (In radians) (default 10*PI/180) + */ + CV_PROP_RW float aprilTagCriticalRad; + + /// when fitting lines to the contours, what is the maximum mean squared error + CV_PROP_RW float aprilTagMaxLineFitMse; + + /** @brief when we build our model of black & white pixels, we add an extra check that the + * white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). + * (default 5) + */ + CV_PROP_RW int aprilTagMinWhiteBlackDiff; + + /// should the thresholded image be deglitched? Only useful for very noisy images (default 0). + CV_PROP_RW int aprilTagDeglitch; + + /** @brief to check if there is a white marker. In order to generate a "white" marker just invert a + * normal marker by using a tilde, ~markerImage. (default false) + */ + CV_PROP_RW bool detectInvertedMarker; + + /** @brief new Aruco functionality proposed in the paper: + * Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018) + * https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers + */ + + /// to enable the new and faster Aruco detection strategy. + CV_PROP_RW bool useAruco3Detection; + + /// minimum side length of a marker in the canonical image. Latter is the binarized image in which contours are searched. + CV_PROP_RW int minSideLengthCanonicalImg; + + /// range [0,1], eq (2) from paper. The parameter tau_i has a direct influence on the processing speed. 
+ CV_PROP_RW float minMarkerLengthRatioOriginalImg; +}; + +/** + * @brief struct RefineParameters is used by ArucoDetector + */ +struct CV_EXPORTS_W RefineParameters { + RefineParameters() { + minRepDistance = 10.f; + errorCorrectionRate = 3.f; + checkAllOrders = true; + } + + RefineParameters(float _minRepDistance, float _errorCorrectionRate, bool _checkAllOrders): + minRepDistance(_minRepDistance), errorCorrectionRate(_errorCorrectionRate), checkAllOrders(_checkAllOrders) {} + + CV_WRAP static Ptr create(float _minRepDistance = 10.f, float _errorCorrectionRate = 3.f, + bool _checkAllOrders = true) { + return makePtr(_minRepDistance, _errorCorrectionRate, _checkAllOrders); + } + + + /** + * @brief Read a new set of RefineParameters from FileNode (use FileStorage.root()). + */ + CV_WRAP bool readRefineParameters(const FileNode& fn); + + /** @brief Write a set of RefineParameters to FileStorage + */ + CV_WRAP bool writeRefineParameters(const Ptr& fs); + + /** @brief minRepDistance minimum distance between the corners of the rejected candidate and the reprojected marker in + * order to consider it as a correspondence. + */ + CV_PROP_RW float minRepDistance; + /** @brief minRepDistance rate of allowed erroneous bits respect to the error correction + * capability of the used dictionary. -1 ignores the error correction step. + */ + CV_PROP_RW float errorCorrectionRate; + /** @brief checkAllOrders consider the four posible corner orders in the rejectedCorners array. + * If it set to false, only the provided corner order is considered (default true). + */ + CV_PROP_RW bool checkAllOrders; +}; + +/** + * @brief The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method. + * After detecting some markers in the image, you can try to find undetected markers from this dictionary with + * refineDetectedMarkers() method. + * @see DetectorParameters, RefineParameters + */ +class CV_EXPORTS_W ArucoDetector : public Algorithm +{ +public: + /// dictionary indicates the type of markers that will be searched + CV_PROP_RW Ptr dictionary; + + /// marker detection parameters, check DetectorParameters docs to see available settings + CV_PROP_RW Ptr params; + + /// marker refine parameters + CV_PROP_RW Ptr refineParams; + + /** + * @brief Basic ArucoDetector constructor + * @param _dictionary indicates the type of markers that will be searched + * @param _params marker detection parameters + * @param _refineParams marker refine detection parameters + */ + CV_WRAP ArucoDetector(const Ptr &_dictionary = getPredefinedDictionary(DICT_4X4_50), + const Ptr &_params = DetectorParameters::create(), + const Ptr &_refineParams = RefineParameters::create()): + dictionary(_dictionary), params(_params), refineParams(_refineParams) {} + + CV_WRAP static Ptr create(const Ptr &_dictionary, const Ptr &_params) { + return makePtr(_dictionary, _params); + } + + /** + * @brief Basic marker detection + * + * @param image input image + * @param corners vector of detected marker corners. For each marker, its four corners + * are provided, (e.g std::vector > ). For N detected markers, + * the dimensions of this array is Nx4. The order of the corners is clockwise. + * @param ids vector of identifiers of the detected markers. The identifier is of type int + * (e.g. std::vector). For N detected markers, the size of ids is also N. + * The identifiers have the same order than the markers in the imgPoints array. 
+ * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a + * correct codification. Useful for debugging purposes. + * + * Performs marker detection in the input image. Only markers included in the specific dictionary + * are searched. For each detected marker, it returns the 2D position of its corner in the image + * and its corresponding identifier. + * Note that this function does not perform pose estimation. + * @note The function does not correct lens distortion or takes it into account. It's recommended to undistort + * input image with corresponging camera model, if camera parameters are known + * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard + */ + CV_WRAP void detectMarkers(InputArray image, OutputArrayOfArrays corners, OutputArray ids, + OutputArrayOfArrays rejectedImgPoints = noArray()); + + /** + * @brief Refind not detected markers based on the already detected and the board layout + * + * @param image input image + * @param board layout of markers in the board. + * @param detectedCorners vector of already detected marker corners. + * @param detectedIds vector of already detected marker identifiers. + * @param rejectedCorners vector of rejected candidates during the marker detection process. + * @param cameraMatrix optional input 3x3 floating-point camera matrix + * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ + * @param distCoeffs optional vector of distortion coefficients + * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements + * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the + * original rejectedCorners array. + * + * This function tries to find markers that were not detected in the basic detecMarkers function. + * First, based on the current detected marker and the board layout, the function interpolates + * the position of the missing markers. Then it tries to find correspondence between the reprojected + * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate + * parameters. + * If camera parameters and distortion coefficients are provided, missing markers are reprojected + * using projectPoint function. If not, missing marker projections are interpolated using global + * homography, and all the marker corners in the board must have the same Z coordinate. + */ + CV_WRAP void refineDetectedMarkers(InputArray image, const Ptr &board, + InputOutputArrayOfArrays detectedCorners, + InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners, + InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(), + OutputArray recoveredIdxs = noArray()); + + /** @brief Stores algorithm parameters in a file storage + */ + virtual void write(FileStorage& fs) const override { + Ptr pfs = makePtr(fs); + dictionary->writeDictionary(pfs); + params->writeDetectorParameters(pfs); + refineParams->writeRefineParameters(pfs); + } + + /** @brief simplified API for language bindings + * @overload + */ + CV_WRAP void write(const String& fileName) const { + FileStorage fs(fileName, FileStorage::WRITE); + write(fs); + } + + /** @brief Reads algorithm parameters from a file storage + */ + CV_WRAP virtual void read(const FileNode& fn) override { + dictionary->readDictionary(fn); + params->readDetectorParameters(fn); + refineParams->readRefineParameters(fn); + } +}; + +/** + * @brief Draw detected markers in image + * + * @param image input/output image. It must have 1 or 3 channels. 
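To make the new class-based flow concrete, a minimal end-to-end sketch of the ArucoDetector declared above: detection, an optional board-based refine step, and visualisation with drawDetectedMarkers(). File names and board dimensions are placeholders; the include paths follow this patch's header layout.

```cpp
#include <opencv2/aruco.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    cv::Mat image = cv::imread("input.png", cv::IMREAD_GRAYSCALE);

    cv::Ptr<cv::aruco::Dictionary> dictionary =
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;  // example tweak
    cv::aruco::ArucoDetector detector(dictionary, params);

    std::vector<int> ids;
    std::vector<std::vector<cv::Point2f>> corners, rejected;
    detector.detectMarkers(image, corners, ids, rejected);

    // Optional: recover markers missed in the first pass using a known board layout.
    cv::Ptr<cv::aruco::Board> board =
            cv::aruco::GridBoard::create(3, 4, 0.05f, 0.01f, dictionary);
    detector.refineDetectedMarkers(image, board, corners, ids, rejected);

    cv::Mat vis;
    cv::cvtColor(image, vis, cv::COLOR_GRAY2BGR);
    cv::aruco::drawDetectedMarkers(vis, corners, ids);
    cv::imwrite("detected.png", vis);
    return 0;
}
```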
The number of channels is not + * altered. + * @param corners positions of marker corners on input image. + * (e.g std::vector > ). For N detected markers, the dimensions of + * this array should be Nx4. The order of the corners should be clockwise. + * @param ids vector of identifiers for markers in markersCorners . + * Optional, if not provided, ids are not painted. + * @param borderColor color of marker borders. Rest of colors (text color and first corner color) + * are calculated based on this one to improve visualization. + * + * Given an array of detected marker corners and its corresponding ids, this functions draws + * the markers in the image. The marker borders are painted and the markers identifiers if provided. + * Useful for debugging purposes. + * + */ +CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners, + InputArray ids = noArray(), Scalar borderColor = Scalar(0, 255, 0)); + +/** + * @brief Draw a canonical marker image + * + * @param dictionary dictionary of markers indicating the type of markers + * @param id identifier of the marker that will be returned. It has to be a valid id + * in the specified dictionary. + * @param sidePixels size of the image in pixels + * @param img output image with the marker + * @param borderBits width of the marker border. + * + * This function returns a marker image in its canonical form (i.e. ready to be printed) + */ +CV_EXPORTS_W void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray img, + int borderBits = 1); + +//! @} + +} +} + +#endif diff --git a/modules/aruco/misc/java/test/ArucoTest.java b/modules/aruco/misc/java/test/ArucoTest.java new file mode 100644 index 00000000000..6e70eab1752 --- /dev/null +++ b/modules/aruco/misc/java/test/ArucoTest.java @@ -0,0 +1,58 @@ +package org.opencv.test.aruco; + +import java.util.ArrayList; +import java.util.List; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.core.Scalar; +import org.opencv.core.Mat; +import org.opencv.core.CvType; +import org.opencv.aruco.*; + + +public class ArucoTest extends OpenCVTestCase { + + public void testArucoIssue3133() { + byte[][] marker = {{0,1,1},{1,1,1},{0,1,1}}; + Dictionary dictionary = Dictionary.create(1, 3); + dictionary.set_maxCorrectionBits(0); + Mat markerBits = new Mat(3, 3, CvType.CV_8UC1); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + markerBits.put(i, j, marker[i][j]); + } + } + + Mat markerCompressed = Dictionary.getByteListFromBits(markerBits); + assertMatNotEqual(markerCompressed, dictionary.get_bytesList()); + + dictionary.set_bytesList(markerCompressed); + assertMatEqual(markerCompressed, dictionary.get_bytesList()); + } + + public void testArucoDetector() { + Dictionary dictionary = Dictionary.get(0); + DetectorParameters detectorParameters = DetectorParameters.create(); + ArucoDetector detector = ArucoDetector.create(dictionary, detectorParameters); + + Mat markerImage = new Mat(); + int id = 1, offset = 5, size = 40; + Aruco.drawMarker(dictionary, id, size, markerImage, detectorParameters.get_markerBorderBits()); + + Mat image = new Mat(markerImage.rows() + 2*offset, markerImage.cols() + 2*offset, + CvType.CV_8UC1, new Scalar(255)); + Mat m = image.submat(offset, size+offset, offset, size+offset); + markerImage.copyTo(m); + + List corners = new ArrayList(); + Mat ids = new Mat(); + detector.detectMarkers(image, corners, ids); + + assertEquals(1, corners.size()); + Mat res = corners.get(0); + assertArrayEquals(new double[]{offset, offset}, res.get(0, 0), 
0.0); + assertArrayEquals(new double[]{size + offset - 1, offset}, res.get(0, 1), 0.0); + assertArrayEquals(new double[]{size + offset - 1, size + offset - 1}, res.get(0, 2), 0.0); + assertArrayEquals(new double[]{offset, size + offset - 1}, res.get(0, 3), 0.0); + } +} diff --git a/modules/aruco/misc/python/test/test_aruco.py b/modules/aruco/misc/python/test/test_aruco.py index 6c76fb5ed92..ebbb86622ab 100644 --- a/modules/aruco/misc/python/test/test_aruco.py +++ b/modules/aruco/misc/python/test/test_aruco.py @@ -19,13 +19,13 @@ def test_idsAccessibility(self): aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_5X5_250) board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict) - np.testing.assert_array_equal(board.ids.squeeze(), ids) + np.testing.assert_array_equal(board.getIds().squeeze(), ids) - board.ids = rev_ids - np.testing.assert_array_equal(board.ids.squeeze(), rev_ids) + board.setIds(rev_ids) + np.testing.assert_array_equal(board.getIds().squeeze(), rev_ids) board.setIds(ids) - np.testing.assert_array_equal(board.ids.squeeze(), ids) + np.testing.assert_array_equal(board.getIds().squeeze(), ids) with self.assertRaises(cv.error): board.setIds(np.array([0])) @@ -85,5 +85,55 @@ def test_getDistanceToId(self): self.assertEqual(dist, 0) + def test_aruco_detector(self): + aruco_params = cv.aruco.DetectorParameters_create() + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_250) + aruco_detector = cv.aruco.ArucoDetector_create(aruco_dict, aruco_params) + id = 2 + marker_size = 100 + offset = 10 + img_marker = cv.aruco.drawMarker(aruco_dict, id, marker_size, aruco_params.markerBorderBits) + img_marker = np.pad(img_marker, pad_width=offset, mode='constant', constant_values=255) + gold_corners = np.array([[offset, offset],[marker_size+offset-1.0,offset], + [marker_size+offset-1.0,marker_size+offset-1.0], + [offset, marker_size+offset-1.0]], dtype=np.float32) + expected_corners, expected_ids, expected_rejected = cv.aruco.detectMarkers(img_marker, aruco_dict, + parameters=aruco_params) + + corners, ids, rejected = aruco_detector.detectMarkers(img_marker) + + self.assertEqual(1, len(ids)) + self.assertEqual(id, ids[0]) + for i in range(0, len(ids)): + np.testing.assert_array_equal(expected_corners[i], corners[i]) + np.testing.assert_array_equal(gold_corners, corners[i].reshape(4, 2)) + + def test_aruco_detector_refine(self): + aruco_params = cv.aruco.DetectorParameters_create() + aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_250) + aruco_detector = cv.aruco.ArucoDetector_create(aruco_dict, aruco_params) + board_size = (3, 4) + board = cv.aruco.GridBoard_create(board_size[0], board_size[1], 5.0, 1.0, aruco_dict) + board_image = board.draw((board_size[0]*50, board_size[1]*50), marginSize=10) + + corners, ids, rejected = aruco_detector.detectMarkers(board_image) + self.assertEqual(board_size[0]*board_size[1], len(ids)) + + part_corners, part_ids, part_rejected = corners[:-1], ids[:-1], list(rejected) + part_rejected.append(corners[-1]) + + refine_corners, refine_ids, refine_rejected, recovered_ids = aruco_detector.refineDetectedMarkers(board_image, board, part_corners, part_ids, part_rejected) + refine_corners_c, _, _, _ = cv.aruco.refineDetectedMarkers(board_image, board, part_corners, part_ids, part_rejected) + + self.assertEqual(board_size[0] * board_size[1], len(refine_ids)) + self.assertEqual(1, len(recovered_ids)) + + for i in range(0, len(ids)): + np.testing.assert_array_equal(refine_corners_c[i], refine_corners[i]) + #self.assertEqual(ids[-1], recovered_ids[0]) + 
self.assertEqual(ids[-1], refine_ids[-1]) + self.assertEqual((1, 4, 2), refine_corners[0].shape) + np.testing.assert_array_equal(corners, refine_corners) + if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/modules/aruco/perf/perf_aruco.cpp b/modules/aruco/perf/perf_aruco.cpp index 6ab830fa280..6cde729ff2e 100644 --- a/modules/aruco/perf/perf_aruco.cpp +++ b/modules/aruco/perf/perf_aruco.cpp @@ -190,6 +190,7 @@ PERF_TEST_P(EstimateAruco, ArucoFirst, ESTIMATE_PARAMS) detectorParams->minSideLengthCanonicalImg = 32; detectorParams->minMarkerLengthRatioOriginalImg = 0.04f / numMarkersInRow; } + aruco::ArucoDetector detector(dictionary, detectorParams); MarkerPainter painter(markerSize); auto image_map = painter.getProjectMarkersTile(numMarkersInRow, detectorParams, dictionary); @@ -198,7 +199,7 @@ PERF_TEST_P(EstimateAruco, ArucoFirst, ESTIMATE_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); @@ -221,6 +222,7 @@ PERF_TEST_P(EstimateAruco, ArucoSecond, ESTIMATE_PARAMS) detectorParams->minSideLengthCanonicalImg = 64; detectorParams->minMarkerLengthRatioOriginalImg = 0.f; } + aruco::ArucoDetector detector(dictionary, detectorParams); const int markerSize = 200; const int numMarkersInRow = 11; MarkerPainter painter(markerSize); @@ -231,7 +233,7 @@ PERF_TEST_P(EstimateAruco, ArucoSecond, ESTIMATE_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); @@ -276,6 +278,7 @@ PERF_TEST_P(EstimateLargeAruco, ArucoFHD, ESTIMATE_FHD_PARAMS) detectorParams->minSideLengthCanonicalImg = get<0>(testParams).minSideLengthCanonicalImg; detectorParams->minMarkerLengthRatioOriginalImg = get<0>(testParams).minMarkerLengthRatioOriginalImg; } + aruco::ArucoDetector detector(dictionary, detectorParams); const int markerSize = get<1>(testParams).first; // 1440 or 480 or 144 const int numMarkersInRow = get<1>(testParams).second; // 1 or 3 or 144 MarkerPainter painter(markerSize); // num pixels is 1440x1440 as in FHD 1920x1080 @@ -286,7 +289,7 @@ PERF_TEST_P(EstimateLargeAruco, ArucoFHD, ESTIMATE_FHD_PARAMS) vector ids; TEST_CYCLE() { - aruco::detectMarkers(image_map.first, dictionary, corners, ids, detectorParams); + detector.detectMarkers(image_map.first, corners, ids); } ASSERT_EQ(numMarkersInRow*numMarkersInRow, static_cast(ids.size())); double maxDistance = getMaxDistance(image_map.second, ids, corners); diff --git a/modules/aruco/perf/perf_precomp.hpp b/modules/aruco/perf/perf_precomp.hpp index e4df3216e0c..a72903624c8 100644 --- a/modules/aruco/perf/perf_precomp.hpp +++ b/modules/aruco/perf/perf_precomp.hpp @@ -5,7 +5,7 @@ #define __OPENCV_PERF_PRECOMP_HPP__ #include "opencv2/ts.hpp" -#include "opencv2/aruco.hpp" +#include "opencv2/aruco_detector.hpp" #include "opencv2/calib3d.hpp" #endif diff --git a/modules/aruco/samples/aruco_dict_utils.cpp b/modules/aruco/samples/aruco_dict_utils.cpp index 465513b3410..ab32f4f58a9 100644 --- a/modules/aruco/samples/aruco_dict_utils.cpp +++ b/modules/aruco/samples/aruco_dict_utils.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include using 
namespace cv; diff --git a/modules/aruco/samples/aruco_samples_utility.hpp b/modules/aruco/samples/aruco_samples_utility.hpp index c1cfe626cb7..ebdbcc1d7ec 100644 --- a/modules/aruco/samples/aruco_samples_utility.hpp +++ b/modules/aruco/samples/aruco_samples_utility.hpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/modules/aruco/samples/calibrate_camera.cpp b/modules/aruco/samples/calibrate_camera.cpp index a52bab5b161..bf056b15270 100644 --- a/modules/aruco/samples/calibrate_camera.cpp +++ b/modules/aruco/samples/calibrate_camera.cpp @@ -39,7 +39,8 @@ the use of this software, even if advised of the possibility of such damage. #include #include -#include +#include +#include #include #include #include @@ -162,6 +163,8 @@ int main(int argc, char *argv[]) { vector< vector< int > > allIds; Size imgSize; + aruco::ArucoDetector detector(dictionary, detectorParams); + while(inputVideo.grab()) { Mat image, imageCopy; inputVideo.retrieve(image); @@ -170,10 +173,10 @@ int main(int argc, char *argv[]) { vector< vector< Point2f > > corners, rejected; // detect markers - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); // refind strategy to detect more markers - if(refindStrategy) aruco::refineDetectedMarkers(image, board, corners, ids, rejected); + if(refindStrategy) detector.refineDetectedMarkers(image, board, corners, ids, rejected); // draw results image.copyTo(imageCopy); diff --git a/modules/aruco/samples/create_board.cpp b/modules/aruco/samples/create_board.cpp index d2482bce853..242688e5cb7 100644 --- a/modules/aruco/samples/create_board.cpp +++ b/modules/aruco/samples/create_board.cpp @@ -38,7 +38,7 @@ the use of this software, even if advised of the possibility of such damage. #include -#include +#include #include #include "aruco_samples_utility.hpp" diff --git a/modules/aruco/samples/create_marker.cpp b/modules/aruco/samples/create_marker.cpp index fe31ec2972f..73ce21880f6 100644 --- a/modules/aruco/samples/create_marker.cpp +++ b/modules/aruco/samples/create_marker.cpp @@ -38,7 +38,7 @@ the use of this software, even if advised of the possibility of such damage. #include -#include +#include #include #include "aruco_samples_utility.hpp" diff --git a/modules/aruco/samples/detect_board.cpp b/modules/aruco/samples/detect_board.cpp index 883be8dd2e3..7f9c3d1ebc7 100644 --- a/modules/aruco/samples/detect_board.cpp +++ b/modules/aruco/samples/detect_board.cpp @@ -38,7 +38,8 @@ the use of this software, even if advised of the possibility of such damage. 
#include -#include +#include +#include #include #include #include "aruco_samples_utility.hpp" @@ -135,7 +136,7 @@ int main(int argc, char *argv[]) { cerr << "Dictionary not specified" << endl; return 0; } - + aruco::ArucoDetector detector(dictionary, detectorParams); VideoCapture inputVideo; int waitTime; if(!video.empty()) { @@ -168,12 +169,12 @@ int main(int argc, char *argv[]) { Vec3d rvec, tvec; // detect markers - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); // refind strategy to detect more markers if(refindStrategy) - aruco::refineDetectedMarkers(image, board, corners, ids, rejected, camMatrix, - distCoeffs); + detector.refineDetectedMarkers(image, board, corners, ids, rejected, camMatrix, + distCoeffs); // estimate board pose int markersOfBoardDetected = 0; diff --git a/modules/aruco/samples/detect_markers.cpp b/modules/aruco/samples/detect_markers.cpp index f7d17b9f8a8..de38c2a1b2c 100644 --- a/modules/aruco/samples/detect_markers.cpp +++ b/modules/aruco/samples/detect_markers.cpp @@ -38,7 +38,8 @@ the use of this software, even if advised of the possibility of such damage. #include -#include +#include +#include #include #include "aruco_samples_utility.hpp" @@ -134,7 +135,7 @@ int main(int argc, char *argv[]) { return 0; } } - + aruco::ArucoDetector detector(dictionary, detectorParams); VideoCapture inputVideo; int waitTime; if(!video.empty()) { @@ -159,7 +160,7 @@ int main(int argc, char *argv[]) { vector< Vec3d > rvecs, tvecs; // detect markers and estimate pose - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); if(estimatePose && ids.size() > 0) aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs); diff --git a/modules/aruco/samples/tutorial_charuco_create_detect.cpp b/modules/aruco/samples/tutorial_charuco_create_detect.cpp index 89b538c8a01..1c47003b57a 100644 --- a/modules/aruco/samples/tutorial_charuco_create_detect.cpp +++ b/modules/aruco/samples/tutorial_charuco_create_detect.cpp @@ -50,7 +50,7 @@ static inline void detectCharucoBoardWithCalibrationPose() //! [midcornerdet] std::vector markerIds; std::vector > markerCorners; - cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params); + cv::aruco::detectMarkers(image, board->getDictionary(), markerCorners, markerIds, params); //! [midcornerdet] // if at least one marker detected if (markerIds.size() > 0) { @@ -100,7 +100,7 @@ static inline void detectCharucoBoardWithoutCalibration() image.copyTo(imageCopy); std::vector markerIds; std::vector > markerCorners; - cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params); + cv::aruco::detectMarkers(image, board->getDictionary(), markerCorners, markerIds, params); //or //cv::aruco::detectMarkers(image, dictionary, markerCorners, markerIds, params); // if at least one marker detected diff --git a/modules/aruco/src/apriltag_quad_thresh.cpp b/modules/aruco/src/apriltag/apriltag_quad_thresh.cpp similarity index 91% rename from modules/aruco/src/apriltag_quad_thresh.cpp rename to modules/aruco/src/apriltag/apriltag_quad_thresh.cpp index 20c193725fa..fcc038b25fd 100644 --- a/modules/aruco/src/apriltag_quad_thresh.cpp +++ b/modules/aruco/src/apriltag/apriltag_quad_thresh.cpp @@ -16,7 +16,7 @@ // because we use a fixed-point 16 bit integer representation with one // fractional bit. 
-#include "precomp.hpp" +#include "../precomp.hpp" #include "apriltag_quad_thresh.hpp" //#define APRIL_DEBUG @@ -1028,17 +1028,7 @@ int fit_quad(const Ptr &_params, const Mat im, zarray_t *clu return res; } -/** - * - * @param nCidx0 - * @param nCidx1 - * @param nClusters - * @param nW - * @param nH - * @param nquads - * @param td - * @param im - */ + static void do_quad(int nCidx0, int nCidx1, zarray_t &nClusters, int nW, int nH, zarray_t *nquads, const Ptr &td, const Mat im){ CV_Assert(nquads != NULL); @@ -1078,12 +1068,6 @@ static void do_quad(int nCidx0, int nCidx1, zarray_t &nClusters, int nW, int nH, } } -/** - * - * @param mIm - * @param parameters - * @param mThresh - */ void threshold(const Mat mIm, const Ptr ¶meters, Mat& mThresh){ int w = mIm.cols, h = mIm.rows; int s = (unsigned) mIm.step; @@ -1306,14 +1290,7 @@ static void _darken(const Mat &im){ } #endif -/** - * - * @param parameters - * @param mImg - * @param contours - * @return - */ -zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector< std::vector< Point > > &contours){ +zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector > &contours){ //////////////////////////////////////////////////////// // step 1. threshold the image, creating the edge image. @@ -1499,7 +1476,7 @@ out = Mat::zeros(h, w, CV_8UC3); zarray_t *cluster; _zarray_get(clusters, i, &cluster); - std::vector< Point > cnt; + std::vector cnt; for (int j = 0; j < _zarray_size(cluster); j++) { struct pt *p; _zarray_get_volatile(cluster, j, &p); @@ -1567,4 +1544,123 @@ imwrite("2.5 debug_lines.pnm", out); return quads; } +void _apriltag(Mat im_orig, const Ptr & _params, std::vector > &candidates, + std::vector > &contours){ + + /////////////////////////////////////////////////////////// + /// Step 1. Detect quads according to requested image decimation + /// and blurring parameters. + Mat quad_im; + + if (_params->aprilTagQuadDecimate > 1){ + resize(im_orig, quad_im, Size(), 1/_params->aprilTagQuadDecimate, 1/_params->aprilTagQuadDecimate, INTER_AREA); + } + else { + im_orig.copyTo(quad_im); + } + + // Apply a Blur + if (_params->aprilTagQuadSigma != 0) { + // compute a reasonable kernel width by figuring that the + // kernel should go out 2 std devs. + // + // max sigma ksz + // 0.499 1 (disabled) + // 0.999 3 + // 1.499 5 + // 1.999 7 + + float sigma = fabsf((float) _params->aprilTagQuadSigma); + + int ksz = cvFloor(4 * sigma); // 2 std devs in each direction + ksz |= 1; // make odd number + + if (ksz > 1) { + if (_params->aprilTagQuadSigma > 0) + GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); + else { + Mat orig; + quad_im.copyTo(orig); + GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); + + // SHARPEN the image by subtracting the low frequency components. + for (int y = 0; y < orig.rows; y++) { + for (int x = 0; x < orig.cols; x++) { + int vorig = orig.data[y*orig.step + x]; + int vblur = quad_im.data[y*quad_im.step + x]; + + int v = 2*vorig - vblur; + if (v < 0) + v = 0; + if (v > 255) + v = 255; + + quad_im.data[y*quad_im.step + x] = (uint8_t) v; + } + } + } + } + } + +#ifdef APRIL_DEBUG + imwrite("1.1 debug_preprocess.pnm", quad_im); +#endif + + /////////////////////////////////////////////////////////// + /// Step 2. 
do the Threshold :: get the set of candidate quads + zarray_t *quads = apriltag_quad_thresh(_params, quad_im, contours); + + CV_Assert(quads != NULL); + + // adjust centers of pixels so that they correspond to the + // original full-resolution image. + if (_params->aprilTagQuadDecimate > 1) { + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *q; + _zarray_get_volatile(quads, i, &q); + for (int j = 0; j < 4; j++) { + q->p[j][0] *= _params->aprilTagQuadDecimate; + q->p[j][1] *= _params->aprilTagQuadDecimate; + } + } + } + +#ifdef APRIL_DEBUG + Mat im_quads = im_orig.clone(); + im_quads = im_quads*0.5; + srandom(0); + + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *quad; + _zarray_get_volatile(quads, i, &quad); + + const int bias = 100; + int color = bias + (random() % (255-bias)); + + line(im_quads, Point(quad->p[0][0], quad->p[0][1]), Point(quad->p[1][0], quad->p[1][1]), color, 1); + line(im_quads, Point(quad->p[1][0], quad->p[1][1]), Point(quad->p[2][0], quad->p[2][1]), color, 1); + line(im_quads, Point(quad->p[2][0], quad->p[2][1]), Point(quad->p[3][0], quad->p[3][1]), color, 1); + line(im_quads, Point(quad->p[3][0], quad->p[3][1]), Point(quad->p[0][0], quad->p[0][1]), color, 1); + } + imwrite("1.2 debug_quads_raw.pnm", im_quads); +#endif + + //////////////////////////////////////////////////////////////// + /// Step 3. Save the output :: candidate corners + for (int i = 0; i < _zarray_size(quads); i++) { + struct sQuad *quad; + _zarray_get_volatile(quads, i, &quad); + + std::vector corners; + corners.push_back(Point2f(quad->p[3][0], quad->p[3][1])); //pA + corners.push_back(Point2f(quad->p[0][0], quad->p[0][1])); //pB + corners.push_back(Point2f(quad->p[1][0], quad->p[1][1])); //pC + corners.push_back(Point2f(quad->p[2][0], quad->p[2][1])); //pD + + candidates.push_back(corners); + } + + _zarray_destroy(quads); +} + }} diff --git a/modules/aruco/src/apriltag_quad_thresh.hpp b/modules/aruco/src/apriltag/apriltag_quad_thresh.hpp similarity index 91% rename from modules/aruco/src/apriltag_quad_thresh.hpp rename to modules/aruco/src/apriltag/apriltag_quad_thresh.hpp index fd392c84477..2b8647ca15d 100644 --- a/modules/aruco/src/apriltag_quad_thresh.hpp +++ b/modules/aruco/src/apriltag/apriltag_quad_thresh.hpp @@ -19,7 +19,8 @@ #ifndef _OPENCV_APRIL_QUAD_THRESH_HPP_ #define _OPENCV_APRIL_QUAD_THRESH_HPP_ -#include "opencv2/aruco.hpp" +#include +#include "opencv2/aruco_detector.hpp" #include "unionfind.hpp" #include "zmaxheap.hpp" #include "zarray.hpp" @@ -104,22 +105,15 @@ int quad_segment_agg(int sz, struct line_fit_pt *lfps, int indices[4]); **/ int fit_quad(const Ptr &_params, const Mat im, zarray_t *cluster, struct sQuad *quad); -/** - * - * @param mIm - * @param parameters - * @param mThresh - */ + void threshold(const Mat mIm, const Ptr ¶meters, Mat& mThresh); -/** - * - * @param parameters - * @param mImg - * @param contours - * @return - */ -zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, std::vector< std::vector< Point > > &contours); + +zarray_t *apriltag_quad_thresh(const Ptr ¶meters, const Mat & mImg, + std::vector > &contours); + +void _apriltag(Mat im_orig, const Ptr & _params, std::vector > &candidates, + std::vector > &contours); }} #endif diff --git a/modules/aruco/src/predefined_dictionaries_apriltag.hpp b/modules/aruco/src/apriltag/predefined_dictionaries_apriltag.hpp similarity index 100% rename from modules/aruco/src/predefined_dictionaries_apriltag.hpp rename to 
modules/aruco/src/apriltag/predefined_dictionaries_apriltag.hpp diff --git a/modules/aruco/src/unionfind.hpp b/modules/aruco/src/apriltag/unionfind.hpp similarity index 100% rename from modules/aruco/src/unionfind.hpp rename to modules/aruco/src/apriltag/unionfind.hpp diff --git a/modules/aruco/src/zarray.hpp b/modules/aruco/src/apriltag/zarray.hpp similarity index 100% rename from modules/aruco/src/zarray.hpp rename to modules/aruco/src/apriltag/zarray.hpp diff --git a/modules/aruco/src/zmaxheap.cpp b/modules/aruco/src/apriltag/zmaxheap.cpp similarity index 99% rename from modules/aruco/src/zmaxheap.cpp rename to modules/aruco/src/apriltag/zmaxheap.cpp index fe17821936a..f3b315e7317 100644 --- a/modules/aruco/src/zmaxheap.cpp +++ b/modules/aruco/src/apriltag/zmaxheap.cpp @@ -12,7 +12,7 @@ // of the authors and should not be interpreted as representing official policies, // either expressed or implied, of the Regents of The University of Michigan. -#include "precomp.hpp" +#include "../precomp.hpp" #include "zmaxheap.hpp" diff --git a/modules/aruco/src/zmaxheap.hpp b/modules/aruco/src/apriltag/zmaxheap.hpp similarity index 100% rename from modules/aruco/src/zmaxheap.hpp rename to modules/aruco/src/apriltag/zmaxheap.hpp diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp index fccb3d965ad..92abb40ae83 100644 --- a/modules/aruco/src/aruco.cpp +++ b/modules/aruco/src/aruco.cpp @@ -1,1904 +1,32 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. 
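For reference, the preprocessing that the relocated _apriltag() function performs before quad thresholding (decimation, optional blur, or unsharp-style sharpening when aprilTagQuadSigma is negative) can be written as a standalone helper with plain OpenCV calls. This is only a sketch mirroring the logic shown above: addWeighted() stands in for the explicit per-pixel sharpening loop (it saturates identically for 8-bit images), and the parameter values passed in are examples, not defaults.

#include <opencv2/imgproc.hpp>
#include <cmath>

static cv::Mat preprocessForAprilTag(const cv::Mat &gray, float quadDecimate, float quadSigma) {
    cv::Mat quadIm;
    if (quadDecimate > 1.f)
        cv::resize(gray, quadIm, cv::Size(), 1.f / quadDecimate, 1.f / quadDecimate, cv::INTER_AREA);
    else
        gray.copyTo(quadIm);

    if (quadSigma != 0.f) {
        float sigma = std::fabs(quadSigma);
        int ksz = cvFloor(4 * sigma) | 1;          // kernel covers ~2 std devs each side, forced odd
        if (ksz > 1) {
            cv::Mat blurred;
            cv::GaussianBlur(quadIm, blurred, cv::Size(ksz, ksz), sigma, sigma, cv::BORDER_REPLICATE);
            if (quadSigma > 0)
                quadIm = blurred;                  // plain low-pass
            else
                // sharpen: 2*orig - blurred, saturated to [0,255] (same result as the pixel loop)
                cv::addWeighted(quadIm, 2.0, blurred, -1.0, 0.0, quadIm);
        }
    }
    return quadIm;
}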
-*/ - -#include "precomp.hpp" #include "opencv2/aruco.hpp" -#include -#include - -#include "apriltag_quad_thresh.hpp" -#include "zarray.hpp" - -#include - -//#define APRIL_DEBUG -#ifdef APRIL_DEBUG -#include "opencv2/imgcodecs.hpp" -#endif namespace cv { namespace aruco { using namespace std; - -/** - * - */ -DetectorParameters::DetectorParameters() - : adaptiveThreshWinSizeMin(3), - adaptiveThreshWinSizeMax(23), - adaptiveThreshWinSizeStep(10), - adaptiveThreshConstant(7), - minMarkerPerimeterRate(0.03), - maxMarkerPerimeterRate(4.), - polygonalApproxAccuracyRate(0.03), - minCornerDistanceRate(0.05), - minDistanceToBorder(3), - minMarkerDistanceRate(0.05), - cornerRefinementMethod(CORNER_REFINE_NONE), - cornerRefinementWinSize(5), - cornerRefinementMaxIterations(30), - cornerRefinementMinAccuracy(0.1), - markerBorderBits(1), - perspectiveRemovePixelPerCell(4), - perspectiveRemoveIgnoredMarginPerCell(0.13), - maxErroneousBitsInBorderRate(0.35), - minOtsuStdDev(5.0), - errorCorrectionRate(0.6), - aprilTagQuadDecimate(0.0), - aprilTagQuadSigma(0.0), - aprilTagMinClusterPixels(5), - aprilTagMaxNmaxima(10), - aprilTagCriticalRad( (float)(10* CV_PI /180) ), - aprilTagMaxLineFitMse(10.0), - aprilTagMinWhiteBlackDiff(5), - aprilTagDeglitch(0), - detectInvertedMarker(false), - useAruco3Detection(false), - minSideLengthCanonicalImg(32), - minMarkerLengthRatioOriginalImg(0.0) -{} - - -/** - * @brief Create a new set of DetectorParameters with default values. - */ -Ptr DetectorParameters::create() { - Ptr params = makePtr(); - return params; -} - -template -static inline bool readParameter(const FileNode& node, T& parameter) -{ - if (!node.empty()) { - node >> parameter; - return true; - } - return false; -} - -/** - * @brief Read a new set of DetectorParameters from FileStorage. 
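The constructor defaults and readDetectorParameters() removed here move with DetectorParameters into the new detector header; their usage does not change. As a reminder of the typical round trip (tweak a couple of defaults, persist them, reload them through a FileNode), here is a small sketch. The file name and the fields written are illustrative, and it assumes DetectorParameters::create() and readDetectorParameters() remain available exactly as in the code above.

#include <opencv2/aruco.hpp>
#include <opencv2/core.hpp>

void roundTripDetectorParameters() {
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    params->adaptiveThreshWinSizeMin = 5;                                  // default is 3
    params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;      // default is CORNER_REFINE_NONE

    {   // write only keys that readDetectorParameters() knows how to parse
        cv::FileStorage fs("detector_params.yml", cv::FileStorage::WRITE); // illustrative file name
        fs << "adaptiveThreshWinSizeMin" << params->adaptiveThreshWinSizeMin;
        fs << "cornerRefinementMethod" << params->cornerRefinementMethod;
    }

    cv::FileStorage fs("detector_params.yml", cv::FileStorage::READ);
    cv::Ptr<cv::aruco::DetectorParameters> loaded = cv::aruco::DetectorParameters::create();
    bool ok = loaded->readDetectorParameters(fs.root());                   // true if a known key was read
    CV_Assert(ok);
}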
- */ -bool DetectorParameters::readDetectorParameters(const FileNode& fn) -{ - if(fn.empty()) - return true; - bool checkRead = false; - checkRead |= readParameter(fn["adaptiveThreshWinSizeMin"], this->adaptiveThreshWinSizeMin); - checkRead |= readParameter(fn["adaptiveThreshWinSizeMax"], this->adaptiveThreshWinSizeMax); - checkRead |= readParameter(fn["adaptiveThreshWinSizeStep"], this->adaptiveThreshWinSizeStep); - checkRead |= readParameter(fn["adaptiveThreshConstant"], this->adaptiveThreshConstant); - checkRead |= readParameter(fn["minMarkerPerimeterRate"], this->minMarkerPerimeterRate); - checkRead |= readParameter(fn["maxMarkerPerimeterRate"], this->maxMarkerPerimeterRate); - checkRead |= readParameter(fn["polygonalApproxAccuracyRate"], this->polygonalApproxAccuracyRate); - checkRead |= readParameter(fn["minCornerDistanceRate"], this->minCornerDistanceRate); - checkRead |= readParameter(fn["minDistanceToBorder"], this->minDistanceToBorder); - checkRead |= readParameter(fn["minMarkerDistanceRate"], this->minMarkerDistanceRate); - checkRead |= readParameter(fn["cornerRefinementMethod"], this->cornerRefinementMethod); - checkRead |= readParameter(fn["cornerRefinementWinSize"], this->cornerRefinementWinSize); - checkRead |= readParameter(fn["cornerRefinementMaxIterations"], this->cornerRefinementMaxIterations); - checkRead |= readParameter(fn["cornerRefinementMinAccuracy"], this->cornerRefinementMinAccuracy); - checkRead |= readParameter(fn["markerBorderBits"], this->markerBorderBits); - checkRead |= readParameter(fn["perspectiveRemovePixelPerCell"], this->perspectiveRemovePixelPerCell); - checkRead |= readParameter(fn["perspectiveRemoveIgnoredMarginPerCell"], this->perspectiveRemoveIgnoredMarginPerCell); - checkRead |= readParameter(fn["maxErroneousBitsInBorderRate"], this->maxErroneousBitsInBorderRate); - checkRead |= readParameter(fn["minOtsuStdDev"], this->minOtsuStdDev); - checkRead |= readParameter(fn["errorCorrectionRate"], this->errorCorrectionRate); - // new aruco 3 functionality - checkRead |= readParameter(fn["useAruco3Detection"], this->useAruco3Detection); - checkRead |= readParameter(fn["minSideLengthCanonicalImg"], this->minSideLengthCanonicalImg); - checkRead |= readParameter(fn["minMarkerLengthRatioOriginalImg"], this->minMarkerLengthRatioOriginalImg); - return checkRead; -} - - -/** - * @brief Convert input image to gray if it is a 3-channels image - */ -static void _convertToGrey(InputArray _in, OutputArray _out) { - - CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3); - - if(_in.type() == CV_8UC3) - cvtColor(_in, _out, COLOR_BGR2GRAY); - else - _in.copyTo(_out); -} - - -/** - * @brief Threshold input image using adaptive thresholding - */ -static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) { - - CV_Assert(winSize >= 3); - if(winSize % 2 == 0) winSize++; // win size must be odd - adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant); -} - - -/** - * @brief Given a tresholded image, find the contours, calculate their polygonal approximation - * and take those that accomplish some conditions - */ -static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &candidates, - vector< vector< Point > > &contoursOut, double minPerimeterRate, - double maxPerimeterRate, double accuracyRate, - double minCornerDistanceRate, int minDistanceToBorder, int minSize) { - - CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 && - minCornerDistanceRate >= 0 && 
minDistanceToBorder >= 0); - - // calculate maximum and minimum sizes in pixels - unsigned int minPerimeterPixels = - (unsigned int)(minPerimeterRate * max(_in.getMat().cols, _in.getMat().rows)); - unsigned int maxPerimeterPixels = - (unsigned int)(maxPerimeterRate * max(_in.getMat().cols, _in.getMat().rows)); - - // for aruco3 functionality - if (minSize != 0) { - minPerimeterPixels = 4*minSize; - } - - Mat contoursImg; - _in.getMat().copyTo(contoursImg); - vector< vector< Point > > contours; - findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE); - // now filter list of contours - for(unsigned int i = 0; i < contours.size(); i++) { - // check perimeter - if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels) - continue; - - // check is square and is convex - vector< Point > approxCurve; - approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true); - if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue; - - // check min distance between corners - double minDistSq = - max(contoursImg.cols, contoursImg.rows) * max(contoursImg.cols, contoursImg.rows); - for(int j = 0; j < 4; j++) { - double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) * - (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) + - (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) * - (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y); - minDistSq = min(minDistSq, d); - } - double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; - if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue; - - // check if it is too near to the image border - bool tooNearBorder = false; - for(int j = 0; j < 4; j++) { - if(approxCurve[j].x < minDistanceToBorder || approxCurve[j].y < minDistanceToBorder || - approxCurve[j].x > contoursImg.cols - 1 - minDistanceToBorder || - approxCurve[j].y > contoursImg.rows - 1 - minDistanceToBorder) - tooNearBorder = true; - } - if(tooNearBorder) continue; - - // if it passes all the test, add to candidates vector - vector< Point2f > currentCandidate; - currentCandidate.resize(4); - for(int j = 0; j < 4; j++) { - currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y); - } - candidates.push_back(currentCandidate); - contoursOut.push_back(contours[i]); - } -} - - -/** - * @brief Assure order of candidate corners is clockwise direction - */ -static void _reorderCandidatesCorners(vector< vector< Point2f > > &candidates) { - - for(unsigned int i = 0; i < candidates.size(); i++) { - double dx1 = candidates[i][1].x - candidates[i][0].x; - double dy1 = candidates[i][1].y - candidates[i][0].y; - double dx2 = candidates[i][2].x - candidates[i][0].x; - double dy2 = candidates[i][2].y - candidates[i][0].y; - double crossProduct = (dx1 * dy2) - (dy1 * dx2); - - if(crossProduct < 0.0) { // not clockwise direction - swap(candidates[i][1], candidates[i][3]); - } - } -} - -/** - * @brief to make sure that the corner's order of both candidates (default/white) is the same - */ -static vector< Point2f > alignContourOrder( Point2f corner, vector< Point2f > candidate){ - uint8_t r=0; - double min = cv::norm( Vec2f( corner - candidate[0] ), NORM_L2SQR); - for(uint8_t pos=1; pos < 4; pos++) { - double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); - if(nDiff < min){ - r = pos; - min =nDiff; - } - } - std::rotate(candidate.begin(), candidate.begin() + r, candidate.end()); - return candidate; -} - -/** - * @brief Check candidates 
that are too close to each other, save the potential candidates - * (i.e. biggest/smallest contour) and remove the rest - */ -static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candidatesIn, - vector< vector< vector< Point2f > > > &candidatesSetOut, - const vector< vector< Point > > &contoursIn, - vector< vector< vector< Point > > > &contoursSetOut, - double minMarkerDistanceRate, bool detectInvertedMarker) { - - CV_Assert(minMarkerDistanceRate >= 0); - vector candGroup; - candGroup.resize(candidatesIn.size(), -1); - vector< vector > groupedCandidates; - for(unsigned int i = 0; i < candidatesIn.size(); i++) { - bool isSingleContour = true; - for(unsigned int j = i + 1; j < candidatesIn.size(); j++) { - - int minimumPerimeter = min((int)contoursIn[i].size(), (int)contoursIn[j].size() ); - - // fc is the first corner considered on one of the markers, 4 combinations are possible - for(int fc = 0; fc < 4; fc++) { - double distSq = 0; - for(int c = 0; c < 4; c++) { - // modC is the corner considering first corner is fc - int modC = (c + fc) % 4; - distSq += (candidatesIn[i][modC].x - candidatesIn[j][c].x) * - (candidatesIn[i][modC].x - candidatesIn[j][c].x) + - (candidatesIn[i][modC].y - candidatesIn[j][c].y) * - (candidatesIn[i][modC].y - candidatesIn[j][c].y); - } - distSq /= 4.; - - // if mean square distance is too low, remove the smaller one of the two markers - double minMarkerDistancePixels = double(minimumPerimeter) * minMarkerDistanceRate; - if(distSq < minMarkerDistancePixels * minMarkerDistancePixels) { - isSingleContour = false; - // i and j are not related to a group - if(candGroup[i]<0 && candGroup[j]<0){ - // mark candidates with their corresponding group number - candGroup[i] = candGroup[j] = (int)groupedCandidates.size(); - - // create group - vector grouped; - grouped.push_back(i); - grouped.push_back(j); - groupedCandidates.push_back( grouped ); - } - // i is related to a group - else if(candGroup[i] > -1 && candGroup[j] == -1){ - int group = candGroup[i]; - candGroup[j] = group; - - // add to group - groupedCandidates[group].push_back( j ); - } - // j is related to a group - else if(candGroup[j] > -1 && candGroup[i] == -1){ - int group = candGroup[j]; - candGroup[i] = group; - - // add to group - groupedCandidates[group].push_back( i ); - } - } - } - } - if (isSingleContour && candGroup[i] < 0) - { - candGroup[i] = (int)groupedCandidates.size(); - vector grouped; - grouped.push_back(i); - grouped.push_back(i); // step "save possible candidates" require minimum 2 elements - groupedCandidates.push_back(grouped); - } - } - - // save possible candidates - candidatesSetOut.clear(); - contoursSetOut.clear(); - - vector< vector< Point2f > > biggerCandidates; - vector< vector< Point > > biggerContours; - vector< vector< Point2f > > smallerCandidates; - vector< vector< Point > > smallerContours; - - // save possible candidates - for(unsigned int i = 0; i < groupedCandidates.size(); i++) { - unsigned int smallerIdx = groupedCandidates[i][0]; - unsigned int biggerIdx = smallerIdx; - double smallerArea = contourArea(candidatesIn[smallerIdx]); - double biggerArea = smallerArea; - - // evaluate group elements - for(unsigned int j = 1; j < groupedCandidates[i].size(); j++) { - unsigned int currIdx = groupedCandidates[i][j]; - double currArea = contourArea(candidatesIn[currIdx]); - - // check if current contour is bigger - if(currArea >= biggerArea) { - biggerIdx = currIdx; - biggerArea = currArea; - } - - // check if current contour is smaller - if(currArea < 
smallerArea && detectInvertedMarker) { - smallerIdx = currIdx; - smallerArea = currArea; - } - } - - // add contours and candidates - biggerCandidates.push_back(candidatesIn[biggerIdx]); - biggerContours.push_back(contoursIn[biggerIdx]); - if(detectInvertedMarker) { - smallerCandidates.push_back(alignContourOrder(candidatesIn[biggerIdx][0], candidatesIn[smallerIdx])); - smallerContours.push_back(contoursIn[smallerIdx]); - } - } - // to preserve the structure :: candidateSet< defaultCandidates, whiteCandidates > - // default candidates - candidatesSetOut.push_back(biggerCandidates); - contoursSetOut.push_back(biggerContours); - // white candidates - candidatesSetOut.push_back(smallerCandidates); - contoursSetOut.push_back(smallerContours); -} - -/** - * @brief Initial steps on finding square candidates - */ -static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, - vector< vector< Point > > &contours, - const Ptr ¶ms) { - - CV_Assert(params->adaptiveThreshWinSizeMin >= 3 && params->adaptiveThreshWinSizeMax >= 3); - CV_Assert(params->adaptiveThreshWinSizeMax >= params->adaptiveThreshWinSizeMin); - CV_Assert(params->adaptiveThreshWinSizeStep > 0); - - // number of window sizes (scales) to apply adaptive thresholding - int nScales = (params->adaptiveThreshWinSizeMax - params->adaptiveThreshWinSizeMin) / - params->adaptiveThreshWinSizeStep + 1; - - vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); - vector< vector< vector< Point > > > contoursArrays((size_t) nScales); - - ////for each value in the interval of thresholding window sizes - parallel_for_(Range(0, nScales), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; - // threshold - Mat thresh; - _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); - - // detect rectangles - _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], - params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, - params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, - params->minDistanceToBorder, params->minSideLengthCanonicalImg); - } - }); - // join candidates - for(int i = 0; i < nScales; i++) { - for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) { - candidates.push_back(candidatesArrays[i][j]); - contours.push_back(contoursArrays[i][j]); - } - } -} - - -/** - * @brief Detect square candidates in the input image - */ -static void _detectCandidates(InputArray _grayImage, vector< vector< vector< Point2f > > >& candidatesSetOut, - vector< vector< vector< Point > > >& contoursSetOut, const Ptr &_params) { - Mat grey = _grayImage.getMat(); - CV_DbgAssert(grey.total() != 0); - CV_DbgAssert(grey.type() == CV_8UC1); - - /// 1. DETECT FIRST SET OF CANDIDATES - vector< vector< Point2f > > candidates; - vector< vector< Point > > contours; - _detectInitialCandidates(grey, candidates, contours, _params); - /// 2. SORT CORNERS - _reorderCandidatesCorners(candidates); - - /// 3. FILTER OUT NEAR CANDIDATE PAIRS - // save the outter/inner border (i.e. 
potential candidates) - _filterTooCloseCandidates(candidates, candidatesSetOut, contours, contoursSetOut, - _params->minMarkerDistanceRate, _params->detectInvertedMarker); -} - - -/** - * @brief Given an input image and candidate corners, extract the bits of the candidate, including - * the border bits - */ -static Mat _extractBits(InputArray _image, InputArray _corners, int markerSize, - int markerBorderBits, int cellSize, double cellMarginRate, - double minStdDevOtsu) { - - CV_Assert(_image.getMat().channels() == 1); - CV_Assert(_corners.total() == 4); - CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1); - CV_Assert(minStdDevOtsu >= 0); - - // number of bits in the marker - int markerSizeWithBorders = markerSize + 2 * markerBorderBits; - int cellMarginPixels = int(cellMarginRate * cellSize); - - Mat resultImg; // marker image after removing perspective - int resultImgSize = markerSizeWithBorders * cellSize; - Mat resultImgCorners(4, 1, CV_32FC2); - resultImgCorners.ptr< Point2f >(0)[0] = Point2f(0, 0); - resultImgCorners.ptr< Point2f >(0)[1] = Point2f((float)resultImgSize - 1, 0); - resultImgCorners.ptr< Point2f >(0)[2] = - Point2f((float)resultImgSize - 1, (float)resultImgSize - 1); - resultImgCorners.ptr< Point2f >(0)[3] = Point2f(0, (float)resultImgSize - 1); - - // remove perspective - Mat transformation = getPerspectiveTransform(_corners, resultImgCorners); - warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize), - INTER_NEAREST); - - // output image containing the bits - Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0)); - - // check if standard deviation is enough to apply Otsu - // if not enough, it probably means all bits are the same color (black or white) - Mat mean, stddev; - // Remove some border just to avoid border noise from perspective transformation - Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2) - .rowRange(cellSize / 2, resultImg.rows - cellSize / 2); - meanStdDev(innerRegion, mean, stddev); - if(stddev.ptr< double >(0)[0] < minStdDevOtsu) { - // all black or all white, depending on mean value - if(mean.ptr< double >(0)[0] > 127) - bits.setTo(1); - else - bits.setTo(0); - return bits; - } - - // now extract code, first threshold using Otsu - threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU); - - // for each cell - for(int y = 0; y < markerSizeWithBorders; y++) { - for(int x = 0; x < markerSizeWithBorders; x++) { - int Xstart = x * (cellSize) + cellMarginPixels; - int Ystart = y * (cellSize) + cellMarginPixels; - Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels, - cellSize - 2 * cellMarginPixels)); - // count white pixels on each cell to assign its value - size_t nZ = (size_t) countNonZero(square); - if(nZ > square.total() / 2) bits.at< unsigned char >(y, x) = 1; - } - } - - return bits; -} - - - -/** - * @brief Return number of erroneous bits in border, i.e. number of white bits in border. 
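The _extractBits() code removed above is the core of marker identification: warp the candidate quadrilateral to a canonical square, binarize with Otsu, and take a majority vote of white pixels per cell. A condensed sketch of that technique follows; it deliberately omits the cell-margin cropping and the low-contrast (Otsu standard-deviation) shortcut for brevity, and the marker/cell sizes are example values.

#include <opencv2/imgproc.hpp>
#include <vector>

static cv::Mat extractBitsSketch(const cv::Mat &gray, const std::vector<cv::Point2f> &corners,
                                 int markerSize = 6, int borderBits = 1, int cellSize = 4) {
    int sizeWithBorders = markerSize + 2 * borderBits;
    int side = sizeWithBorders * cellSize;
    std::vector<cv::Point2f> dst = {
        {0.f, 0.f}, {(float)side - 1, 0.f},
        {(float)side - 1, (float)side - 1}, {0.f, (float)side - 1}};

    cv::Mat M = cv::getPerspectiveTransform(corners, dst);                // remove perspective
    cv::Mat warped;
    cv::warpPerspective(gray, warped, M, cv::Size(side, side), cv::INTER_NEAREST);
    cv::threshold(warped, warped, 125, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    cv::Mat bits(sizeWithBorders, sizeWithBorders, CV_8UC1, cv::Scalar(0));
    for (int y = 0; y < sizeWithBorders; y++)
        for (int x = 0; x < sizeWithBorders; x++) {
            cv::Mat cell = warped(cv::Rect(x * cellSize, y * cellSize, cellSize, cellSize));
            if (cv::countNonZero(cell) > (int)cell.total() / 2)           // majority vote per cell
                bits.at<uchar>(y, x) = 1;
        }
    return bits;
}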
- */ -static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) { - - int sizeWithBorders = markerSize + 2 * borderSize; - - CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders); - - int totalErrors = 0; - for(int y = 0; y < sizeWithBorders; y++) { - for(int k = 0; k < borderSize; k++) { - if(bits.ptr< unsigned char >(y)[k] != 0) totalErrors++; - if(bits.ptr< unsigned char >(y)[sizeWithBorders - 1 - k] != 0) totalErrors++; - } - } - for(int x = borderSize; x < sizeWithBorders - borderSize; x++) { - for(int k = 0; k < borderSize; k++) { - if(bits.ptr< unsigned char >(k)[x] != 0) totalErrors++; - if(bits.ptr< unsigned char >(sizeWithBorders - 1 - k)[x] != 0) totalErrors++; - } - } - return totalErrors; -} - - -/** - * @brief Tries to identify one candidate given the dictionary - * @return candidate typ. zero if the candidate is not valid, - * 1 if the candidate is a black candidate (default candidate) - * 2 if the candidate is a white candidate - */ -static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArray _image, - const vector& _corners, int& idx, - const Ptr& params, int& rotation, - const float scale = 1.f) -{ - CV_DbgAssert(_corners.size() == 4); - CV_DbgAssert(_image.getMat().total() != 0); - CV_DbgAssert(params->markerBorderBits > 0); - uint8_t typ=1; - // get bits - // scale corners to the correct size to search on the corresponding image pyramid - vector scaled_corners(4); - for (int i = 0; i < 4; ++i) { - scaled_corners[i].x = _corners[i].x * scale; - scaled_corners[i].y = _corners[i].y * scale; - } - - Mat candidateBits = - _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, - params->perspectiveRemovePixelPerCell, - params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); - - // analyze border bits - int maximumErrorsInBorder = - int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate); - int borderErrors = - _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits); - - // check if it is a white marker - if(params->detectInvertedMarker){ - // to get from 255 to 1 - Mat invertedImg = ~candidateBits-254; - int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits); - // white marker - if(invBError maximumErrorsInBorder) return 0; // border is wrong - - // take only inner bits - Mat onlyBits = - candidateBits.rowRange(params->markerBorderBits, - candidateBits.rows - params->markerBorderBits) - .colRange(params->markerBorderBits, candidateBits.cols - params->markerBorderBits); - - // try to indentify the marker - if(!dictionary->identify(onlyBits, idx, rotation, params->errorCorrectionRate)) - return 0; - - return typ; -} - -/** - * @brief Copy the contents of a corners vector to an OutputArray, settings its size. 
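For reference, the border check that _identifyOneCandidate() applies through _getBorderErrors() can be stated compactly: every bit in the outer border ring is expected to be black, and the candidate is rejected when the number of white border bits exceeds markerSize * markerSize * maxErroneousBitsInBorderRate. The helper below is an equivalent single-scan formulation of that count, not the exact row/column loops used above.

#include <opencv2/core.hpp>

static int countBorderErrors(const cv::Mat &bits, int markerSize, int borderSize) {
    const int n = markerSize + 2 * borderSize;
    CV_Assert(bits.type() == CV_8UC1 && bits.rows == n && bits.cols == n);
    int errors = 0;
    for (int y = 0; y < n; y++)
        for (int x = 0; x < n; x++) {
            const bool onBorder = x < borderSize || y < borderSize ||
                                  x >= n - borderSize || y >= n - borderSize;
            if (onBorder && bits.at<uchar>(y, x) != 0)
                errors++;            // a white bit where the (black) border should be
        }
    return errors;
}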
- */ -static void _copyVector2Output(vector< vector< Point2f > > &vec, OutputArrayOfArrays out, const float scale = 1.f) { - out.create((int)vec.size(), 1, CV_32FC2); - - if(out.isMatVector()) { - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - Mat &m = out.getMatRef(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else if(out.isUMatVector()) { - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - UMat &m = out.getUMatRef(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){ - for (unsigned int i = 0; i < vec.size(); i++) { - out.create(4, 1, CV_32FC2, i); - Mat m = out.getMat(i); - Mat(Mat(vec[i]).t()*scale).copyTo(m); - } - } - else { - CV_Error(cv::Error::StsNotImplemented, - "Only Mat vector, UMat vector, and vector OutputArrays are currently supported."); - } -} - -/** - * @brief rotate the initial corner to get to the right position - */ -static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){ - std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end()); -} - -static size_t _findOptPyrImageForCanonicalImg( - const std::vector& img_pyr, - const int scaled_width, - const int cur_perimeter, - const int min_perimeter) { - CV_Assert(scaled_width > 0); - size_t optLevel = 0; - float dist = std::numeric_limits::max(); - for (size_t i = 0; i < img_pyr.size(); ++i) { - const float scale = img_pyr[i].cols / static_cast(scaled_width); - const float perimeter_scaled = cur_perimeter * scale; - // instead of std::abs() favor the larger pyramid level by checking if the distance is postive - // will slow down the algorithm but find more corners in the end - const float new_dist = perimeter_scaled - min_perimeter; - if (new_dist < dist && new_dist > 0.f) { - dist = new_dist; - optLevel = i; - } - } - return optLevel; -} - -/** - * @brief Identify square candidates according to a marker dictionary - */ - -static void _identifyCandidates(InputArray grey, - const std::vector& image_pyr, - vector< vector< vector< Point2f > > >& _candidatesSet, - vector< vector< vector > >& _contoursSet, const Ptr &_dictionary, - vector< vector< Point2f > >& _accepted, vector< vector >& _contours, vector< int >& ids, - const Ptr ¶ms, - OutputArrayOfArrays _rejected = noArray()) { - CV_DbgAssert(grey.getMat().total() != 0); - CV_DbgAssert(grey.getMat().type() == CV_8UC1); - int ncandidates = (int)_candidatesSet[0].size(); - vector< vector< Point2f > > accepted; - vector< vector< Point2f > > rejected; - vector< vector< Point > > contours; - - vector< int > idsTmp(ncandidates, -1); - vector< int > rotated(ncandidates, 0); - vector< uint8_t > validCandidates(ncandidates, 0); - - //// Analyze each of the candidates - parallel_for_(Range(0, ncandidates), [&](const Range &range) { - const int begin = range.start; - const int end = range.end; - - vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; - vector< vector< Point > >& contourS = params->detectInvertedMarker ? 
_contoursSet[1] : _contoursSet[0]; - - for(int i = begin; i < end; i++) { - int currId = -1; - // implements equation (4) - if (params->useAruco3Detection) { - const int perimeterOfContour = static_cast(contourS[i].size()); - const int min_perimeter = params->minSideLengthCanonicalImg * 4; - const size_t nearestImgId = _findOptPyrImageForCanonicalImg(image_pyr, grey.cols(), perimeterOfContour, min_perimeter); - const float scale = image_pyr[nearestImgId].cols / static_cast(grey.cols()); - - validCandidates[i] = _identifyOneCandidate(_dictionary, image_pyr[nearestImgId], candidates[i], currId, params, rotated[i], scale); - } - else { - validCandidates[i] = _identifyOneCandidate(_dictionary, grey, candidates[i], currId, params, rotated[i]); - } - - if(validCandidates[i] > 0) - idsTmp[i] = currId; - } - }); - - for(int i = 0; i < ncandidates; i++) { - if(validCandidates[i] > 0) { - // to choose the right set of candidates :: 0 for default, 1 for white markers - uint8_t set = validCandidates[i]-1; - - // shift corner positions to the correct rotation - correctCornerPosition(_candidatesSet[set][i], rotated[i]); - - if( !params->detectInvertedMarker && validCandidates[i] == 2 ) - continue; - - // add valid candidate - accepted.push_back(_candidatesSet[set][i]); - ids.push_back(idsTmp[i]); - - contours.push_back(_contoursSet[set][i]); - - } else { - rejected.push_back(_candidatesSet[0][i]); - } - } - - // parse output - _accepted = accepted; - - _contours= contours; - - if(_rejected.needed()) { - _copyVector2Output(rejected, _rejected); - } -} - - -/** - * @brief Return object points for the system centered in a middle (by default) or in a top left corner of single - * marker, given the marker length - */ -static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints, - EstimateParameters estimateParameters) { - - CV_Assert(markerLength > 0); - - _objPoints.create(4, 1, CV_32FC3); - Mat objPoints = _objPoints.getMat(); - // set coordinate system in the top-left corner of the marker, with Z pointing out - if (estimateParameters.pattern == CW_top_left_corner) { - objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); - objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); - objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); - objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); - } - else if (estimateParameters.pattern == CCW_center) { - objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); - objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); - objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); - objPoints.ptr(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0); - } - else - CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern"); -} - -/** - * Line fitting A * B = C :: Called from function refineCandidateLines - * @param nContours, contour-container - */ -static Point3f _interpolate2Dline(const std::vector& nContours){ - CV_Assert(nContours.size() >= 2); - float minX, minY, maxX, maxY; - minX = maxX = nContours[0].x; - minY = maxY = nContours[0].y; - - for(unsigned int i = 0; i< nContours.size(); i++){ - minX = nContours[i].x < minX ? nContours[i].x : minX; - minY = nContours[i].y < minY ? nContours[i].y : minY; - maxX = nContours[i].x > maxX ? nContours[i].x : maxX; - maxY = nContours[i].y > maxY ? 
nContours[i].y : maxY; - } - - Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) - Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) - Mat C; // Constant - - if(maxX - minX > maxY - minY){ - for(unsigned int i =0; i < nContours.size(); i++){ - A.at(i,0)= nContours[i].x; - B.at(i,0)= nContours[i].y; - } - - solve(A, B, C, DECOMP_NORMAL); - - return Point3f(C.at(0, 0), -1., C.at(1, 0)); - } - else{ - for(unsigned int i =0; i < nContours.size(); i++){ - A.at(i,0)= nContours[i].y; - B.at(i,0)= nContours[i].x; - } - - solve(A, B, C, DECOMP_NORMAL); - - return Point3f(-1., C.at(0, 0), C.at(1, 0)); - } - -} - -/** - * Find the Point where the lines crosses :: Called from function refineCandidateLines - * @param nLine1 - * @param nLine2 - * @return Crossed Point - */ -static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){ - Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); - Vec2f B(-nLine1.z, -nLine2.z); - return Vec2f(A.solve(B).val); -} - -/** - * Refine Corners using the contour vector :: Called from function detectMarkers - * @param nContours, contour-container - * @param nCorners, candidate Corners - * @param camMatrix, cameraMatrix input 3x3 floating-point camera matrix - * @param distCoeff, distCoeffs vector of distortion coefficient - */ -static void _refineCandidateLines(std::vector& nContours, std::vector& nCorners){ - vector contour2f(nContours.begin(), nContours.end()); - /* 5 groups :: to group the edges - * 4 - classified by its corner - * extra group - (temporary) if contours do not begin with a corner - */ - vector cntPts[5]; - int cornerIndex[4]={-1}; - int group=4; - - for ( unsigned int i =0; i < nContours.size(); i++ ) { - for(unsigned int j=0; j<4; j++){ - if ( nCorners[j] == contour2f[i] ){ - cornerIndex[j] = i; - group=j; - } - } - cntPts[group].push_back(contour2f[i]); - } - for (int i = 0; i < 4; i++) - { - CV_Assert(cornerIndex[i] != -1); - } - - // saves extra group into corresponding - if( !cntPts[4].empty() ){ - for( unsigned int i=0; i < cntPts[4].size() ; i++ ) - cntPts[group].push_back(cntPts[4].at(i)); - cntPts[4].clear(); - } - - //Evaluate contour direction :: using the position of the detected corners - int inc=1; - - inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > cornerIndex[0]) ) ? -1:inc; - inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; - - // calculate the line :: who passes through the grouped points - Point3f lines[4]; - for(int i=0; i<4; i++){ - lines[i]=_interpolate2Dline(cntPts[i]); - } - - /* - * calculate the corner :: where the lines crosses to each other - * clockwise direction no clockwise direction - * 0 1 - * .---. 1 .---. 2 - * | | | | - * 3 .___. 0 .___. - * 2 3 - */ - for(int i=0; i < 4; i++){ - if(inc<0) - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 - else - nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 - } -} - -#ifdef APRIL_DEBUG -static void _darken(const Mat &im){ - for (int y = 0; y < im.rows; y++) { - for (int x = 0; x < im.cols; x++) { - im.data[im.cols*y+x] /= 2; - } - } -} -#endif - -/** - * - * @param im_orig - * @param _params - * @param candidates - * @param contours - */ -static void _apriltag(Mat im_orig, const Ptr & _params, std::vector< std::vector< Point2f > > &candidates, - std::vector< std::vector< Point > > &contours){ - - /////////////////////////////////////////////////////////// - /// Step 1. 
Detect quads according to requested image decimation - /// and blurring parameters. - Mat quad_im; - im_orig.copyTo(quad_im); - - if (_params->aprilTagQuadDecimate > 1){ - resize(im_orig, quad_im, Size(), 1/_params->aprilTagQuadDecimate, 1/_params->aprilTagQuadDecimate, INTER_AREA ); - } - - // Apply a Blur - if (_params->aprilTagQuadSigma != 0) { - // compute a reasonable kernel width by figuring that the - // kernel should go out 2 std devs. - // - // max sigma ksz - // 0.499 1 (disabled) - // 0.999 3 - // 1.499 5 - // 1.999 7 - - float sigma = fabsf((float) _params->aprilTagQuadSigma); - - int ksz = cvFloor(4 * sigma); // 2 std devs in each direction - ksz |= 1; // make odd number - - if (ksz > 1) { - if (_params->aprilTagQuadSigma > 0) - GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); - else { - Mat orig; - quad_im.copyTo(orig); - GaussianBlur(quad_im, quad_im, Size(ksz, ksz), sigma, sigma, BORDER_REPLICATE); - - // SHARPEN the image by subtracting the low frequency components. - for (int y = 0; y < orig.rows; y++) { - for (int x = 0; x < orig.cols; x++) { - int vorig = orig.data[y*orig.step + x]; - int vblur = quad_im.data[y*quad_im.step + x]; - - int v = 2*vorig - vblur; - if (v < 0) - v = 0; - if (v > 255) - v = 255; - - quad_im.data[y*quad_im.step + x] = (uint8_t) v; - } - } - } - } - } - -#ifdef APRIL_DEBUG - imwrite("1.1 debug_preprocess.pnm", quad_im); -#endif - - /////////////////////////////////////////////////////////// - /// Step 2. do the Threshold :: get the set of candidate quads - zarray_t *quads = apriltag_quad_thresh(_params, quad_im, contours); - - CV_Assert(quads != NULL); - - // adjust centers of pixels so that they correspond to the - // original full-resolution image. - if (_params->aprilTagQuadDecimate > 1) { - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *q; - _zarray_get_volatile(quads, i, &q); - for (int j = 0; j < 4; j++) { - q->p[j][0] *= _params->aprilTagQuadDecimate; - q->p[j][1] *= _params->aprilTagQuadDecimate; - } - } - } - -#ifdef APRIL_DEBUG - Mat im_quads = im_orig.clone(); - im_quads = im_quads*0.5; - srandom(0); - - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *quad; - _zarray_get_volatile(quads, i, &quad); - - const int bias = 100; - int color = bias + (random() % (255-bias)); - - line(im_quads, Point(quad->p[0][0], quad->p[0][1]), Point(quad->p[1][0], quad->p[1][1]), color, 1); - line(im_quads, Point(quad->p[1][0], quad->p[1][1]), Point(quad->p[2][0], quad->p[2][1]), color, 1); - line(im_quads, Point(quad->p[2][0], quad->p[2][1]), Point(quad->p[3][0], quad->p[3][1]), color, 1); - line(im_quads, Point(quad->p[3][0], quad->p[3][1]), Point(quad->p[0][0], quad->p[0][1]), color, 1); - } - imwrite("1.2 debug_quads_raw.pnm", im_quads); -#endif - - //////////////////////////////////////////////////////////////// - /// Step 3. 
Save the output :: candidate corners - for (int i = 0; i < _zarray_size(quads); i++) { - struct sQuad *quad; - _zarray_get_volatile(quads, i, &quad); - - std::vector< Point2f > corners; - corners.push_back(Point2f(quad->p[3][0], quad->p[3][1])); //pA - corners.push_back(Point2f(quad->p[0][0], quad->p[0][1])); //pB - corners.push_back(Point2f(quad->p[1][0], quad->p[1][1])); //pC - corners.push_back(Point2f(quad->p[2][0], quad->p[2][1])); //pD - - candidates.push_back(corners); - } - - _zarray_destroy(quads); -} - -static inline void findCornerInPyrImage(const float scale_init, const int closest_pyr_image_idx, - const std::vector& grey_pyramid, Mat corners, - const Ptr& params) { - // scale them to the closest pyramid level - if (scale_init != 1.f) - corners *= scale_init; // scale_init * scale_pyr - for (int idx = closest_pyr_image_idx - 1; idx >= 0; --idx) { - // scale them to new pyramid level - corners *= 2.f; // *= scale_pyr; - // use larger win size for larger images - const int subpix_win_size = std::max(grey_pyramid[idx].cols, grey_pyramid[idx].rows) > 1080 ? 5 : 3; - cornerSubPix(grey_pyramid[idx], corners, - Size(subpix_win_size, subpix_win_size), - Size(-1, -1), - TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - params->cornerRefinementMaxIterations, - params->cornerRefinementMinAccuracy)); - } -} - -/** - */ void detectMarkers(InputArray _image, const Ptr &_dictionary, OutputArrayOfArrays _corners, OutputArray _ids, const Ptr &_params, OutputArrayOfArrays _rejectedImgPoints) { - - CV_Assert(!_image.empty()); - CV_Assert(_params->markerBorderBits > 0); - // check that the parameters are set correctly if Aruco3 is used - CV_Assert(!(_params->useAruco3Detection == true && - _params->minSideLengthCanonicalImg == 0 && - _params->minMarkerLengthRatioOriginalImg == 0.0)); - - Mat grey; - _convertToGrey(_image.getMat(), grey); - - // Aruco3 functionality is the extension of Aruco. - // The description can be found in: - // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romera-Ramirez et al. - // if Aruco3 functionality if not wanted - // change some parameters to be sure to turn it off - if (!_params->useAruco3Detection) { - _params->minMarkerLengthRatioOriginalImg = 0.0; - _params->minSideLengthCanonicalImg = 0; - } - else { - // always turn on corner refinement in case of Aruco3, due to upsampling - _params->cornerRefinementMethod = CORNER_REFINE_SUBPIX; - } - - /// Step 0: equation (2) from paper [1] - const float fxfy = (!_params->useAruco3Detection ? 1.f : _params->minSideLengthCanonicalImg / - (_params->minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*_params->minMarkerLengthRatioOriginalImg)); - - /// Step 1: create image pyramid. Section 3.4. 
in [1] - std::vector grey_pyramid; - int closest_pyr_image_idx = 0, num_levels = 0; - //// Step 1.1: resize image with equation (1) from paper [1] - if (_params->useAruco3Detection) { - const float scale_pyr = 2.f; - const float img_area = static_cast(grey.rows*grey.cols); - const float min_area_marker = static_cast(_params->minSideLengthCanonicalImg*_params->minSideLengthCanonicalImg); - // find max level - num_levels = static_cast(log2(img_area / min_area_marker)/scale_pyr); - // the closest pyramid image to the downsampled segmentation image - // will later be used as start index for corner upsampling - const float scale_img_area = img_area * fxfy * fxfy; - closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr); - } - cv::buildPyramid(grey, grey_pyramid, num_levels); - - // resize to segmentation image - // in this reduces size the contours will be detected - if (fxfy != 1.f) - cv::resize(grey, grey, cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows))); - - /// STEP 2: Detect marker candidates - vector< vector< Point2f > > candidates; - vector< vector< Point > > contours; - vector< int > ids; - - vector< vector< vector< Point2f > > > candidatesSet; - vector< vector< vector< Point > > > contoursSet; - - /// STEP 2.a Detect marker candidates :: using AprilTag - if(_params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ - _apriltag(grey, _params, candidates, contours); - - candidatesSet.push_back(candidates); - contoursSet.push_back(contours); - } - /// STEP 2.b Detect marker candidates :: traditional way - else - _detectCandidates(grey, candidatesSet, contoursSet, _params); - - /// STEP 2: Check candidate codification (identify markers) - _identifyCandidates(grey, grey_pyramid, candidatesSet, contoursSet, _dictionary, - candidates, contours, ids, _params, _rejectedImgPoints); - - // copy to output arrays - _copyVector2Output(candidates, _corners); - Mat(ids).copyTo(_ids); - - /// STEP 3: Corner refinement :: use corner subpix - if( _params->cornerRefinementMethod == CORNER_REFINE_SUBPIX ) { - CV_Assert(_params->cornerRefinementWinSize > 0 && _params->cornerRefinementMaxIterations > 0 && - _params->cornerRefinementMinAccuracy > 0); - // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners - parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - if (_params->useAruco3Detection) { - const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols; - findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, _corners.getMat(i), _params); - } - else - cornerSubPix(grey, _corners.getMat(i), - Size(_params->cornerRefinementWinSize, _params->cornerRefinementWinSize), - Size(-1, -1), - TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - _params->cornerRefinementMaxIterations, - _params->cornerRefinementMinAccuracy)); - } - }); - } - - /// STEP 3, Optional : Corner refinement :: use contour container - if( _params->cornerRefinementMethod == CORNER_REFINE_CONTOUR){ - - if(! 
_ids.empty()){ - - // do corner refinement using the contours for each detected markers - parallel_for_(Range(0, _corners.cols()), [&](const Range& range) { - for (int i = range.start; i < range.end; i++) { - _refineCandidateLines(contours[i], candidates[i]); - } - }); - - // copy the corners to the output array - _copyVector2Output(candidates, _corners); - } - } - if (_params->cornerRefinementMethod != CORNER_REFINE_APRILTAG && - _params->cornerRefinementMethod != CORNER_REFINE_SUBPIX) { - // scale to orignal size, this however will lead to inaccurate detections! - _copyVector2Output(candidates, _corners, 1.f/fxfy); - } -} - -/** - */ -void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, - InputArray _cameraMatrix, InputArray _distCoeffs, - OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, - Ptr estimateParameters) { - - CV_Assert(markerLength > 0); - CV_Assert(estimateParameters); - - Mat markerObjPoints; - _getSingleMarkerObjectPoints(markerLength, markerObjPoints, *estimateParameters); - int nMarkers = (int)_corners.total(); - _rvecs.create(nMarkers, 1, CV_64FC3); - _tvecs.create(nMarkers, 1, CV_64FC3); - - Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat(); - - //// for each marker, calculate its pose - parallel_for_(Range(0, nMarkers), [&](const Range& range) { - const int begin = range.start; - const int end = range.end; - - for (int i = begin; i < end; i++) { - solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at(i), - tvecs.at(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod); - } - }); - - if(_objPoints.needed()){ - markerObjPoints.convertTo(_objPoints, -1); - } -} - - - -void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, - InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints) { - - CV_Assert(board->ids.size() == board->objPoints.size()); - CV_Assert(detectedIds.total() == detectedCorners.total()); - - size_t nDetectedMarkers = detectedIds.total(); - - vector< Point3f > objPnts; - objPnts.reserve(nDetectedMarkers); - - vector< Point2f > imgPnts; - imgPnts.reserve(nDetectedMarkers); - - // look for detected markers that belong to the board and get their information - for(unsigned int i = 0; i < nDetectedMarkers; i++) { - int currentId = detectedIds.getMat().ptr< int >(0)[i]; - for(unsigned int j = 0; j < board->ids.size(); j++) { - if(currentId == board->ids[j]) { - for(int p = 0; p < 4; p++) { - objPnts.push_back(board->objPoints[j][p]); - imgPnts.push_back(detectedCorners.getMat(i).ptr< Point2f >(0)[p]); - } - } - } - } - - // create output - Mat(objPnts).copyTo(objPoints); - Mat(imgPnts).copyTo(imgPoints); + ArucoDetector detector(_dictionary, _params); + detector.detectMarkers(_image, _corners, _ids, _rejectedImgPoints); } - - -/** - * Project board markers that are not included in the list of detected markers - */ -static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, - InputOutputArray _detectedIds, InputArray _cameraMatrix, - InputArray _distCoeffs, - vector< vector< Point2f > >& _undetectedMarkersProjectedCorners, - OutputArray _undetectedMarkersIds) { - - // first estimate board pose with the current avaible markers - Mat rvec, tvec; - int boardDetectedMarkers; - boardDetectedMarkers = aruco::estimatePoseBoard(_detectedCorners, _detectedIds, _board, - _cameraMatrix, _distCoeffs, rvec, tvec); - - // at least one marker from board so rvec and tvec are valid - 
if(boardDetectedMarkers == 0) return; - - // search undetected markers and project them using the previous pose - vector< vector< Point2f > > undetectedCorners; - vector< int > undetectedIds; - for(unsigned int i = 0; i < _board->ids.size(); i++) { - int foundIdx = -1; - for(unsigned int j = 0; j < _detectedIds.total(); j++) { - if(_board->ids[i] == _detectedIds.getMat().ptr< int >()[j]) { - foundIdx = j; - break; - } - } - - // not detected - if(foundIdx == -1) { - undetectedCorners.push_back(vector< Point2f >()); - undetectedIds.push_back(_board->ids[i]); - projectPoints(_board->objPoints[i], rvec, tvec, _cameraMatrix, _distCoeffs, - undetectedCorners.back()); - } - } - - - // parse output - Mat(undetectedIds).copyTo(_undetectedMarkersIds); - _undetectedMarkersProjectedCorners = undetectedCorners; -} - - - -/** - * Interpolate board markers that are not included in the list of detected markers using - * global homography - */ -static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, - InputOutputArray _detectedIds, - vector< vector< Point2f > >& _undetectedMarkersProjectedCorners, - OutputArray _undetectedMarkersIds) { - - - // check board points are in the same plane, if not, global homography cannot be applied - CV_Assert(_board->objPoints.size() > 0); - CV_Assert(_board->objPoints[0].size() > 0); - float boardZ = _board->objPoints[0][0].z; - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { - for(unsigned int j = 0; j < _board->objPoints[i].size(); j++) { - CV_Assert(boardZ == _board->objPoints[i][j].z); - } - } - - vector< Point2f > detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected - // marker corners in a single vector - vector< Point2f > imageCornersAll; // Image corners of all detected markers in a single vector - vector< vector< Point2f > > undetectedMarkersObj2D; // Object coordinates (without Z) of all - // missing markers in different vectors - vector< int > undetectedMarkersIds; // ids of missing markers - // find markers included in board, and missing markers from board. 
Fill the previous vectors - for(unsigned int j = 0; j < _board->ids.size(); j++) { - bool found = false; - for(unsigned int i = 0; i < _detectedIds.total(); i++) { - if(_detectedIds.getMat().ptr< int >()[i] == _board->ids[j]) { - for(int c = 0; c < 4; c++) { - imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]); - detectedMarkersObj2DAll.push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); - } - found = true; - break; - } - } - if(!found) { - undetectedMarkersObj2D.push_back(vector< Point2f >()); - for(int c = 0; c < 4; c++) { - undetectedMarkersObj2D.back().push_back( - Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y)); - } - undetectedMarkersIds.push_back(_board->ids[j]); - } - } - if(imageCornersAll.size() == 0) return; - - // get homography from detected markers - Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll); - - _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size()); - - // for each undetected marker, apply transformation - for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) { - perspectiveTransform(undetectedMarkersObj2D[i], _undetectedMarkersProjectedCorners[i], transformation); - } - - Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds); -} - - - -/** - */ void refineDetectedMarkers(InputArray _image, const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds, InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix, InputArray _distCoeffs, float minRepDistance, float errorCorrectionRate, bool checkAllOrders, OutputArray _recoveredIdxs, const Ptr &_params) { - - CV_Assert(minRepDistance > 0); - - if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return; - - DetectorParameters ¶ms = *_params; - - // get projections of missing markers in the board - vector< vector< Point2f > > undetectedMarkersCorners; - vector< int > undetectedMarkersIds; - if(_cameraMatrix.total() != 0) { - // reproject based on camera projection model - _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix, _distCoeffs, - undetectedMarkersCorners, undetectedMarkersIds); - - } else { - // reproject based on global homography - _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners, - undetectedMarkersIds); - } - - // list of missing markers indicating if they have been assigned to a candidate - vector< bool > alreadyIdentified(_rejectedCorners.total(), false); - - // maximum bits that can be corrected - Dictionary &dictionary = *(_board->dictionary); - int maxCorrectionRecalculated = - int(double(dictionary.maxCorrectionBits) * errorCorrectionRate); - - Mat grey; - _convertToGrey(_image, grey); - - // vector of final detected marker corners and ids - vector > finalAcceptedCorners; - vector< int > finalAcceptedIds; - // fill with the current markers - finalAcceptedCorners.resize(_detectedCorners.total()); - finalAcceptedIds.resize(_detectedIds.total()); - for(unsigned int i = 0; i < _detectedIds.total(); i++) { - finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone(); - finalAcceptedIds[i] = _detectedIds.getMat().ptr< int >()[i]; - } - vector< int > recoveredIdxs; // original indexes of accepted markers in _rejectedCorners - - // for each missing marker, try to find a correspondence - for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) { - - // best match at the moment - int closestCandidateIdx = -1; - double closestCandidateDistance = minRepDistance * 
minRepDistance + 1; - Mat closestRotatedMarker; - - for(unsigned int j = 0; j < _rejectedCorners.total(); j++) { - if(alreadyIdentified[j]) continue; - - // check distance - double minDistance = closestCandidateDistance + 1; - bool valid = false; - int validRot = 0; - for(int c = 0; c < 4; c++) { // first corner in rejected candidate - double currentMaxDistance = 0; - for(int k = 0; k < 4; k++) { - Point2f rejCorner = _rejectedCorners.getMat(j).ptr< Point2f >()[(c + k) % 4]; - Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner; - double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y; - currentMaxDistance = max(currentMaxDistance, cornerDist); - } - // if distance is better than current best distance - if(currentMaxDistance < closestCandidateDistance) { - valid = true; - validRot = c; - minDistance = currentMaxDistance; - } - if(!checkAllOrders) break; - } - - if(!valid) continue; - - // apply rotation - Mat rotatedMarker; - if(checkAllOrders) { - rotatedMarker = Mat(4, 1, CV_32FC2); - for(int c = 0; c < 4; c++) - rotatedMarker.ptr< Point2f >()[c] = - _rejectedCorners.getMat(j).ptr< Point2f >()[(c + 4 + validRot) % 4]; - } - else rotatedMarker = _rejectedCorners.getMat(j); - - // last filter, check if inner code is close enough to the assigned marker code - int codeDistance = 0; - // if errorCorrectionRate, dont check code - if(errorCorrectionRate >= 0) { - - // extract bits - Mat bits = _extractBits( - grey, rotatedMarker, dictionary.markerSize, params.markerBorderBits, - params.perspectiveRemovePixelPerCell, - params.perspectiveRemoveIgnoredMarginPerCell, params.minOtsuStdDev); - - Mat onlyBits = - bits.rowRange(params.markerBorderBits, bits.rows - params.markerBorderBits) - .colRange(params.markerBorderBits, bits.rows - params.markerBorderBits); - - codeDistance = - dictionary.getDistanceToId(onlyBits, undetectedMarkersIds[i], false); - } - - // if everythin is ok, assign values to current best match - if(errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) { - closestCandidateIdx = j; - closestCandidateDistance = minDistance; - closestRotatedMarker = rotatedMarker; - } - } - - // if at least one good match, we have rescue the missing marker - if(closestCandidateIdx >= 0) { - - // subpixel refinement - if(_params->cornerRefinementMethod == CORNER_REFINE_SUBPIX) { - CV_Assert(params.cornerRefinementWinSize > 0 && - params.cornerRefinementMaxIterations > 0 && - params.cornerRefinementMinAccuracy > 0); - cornerSubPix(grey, closestRotatedMarker, - Size(params.cornerRefinementWinSize, params.cornerRefinementWinSize), - Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, - params.cornerRefinementMaxIterations, - params.cornerRefinementMinAccuracy)); - } - - // remove from rejected - alreadyIdentified[closestCandidateIdx] = true; - - // add to detected - finalAcceptedCorners.push_back(closestRotatedMarker); - finalAcceptedIds.push_back(undetectedMarkersIds[i]); - - // add the original index of the candidate - recoveredIdxs.push_back(closestCandidateIdx); - } - } - - // parse output - if(finalAcceptedIds.size() != _detectedIds.total()) { - // parse output - Mat(finalAcceptedIds).copyTo(_detectedIds); - _copyVector2Output(finalAcceptedCorners, _detectedCorners); - - // recalculate _rejectedCorners based on alreadyIdentified - vector > finalRejected; - for(unsigned int i = 0; i < alreadyIdentified.size(); i++) { - if(!alreadyIdentified[i]) { - finalRejected.push_back(_rejectedCorners.getMat(i).clone()); - } - } - 
_copyVector2Output(finalRejected, _rejectedCorners); - - if(_recoveredIdxs.needed()) { - Mat(recoveredIdxs).copyTo(_recoveredIdxs); - } - } -} - - - - -/** - */ -int estimatePoseBoard(InputArrayOfArrays _corners, InputArray _ids, const Ptr &board, - InputArray _cameraMatrix, InputArray _distCoeffs, InputOutputArray _rvec, - InputOutputArray _tvec, bool useExtrinsicGuess) { - - CV_Assert(_corners.total() == _ids.total()); - - // get object and image points for the solvePnP function - Mat objPoints, imgPoints; - getBoardObjectAndImagePoints(board, _corners, _ids, objPoints, imgPoints); - - CV_Assert(imgPoints.total() == objPoints.total()); - - if(objPoints.total() == 0) // 0 of the detected markers in board - return 0; - - solvePnP(objPoints, imgPoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); - - // divide by four since all the four corners are concatenated in the array for each marker - return (int)objPoints.total() / 4; -} - - - - -/** - */ -void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { - _drawPlanarBoardImpl(this, outSize, _img, marginSize, borderBits); + Ptr refineParams = RefineParameters::create(minRepDistance, errorCorrectionRate, checkAllOrders); + ArucoDetector detector(_board->getDictionary(), _params, refineParams); + detector.refineDetectedMarkers(_image, _board, _detectedCorners, _detectedIds, _rejectedCorners, _cameraMatrix, + _distCoeffs, _recoveredIdxs); } - -/** -*/ -Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, InputArray ids) { - - CV_Assert(objPoints.total() == ids.total()); - CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1); - - std::vector< std::vector< Point3f > > obj_points_vector; - Point3f rightBottomBorder = Point3f(0.f, 0.f, 0.f); - for (unsigned int i = 0; i < objPoints.total(); i++) { - std::vector corners; - Mat corners_mat = objPoints.getMat(i); - - if(corners_mat.type() == CV_32FC1) - corners_mat = corners_mat.reshape(3); - CV_Assert(corners_mat.total() == 4); - - for (int j = 0; j < 4; j++) { - const Point3f& corner = corners_mat.at(j); - corners.push_back(corner); - rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); - rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); - rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); - } - obj_points_vector.push_back(corners); - } - - Ptr res = makePtr(); - ids.copyTo(res->ids); - res->objPoints = obj_points_vector; - res->dictionary = cv::makePtr(dictionary); - res->rightBottomBorder = rightBottomBorder; - return res; -} - -/** - */ -void Board::setIds(InputArray ids_) { - CV_Assert(objPoints.size() == ids_.total()); - ids_.copyTo(this->ids); -} - -/** - */ -Ptr GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation, - const Ptr &dictionary, int firstMarker) { - - CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); - - Ptr res = makePtr(); - - res->_markersX = markersX; - res->_markersY = markersY; - res->_markerLength = markerLength; - res->_markerSeparation = markerSeparation; - res->dictionary = dictionary; - - size_t totalMarkers = (size_t) markersX * markersY; - res->ids.resize(totalMarkers); - res->objPoints.reserve(totalMarkers); - - // fill ids with first identifiers - for(unsigned int i = 0; i < totalMarkers; i++) { - res->ids[i] = i + firstMarker; - } - - // calculate Board objPoints - for(int y = 0; y < markersY; y++) { - for(int x = 0; x < markersX; x++) { - vector corners(4); - corners[0] = 
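For callers migrating off the removed free function, a hedged usage sketch (not part of the patch) of the ArucoDetector/RefineParameters pair used by the new wrapper in this hunk; the constructor and method signatures are taken from the patch itself, while DetectorParameters::create() and the 10/3/true refine values are assumptions.

#include <opencv2/aruco.hpp>
#include <vector>

// Sketch only: rescue rejected candidates against a known board with the new
// detector object instead of the removed free function.
static void refineWithDetector(const cv::Mat& image,
                               const cv::Ptr<cv::aruco::Board>& board,
                               std::vector<std::vector<cv::Point2f> >& corners,
                               std::vector<int>& ids,
                               std::vector<std::vector<cv::Point2f> >& rejected) {
    cv::Ptr<cv::aruco::DetectorParameters> detParams = cv::aruco::DetectorParameters::create();
    cv::Ptr<cv::aruco::RefineParameters> refParams =
        cv::aruco::RefineParameters::create(10.f, 3.f, true);   // assumed legacy defaults
    cv::aruco::ArucoDetector detector(board->getDictionary(), detParams, refParams);
    // no intrinsics given, so the global-homography reprojection path is used
    detector.refineDetectedMarkers(image, board, corners, ids, rejected,
                                   cv::noArray(), cv::noArray(), cv::noArray());
}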
Point3f(x * (markerLength + markerSeparation), - y * (markerLength + markerSeparation), 0); - corners[1] = corners[0] + Point3f(markerLength, 0, 0); - corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); - corners[3] = corners[0] + Point3f(0, markerLength, 0); - res->objPoints.push_back(corners); - } - } - res->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * (markersX - 1), - markersY * markerLength + markerSeparation * (markersY - 1), 0.f); - return res; -} - - - -/** - */ -void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners, - InputArray _ids, Scalar borderColor) { - - - CV_Assert(_image.getMat().total() != 0 && - (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); - CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); - - // calculate colors - Scalar textColor, cornerColor; - textColor = cornerColor = borderColor; - swap(textColor.val[0], textColor.val[1]); // text color just sawp G and R - swap(cornerColor.val[1], cornerColor.val[2]); // corner color just sawp G and B - - int nMarkers = (int)_corners.total(); - for(int i = 0; i < nMarkers; i++) { - Mat currentMarker = _corners.getMat(i); - CV_Assert(currentMarker.total() == 4 && currentMarker.type() == CV_32FC2); - - // draw marker sides - for(int j = 0; j < 4; j++) { - Point2f p0, p1; - p0 = currentMarker.ptr< Point2f >(0)[j]; - p1 = currentMarker.ptr< Point2f >(0)[(j + 1) % 4]; - line(_image, p0, p1, borderColor, 1); - } - // draw first corner mark - rectangle(_image, currentMarker.ptr< Point2f >(0)[0] - Point2f(3, 3), - currentMarker.ptr< Point2f >(0)[0] + Point2f(3, 3), cornerColor, 1, LINE_AA); - - // draw ID - if(_ids.total() != 0) { - Point2f cent(0, 0); - for(int p = 0; p < 4; p++) - cent += currentMarker.ptr< Point2f >(0)[p]; - cent = cent / 4.; - stringstream s; - s << "id=" << _ids.getMat().ptr< int >(0)[i]; - putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2); - } - } -} - - -/** - */ -void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray _img, int borderBits) { - dictionary->drawMarker(id, sidePixels, _img, borderBits); -} - - - -void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize, - int borderBits) { - - CV_Assert(!outSize.empty()); - CV_Assert(marginSize >= 0); - - _img.create(outSize, CV_8UC1); - Mat out = _img.getMat(); - out.setTo(Scalar::all(255)); - out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize); - - // calculate max and min values in XY plane - CV_Assert(_board->objPoints.size() > 0); - float minX, maxX, minY, maxY; - minX = maxX = _board->objPoints[0][0].x; - minY = maxY = _board->objPoints[0][0].y; - - for(unsigned int i = 0; i < _board->objPoints.size(); i++) { - for(int j = 0; j < 4; j++) { - minX = min(minX, _board->objPoints[i][j].x); - maxX = max(maxX, _board->objPoints[i][j].x); - minY = min(minY, _board->objPoints[i][j].y); - maxY = max(maxY, _board->objPoints[i][j].y); - } - } - - float sizeX = maxX - minX; - float sizeY = maxY - minY; - - // proportion transformations - float xReduction = sizeX / float(out.cols); - float yReduction = sizeY / float(out.rows); - - // determine the zone where the markers are placed - if(xReduction > yReduction) { - int nRows = int(sizeY / xReduction); - int rowsMargins = (out.rows - nRows) / 2; - out.adjustROI(-rowsMargins, -rowsMargins, 0, 0); - } else { - int nCols = int(sizeX / yReduction); - int colsMargins = (out.cols - nCols) / 2; - out.adjustROI(0, 0, -colsMargins, 
-colsMargins); - } - - // now paint each marker - Dictionary &dictionary = *(_board->dictionary); - Mat marker; - Point2f outCorners[3]; - Point2f inCorners[3]; - for(unsigned int m = 0; m < _board->objPoints.size(); m++) { - // transform corners to markerZone coordinates - for(int j = 0; j < 3; j++) { - Point2f pf = Point2f(_board->objPoints[m][j].x, _board->objPoints[m][j].y); - // move top left to 0, 0 - pf -= Point2f(minX, minY); - pf.x = pf.x / sizeX * float(out.cols); - pf.y = pf.y / sizeY * float(out.rows); - outCorners[j] = pf; - } - - // get marker - Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order - dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square - dictionary.drawMarker(_board->ids[m], dst_sz.width, marker, borderBits); - - if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) { - // marker is aligned to image axes - marker.copyTo(out(Rect(outCorners[0], dst_sz))); - continue; - } - - // interpolate tiny marker to marker position in markerZone - inCorners[0] = Point2f(-0.5f, -0.5f); - inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f); - inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f); - - // remove perspective - Mat transformation = getAffineTransform(inCorners, outCorners); - warpAffine(marker, out, transformation, out.size(), INTER_LINEAR, - BORDER_TRANSPARENT); - } -} - - - -/** - */ -void drawPlanarBoard(const Ptr &_board, Size outSize, OutputArray _img, int marginSize, - int borderBits) { - _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits); -} - - - -/** - */ -double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, - const Ptr &board, Size imageSize, InputOutputArray _cameraMatrix, - InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, - OutputArrayOfArrays _tvecs, - OutputArray _stdDeviationsIntrinsics, - OutputArray _stdDeviationsExtrinsics, - OutputArray _perViewErrors, - int flags, TermCriteria criteria) { - - // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera - // function - vector< Mat > processedObjectPoints, processedImagePoints; - size_t nFrames = _counter.total(); - int markerCounter = 0; - for(size_t frame = 0; frame < nFrames; frame++) { - int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame]; - vector< Mat > thisFrameCorners; - vector< int > thisFrameIds; - - CV_Assert(nMarkersInThisFrame > 0); - - thisFrameCorners.reserve((size_t) nMarkersInThisFrame); - thisFrameIds.reserve((size_t) nMarkersInThisFrame); - for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) { - thisFrameCorners.push_back(_corners.getMat(j)); - thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]); - } - markerCounter += nMarkersInThisFrame; - Mat currentImgPoints, currentObjPoints; - getBoardObjectAndImagePoints(board, thisFrameCorners, thisFrameIds, currentObjPoints, - currentImgPoints); - if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) { - processedImagePoints.push_back(currentImgPoints); - processedObjectPoints.push_back(currentObjPoints); - } - } - - return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix, - _distCoeffs, _rvecs, _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, - _perViewErrors, flags, criteria); -} - - - -/** - */ -double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, - const Ptr &board, Size imageSize, InputOutputArray 
_cameraMatrix, - InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, - OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria) { - return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, - noArray(), noArray(), noArray(), flags, criteria); -} - - - } } diff --git a/modules/aruco/src/aruco_calib_pose.cpp b/modules/aruco/src/aruco_calib_pose.cpp new file mode 100644 index 00000000000..1290126d5c7 --- /dev/null +++ b/modules/aruco/src/aruco_calib_pose.cpp @@ -0,0 +1,257 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include + +namespace cv { +namespace aruco { +using namespace std; + +void getBoardObjectAndImagePoints(const Ptr &board, InputArrayOfArrays detectedCorners, InputArray detectedIds, + OutputArray objPoints, OutputArray imgPoints) { + CV_Assert(board->getIds().size() == board->getObjPoints().size()); + CV_Assert(detectedIds.total() == detectedCorners.total()); + + size_t nDetectedMarkers = detectedIds.total(); + + vector objPnts; + objPnts.reserve(nDetectedMarkers); + + vector imgPnts; + imgPnts.reserve(nDetectedMarkers); + + // look for detected markers that belong to the board and get their information + for(unsigned int i = 0; i < nDetectedMarkers; i++) { + int currentId = detectedIds.getMat().ptr< int >(0)[i]; + for(unsigned int j = 0; j < board->getIds().size(); j++) { + if(currentId == board->getIds()[j]) { + for(int p = 0; p < 4; p++) { + objPnts.push_back(board->getObjPoints()[j][p]); + imgPnts.push_back(detectedCorners.getMat(i).ptr< Point2f >(0)[p]); + } + } + } + } + + // create output + Mat(objPnts).copyTo(objPoints); + Mat(imgPnts).copyTo(imgPoints); +} + +/** + * @brief Return object points for the system centered in a middle (by default) or in a top left corner of single + * marker, given the marker length + */ +static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParameters& estimateParameters) { + CV_Assert(markerLength > 0); + Mat objPoints(4, 1, CV_32FC3); + // set coordinate system in the top-left corner of the marker, with Z pointing out + if (estimateParameters.pattern == ARUCO_CW_TOP_LEFT_CORNER) { + objPoints.ptr(0)[0] = Vec3f(0.f, 0.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength, 0.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength, markerLength, 0); + objPoints.ptr(0)[3] = Vec3f(0.f, markerLength, 0); + } + else if (estimateParameters.pattern == ARUCO_CCW_CENTER) { + objPoints.ptr(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0); + objPoints.ptr(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0); + objPoints.ptr(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0); + } + else + CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern"); + return objPoints; +} + +void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, + InputArray _cameraMatrix, InputArray _distCoeffs, + OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints, + const Ptr& estimateParameters) { + CV_Assert(markerLength > 0); + + Mat markerObjPoints = _getSingleMarkerObjectPoints(markerLength, *estimateParameters); + int nMarkers = (int)_corners.total(); + _rvecs.create(nMarkers, 1, CV_64FC3); + _tvecs.create(nMarkers, 1, CV_64FC3); + + Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat(); + + //// for each marker, calculate its 
pose + parallel_for_(Range(0, nMarkers), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at(i), + tvecs.at(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod); + } + }); + + if(_objPoints.needed()){ + markerObjPoints.convertTo(_objPoints, -1); + } +} + +int estimatePoseBoard(InputArrayOfArrays _corners, InputArray _ids, const Ptr &board, + InputArray _cameraMatrix, InputArray _distCoeffs, InputOutputArray _rvec, + InputOutputArray _tvec, bool useExtrinsicGuess) { + CV_Assert(_corners.total() == _ids.total()); + + // get object and image points for the solvePnP function + Mat objPoints, imgPoints; + getBoardObjectAndImagePoints(board, _corners, _ids, objPoints, imgPoints); + + CV_Assert(imgPoints.total() == objPoints.total()); + + if(objPoints.total() == 0) // 0 of the detected markers in board + return 0; + + solvePnP(objPoints, imgPoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); + + // divide by four since all the four corners are concatenated in the array for each marker + return (int)objPoints.total() / 4; +} + +/** + * Check if a set of 3d points are enough for calibration. Z coordinate is ignored. + * Only axis parallel lines are considered + */ +static bool _arePointsEnoughForPoseEstimation(const vector &points) { + if(points.size() < 4) return false; + + vector sameXValue; // different x values in points + vector sameXCounter; // number of points with the x value in sameXValue + for(unsigned int i = 0; i < points.size(); i++) { + bool found = false; + for(unsigned int j = 0; j < sameXValue.size(); j++) { + if(sameXValue[j] == points[i].x) { + found = true; + sameXCounter[j]++; + } + } + if(!found) { + sameXValue.push_back(points[i].x); + sameXCounter.push_back(1); + } + } + + // count how many x values has more than 2 points + int moreThan2 = 0; + for(unsigned int i = 0; i < sameXCounter.size(); i++) { + if(sameXCounter[i] >= 2) moreThan2++; + } + + // if we have more than 1 two xvalues with more than 2 points, calibration is ok + if(moreThan2 > 1) + return true; + return false; +} + +bool estimatePoseCharucoBoard(InputArray _charucoCorners, InputArray _charucoIds, + const Ptr &_board, InputArray _cameraMatrix, InputArray _distCoeffs, + InputOutputArray _rvec, InputOutputArray _tvec, bool useExtrinsicGuess) { + CV_Assert((_charucoCorners.getMat().total() == _charucoIds.getMat().total())); + + // need, at least, 4 corners + if(_charucoIds.getMat().total() < 4) return false; + + vector objPoints; + objPoints.reserve(_charucoIds.getMat().total()); + for(unsigned int i = 0; i < _charucoIds.getMat().total(); i++) { + int currId = _charucoIds.getMat().at< int >(i); + CV_Assert(currId >= 0 && currId < (int)_board->chessboardCorners.size()); + objPoints.push_back(_board->chessboardCorners[currId]); + } + + // points need to be in different lines, check if detected points are enough + if(!_arePointsEnoughForPoseEstimation(objPoints)) return false; + + solvePnP(objPoints, _charucoCorners, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); + return true; +} + +double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, + const Ptr &board, Size imageSize, InputOutputArray _cameraMatrix, + InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, + OutputArrayOfArrays _tvecs, + OutputArray _stdDeviationsIntrinsics, + OutputArray 
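A hedged illustration of the ARUCO_CCW_CENTER convention above (not part of the patch): with the object-frame origin at the marker centre, a single marker can equally be posed by calling cv::solvePnP directly on the same four object points.

#include <opencv2/calib3d.hpp>
#include <vector>

// Sketch only: pose of one marker from its four detected image corners, using the
// centre-origin, counter-clockwise object points defined above.
static void poseFromCorners(const std::vector<cv::Point2f>& imgCorners,
                            float markerLength,
                            const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                            cv::Mat& rvec, cv::Mat& tvec) {
    const float h = markerLength / 2.f;
    std::vector<cv::Point3f> objCorners = {
        {-h,  h, 0.f},   // top-left
        { h,  h, 0.f},   // top-right
        { h, -h, 0.f},   // bottom-right
        {-h, -h, 0.f}    // bottom-left
    };
    cv::solvePnP(objCorners, imgCorners, cameraMatrix, distCoeffs, rvec, tvec);
}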
_stdDeviationsExtrinsics, + OutputArray _perViewErrors, + int flags, const TermCriteria& criteria) { + // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera + // function + vector processedObjectPoints, processedImagePoints; + size_t nFrames = _counter.total(); + int markerCounter = 0; + for(size_t frame = 0; frame < nFrames; frame++) { + int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame]; + vector thisFrameCorners; + vector thisFrameIds; + + CV_Assert(nMarkersInThisFrame > 0); + + thisFrameCorners.reserve((size_t) nMarkersInThisFrame); + thisFrameIds.reserve((size_t) nMarkersInThisFrame); + for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) { + thisFrameCorners.push_back(_corners.getMat(j)); + thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]); + } + markerCounter += nMarkersInThisFrame; + Mat currentImgPoints, currentObjPoints; + getBoardObjectAndImagePoints(board, thisFrameCorners, thisFrameIds, currentObjPoints, + currentImgPoints); + if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) { + processedImagePoints.push_back(currentImgPoints); + processedObjectPoints.push_back(currentObjPoints); + } + } + return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix, _distCoeffs, _rvecs, + _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria); +} + +double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, const Ptr &board, + Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, + OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, const TermCriteria& criteria) { + return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix, _distCoeffs, + _rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria); +} + +double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, + const Ptr &_board, Size imageSize, + InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, + OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, + OutputArray _stdDeviationsIntrinsics, + OutputArray _stdDeviationsExtrinsics, + OutputArray _perViewErrors, + int flags, const TermCriteria& criteria) { + CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total())); + + // Join object points of charuco corners in a single vector for calibrateCamera() function + vector > allObjPoints; + allObjPoints.resize(_charucoIds.total()); + for(unsigned int i = 0; i < _charucoIds.total(); i++) { + unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total(); + CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total()); + allObjPoints[i].reserve(nCorners); + + for(unsigned int j = 0; j < nCorners; j++) { + int pointId = _charucoIds.getMat(i).at< int >(j); + CV_Assert(pointId >= 0 && pointId < (int)_board->chessboardCorners.size()); + allObjPoints[i].push_back(_board->chessboardCorners[pointId]); + } + } + return calibrateCamera(allObjPoints, _charucoCorners, imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, + _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria); +} + +double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, + const Ptr &_board, Size imageSize, InputOutputArray _cameraMatrix, + InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, + int flags, const TermCriteria& 
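A hedged sketch of how the per-frame counter consumed above is typically assembled by a caller (not part of the patch); the short calibrateCameraAruco overload with defaulted rvecs/tvecs is assumed.

#include <opencv2/aruco.hpp>
#include <vector>

// Sketch only: flatten per-frame detections into the single corner/id lists plus the
// per-frame marker counter expected by calibrateCameraAruco().
static double calibrateFromFrames(const std::vector<std::vector<std::vector<cv::Point2f> > >& frameCorners,
                                  const std::vector<std::vector<int> >& frameIds,
                                  const cv::Ptr<cv::aruco::Board>& board, cv::Size imageSize,
                                  cv::Mat& cameraMatrix, cv::Mat& distCoeffs) {
    std::vector<std::vector<cv::Point2f> > allCorners;
    std::vector<int> allIds, markerCounterPerFrame;
    for (size_t f = 0; f < frameCorners.size(); f++) {
        markerCounterPerFrame.push_back((int)frameCorners[f].size());
        allCorners.insert(allCorners.end(), frameCorners[f].begin(), frameCorners[f].end());
        allIds.insert(allIds.end(), frameIds[f].begin(), frameIds[f].end());
    }
    return cv::aruco::calibrateCameraAruco(allCorners, allIds, markerCounterPerFrame, board,
                                           imageSize, cameraMatrix, distCoeffs);
}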
criteria) { +return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, + _tvecs, noArray(), noArray(), noArray(), flags, criteria); +} + +} +} diff --git a/modules/aruco/src/aruco_detector.cpp b/modules/aruco/src/aruco_detector.cpp new file mode 100644 index 00000000000..45f7cc101b8 --- /dev/null +++ b/modules/aruco/src/aruco_detector.cpp @@ -0,0 +1,1261 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "precomp.hpp" +#include +#include "opencv2/aruco_detector.hpp" +#include "opencv2/aruco/aruco_calib_pose.hpp" +#include "aruco_utils.hpp" +#include "apriltag/apriltag_quad_thresh.hpp" +#include + +namespace cv { +namespace aruco { + +using namespace std; + +static inline bool readWrite(DetectorParameters ¶ms, const Ptr& readNode = nullptr, + const Ptr& writeStorage = nullptr) { + CV_Assert(!readNode.empty() || !writeStorage.empty()); + bool check = false; + + check |= readWriteParameter("adaptiveThreshWinSizeMin", params.adaptiveThreshWinSizeMin, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeMax", params.adaptiveThreshWinSizeMax, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshWinSizeStep", params.adaptiveThreshWinSizeStep, readNode, writeStorage); + check |= readWriteParameter("adaptiveThreshConstant", params.adaptiveThreshConstant, readNode, writeStorage); + check |= readWriteParameter("minMarkerPerimeterRate", params.minMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("maxMarkerPerimeterRate", params.maxMarkerPerimeterRate, readNode, writeStorage); + check |= readWriteParameter("polygonalApproxAccuracyRate", params.polygonalApproxAccuracyRate, + readNode, writeStorage); + check |= readWriteParameter("minCornerDistanceRate", params.minCornerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("minDistanceToBorder", params.minDistanceToBorder, readNode, writeStorage); + check |= readWriteParameter("minMarkerDistanceRate", params.minMarkerDistanceRate, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMethod", params.cornerRefinementMethod, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementWinSize", params.cornerRefinementWinSize, readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMaxIterations", params.cornerRefinementMaxIterations, + readNode, writeStorage); + check |= readWriteParameter("cornerRefinementMinAccuracy", params.cornerRefinementMinAccuracy, + readNode, writeStorage); + check |= readWriteParameter("markerBorderBits", params.markerBorderBits, readNode, writeStorage); + check |= readWriteParameter("perspectiveRemovePixelPerCell", params.perspectiveRemovePixelPerCell, + readNode, writeStorage); + check |= readWriteParameter("perspectiveRemoveIgnoredMarginPerCell", params.perspectiveRemoveIgnoredMarginPerCell, + readNode, writeStorage); + check |= readWriteParameter("maxErroneousBitsInBorderRate", params.maxErroneousBitsInBorderRate, + readNode, writeStorage); + check |= readWriteParameter("minOtsuStdDev", params.minOtsuStdDev, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", params.errorCorrectionRate, readNode, writeStorage); + // new aruco 3 functionality + check |= readWriteParameter("useAruco3Detection", params.useAruco3Detection, readNode, writeStorage); + check |= 
readWriteParameter("minSideLengthCanonicalImg", params.minSideLengthCanonicalImg, readNode, writeStorage); + check |= readWriteParameter("minMarkerLengthRatioOriginalImg", params.minMarkerLengthRatioOriginalImg, + readNode, writeStorage); + return check; +} + +bool DetectorParameters::readDetectorParameters(const FileNode& fn) { + if(fn.empty()) + return false; + Ptr pfn = makePtr(fn); + return readWrite(*this, pfn); +} + +bool DetectorParameters::writeDetectorParameters(const Ptr& fs) +{ + if (fs.empty() && !fs->isOpened()) + return false; + return readWrite(*this, nullptr, fs); +} + +static inline bool readWrite(RefineParameters& refineParameters, const Ptr& readNode, + const Ptr& writeStorage = nullptr) { + CV_Assert(!readNode.empty() || !writeStorage.empty()); + bool check = false; + + check |= readWriteParameter("minRepDistance", refineParameters.minRepDistance, readNode, writeStorage); + check |= readWriteParameter("errorCorrectionRate", refineParameters.errorCorrectionRate, readNode, writeStorage); + check |= readWriteParameter("checkAllOrders", refineParameters.checkAllOrders, readNode, writeStorage); + return check; +} + +bool RefineParameters::readRefineParameters(const FileNode &fn) { + if(fn.empty()) + return false; + Ptr pfn = makePtr(fn); + return readWrite(*this, pfn); +} + +bool RefineParameters::writeRefineParameters(const Ptr &fs) { + if(fs.empty()) + return false; + return readWrite(*this, nullptr, fs); +} + +/** + * @brief Threshold input image using adaptive thresholding + */ +static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) { + + CV_Assert(winSize >= 3); + if(winSize % 2 == 0) winSize++; // win size must be odd + adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant); +} + + +/** + * @brief Given a tresholded image, find the contours, calculate their polygonal approximation + * and take those that accomplish some conditions + */ +static void _findMarkerContours(const Mat &in, vector< vector< Point2f > > &candidates, + vector< vector< Point > > &contoursOut, double minPerimeterRate, + double maxPerimeterRate, double accuracyRate, + double minCornerDistanceRate, int minDistanceToBorder, int minSize) { + + CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 && + minCornerDistanceRate >= 0 && minDistanceToBorder >= 0); + + // calculate maximum and minimum sizes in pixels + unsigned int minPerimeterPixels = + (unsigned int)(minPerimeterRate * max(in.cols, in.rows)); + unsigned int maxPerimeterPixels = + (unsigned int)(maxPerimeterRate * max(in.cols, in.rows)); + + // for aruco3 functionality + if (minSize != 0) { + minPerimeterPixels = 4*minSize; + } + + Mat contoursImg; + in.copyTo(contoursImg); + vector< vector< Point > > contours; + findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE); + // now filter list of contours + for(unsigned int i = 0; i < contours.size(); i++) { + // check perimeter + if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels) + continue; + + // check is square and is convex + vector< Point > approxCurve; + approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true); + if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue; + + // check min distance between corners + double minDistSq = + max(contoursImg.cols, contoursImg.rows) * max(contoursImg.cols, contoursImg.rows); + for(int j = 0; j < 4; j++) { + double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 
4].x) * + (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) + + (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) * + (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y); + minDistSq = min(minDistSq, d); + } + double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate; + if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue; + + // check if it is too near to the image border + bool tooNearBorder = false; + for(int j = 0; j < 4; j++) { + if(approxCurve[j].x < minDistanceToBorder || approxCurve[j].y < minDistanceToBorder || + approxCurve[j].x > contoursImg.cols - 1 - minDistanceToBorder || + approxCurve[j].y > contoursImg.rows - 1 - minDistanceToBorder) + tooNearBorder = true; + } + if(tooNearBorder) continue; + + // if it passes all the test, add to candidates vector + vector< Point2f > currentCandidate; + currentCandidate.resize(4); + for(int j = 0; j < 4; j++) { + currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y); + } + candidates.push_back(currentCandidate); + contoursOut.push_back(contours[i]); + } +} + + +/** + * @brief Assure order of candidate corners is clockwise direction + */ +static void _reorderCandidatesCorners(vector< vector< Point2f > > &candidates) { + + for(unsigned int i = 0; i < candidates.size(); i++) { + double dx1 = candidates[i][1].x - candidates[i][0].x; + double dy1 = candidates[i][1].y - candidates[i][0].y; + double dx2 = candidates[i][2].x - candidates[i][0].x; + double dy2 = candidates[i][2].y - candidates[i][0].y; + double crossProduct = (dx1 * dy2) - (dy1 * dx2); + + if(crossProduct < 0.0) { // not clockwise direction + swap(candidates[i][1], candidates[i][3]); + } + } +} + +/** + * @brief to make sure that the corner's order of both candidates (default/white) is the same + */ +static vector alignContourOrder(Point2f corner, vector< Point2f > candidate) { + uint8_t r=0; + double min = cv::norm( Vec2f( corner - candidate[0] ), NORM_L2SQR); + for(uint8_t pos=1; pos < 4; pos++) { + double nDiff = cv::norm( Vec2f( corner - candidate[pos] ), NORM_L2SQR); + if(nDiff < min){ + r = pos; + min =nDiff; + } + } + std::rotate(candidate.begin(), candidate.begin() + r, candidate.end()); + return candidate; +} + +/** + * @brief Check candidates that are too close to each other, save the potential candidates + * (i.e. 
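A hedged, single-scale distillation of the candidate search above (not part of the patch): adaptive thresholding followed by contour extraction and a 4-vertex convex-polygon test; the perimeter, corner-distance and image-border checks are omitted here.

#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch only: one threshold scale of the candidate search.
static void findSquareCandidates(const cv::Mat& grey, std::vector<std::vector<cv::Point> >& squares) {
    cv::Mat thresh;
    cv::adaptiveThreshold(grey, thresh, 255, cv::ADAPTIVE_THRESH_MEAN_C,
                          cv::THRESH_BINARY_INV, 23 /*odd window size*/, 7 /*constant*/);
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(thresh, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++) {
        std::vector<cv::Point> approx;
        cv::approxPolyDP(contours[i], approx, 0.03 * contours[i].size(), true);
        if (approx.size() == 4 && cv::isContourConvex(approx))
            squares.push_back(approx);   // perimeter / corner / border filters omitted
    }
}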
biggest/smallest contour) and remove the rest + */ +static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candidatesIn, + vector< vector< vector< Point2f > > > &candidatesSetOut, + const vector< vector< Point > > &contoursIn, + vector< vector< vector< Point > > > &contoursSetOut, + double minMarkerDistanceRate, bool detectInvertedMarker) { + + CV_Assert(minMarkerDistanceRate >= 0); + vector candGroup; + candGroup.resize(candidatesIn.size(), -1); + vector< vector > groupedCandidates; + for(unsigned int i = 0; i < candidatesIn.size(); i++) { + bool isSingleContour = true; + for(unsigned int j = i + 1; j < candidatesIn.size(); j++) { + + int minimumPerimeter = min((int)contoursIn[i].size(), (int)contoursIn[j].size() ); + + // fc is the first corner considered on one of the markers, 4 combinations are possible + for(int fc = 0; fc < 4; fc++) { + double distSq = 0; + for(int c = 0; c < 4; c++) { + // modC is the corner considering first corner is fc + int modC = (c + fc) % 4; + distSq += (candidatesIn[i][modC].x - candidatesIn[j][c].x) * + (candidatesIn[i][modC].x - candidatesIn[j][c].x) + + (candidatesIn[i][modC].y - candidatesIn[j][c].y) * + (candidatesIn[i][modC].y - candidatesIn[j][c].y); + } + distSq /= 4.; + + // if mean square distance is too low, remove the smaller one of the two markers + double minMarkerDistancePixels = double(minimumPerimeter) * minMarkerDistanceRate; + if(distSq < minMarkerDistancePixels * minMarkerDistancePixels) { + isSingleContour = false; + // i and j are not related to a group + if(candGroup[i]<0 && candGroup[j]<0){ + // mark candidates with their corresponding group number + candGroup[i] = candGroup[j] = (int)groupedCandidates.size(); + + // create group + vector grouped; + grouped.push_back(i); + grouped.push_back(j); + groupedCandidates.push_back( grouped ); + } + // i is related to a group + else if(candGroup[i] > -1 && candGroup[j] == -1){ + int group = candGroup[i]; + candGroup[j] = group; + + // add to group + groupedCandidates[group].push_back( j ); + } + // j is related to a group + else if(candGroup[j] > -1 && candGroup[i] == -1){ + int group = candGroup[j]; + candGroup[i] = group; + + // add to group + groupedCandidates[group].push_back( i ); + } + } + } + } + if (isSingleContour && candGroup[i] < 0) + { + candGroup[i] = (int)groupedCandidates.size(); + vector grouped; + grouped.push_back(i); + grouped.push_back(i); // step "save possible candidates" require minimum 2 elements + groupedCandidates.push_back(grouped); + } + } + + // save possible candidates + candidatesSetOut.clear(); + contoursSetOut.clear(); + + vector< vector< Point2f > > biggerCandidates; + vector< vector< Point > > biggerContours; + vector< vector< Point2f > > smallerCandidates; + vector< vector< Point > > smallerContours; + + // save possible candidates + for(unsigned int i = 0; i < groupedCandidates.size(); i++) { + unsigned int smallerIdx = groupedCandidates[i][0]; + unsigned int biggerIdx = smallerIdx; + double smallerArea = contourArea(candidatesIn[smallerIdx]); + double biggerArea = smallerArea; + + // evaluate group elements + for(unsigned int j = 1; j < groupedCandidates[i].size(); j++) { + unsigned int currIdx = groupedCandidates[i][j]; + double currArea = contourArea(candidatesIn[currIdx]); + + // check if current contour is bigger + if(currArea >= biggerArea) { + biggerIdx = currIdx; + biggerArea = currArea; + } + + // check if current contour is smaller + if(currArea < smallerArea && detectInvertedMarker) { + smallerIdx = currIdx; + smallerArea = 
currArea; + } + } + + // add contours and candidates + biggerCandidates.push_back(candidatesIn[biggerIdx]); + biggerContours.push_back(contoursIn[biggerIdx]); + if(detectInvertedMarker) { + smallerCandidates.push_back(alignContourOrder(candidatesIn[biggerIdx][0], candidatesIn[smallerIdx])); + smallerContours.push_back(contoursIn[smallerIdx]); + } + } + // to preserve the structure :: candidateSet< defaultCandidates, whiteCandidates > + // default candidates + candidatesSetOut.push_back(biggerCandidates); + contoursSetOut.push_back(biggerContours); + // white candidates + candidatesSetOut.push_back(smallerCandidates); + contoursSetOut.push_back(smallerContours); +} + +/** + * @brief Initial steps on finding square candidates + */ +static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates, + vector< vector< Point > > &contours, + const Ptr ¶ms) { + + CV_Assert(params->adaptiveThreshWinSizeMin >= 3 && params->adaptiveThreshWinSizeMax >= 3); + CV_Assert(params->adaptiveThreshWinSizeMax >= params->adaptiveThreshWinSizeMin); + CV_Assert(params->adaptiveThreshWinSizeStep > 0); + + // number of window sizes (scales) to apply adaptive thresholding + int nScales = (params->adaptiveThreshWinSizeMax - params->adaptiveThreshWinSizeMin) / + params->adaptiveThreshWinSizeStep + 1; + + vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales); + vector< vector< vector< Point > > > contoursArrays((size_t) nScales); + + ////for each value in the interval of thresholding window sizes + parallel_for_(Range(0, nScales), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep; + // threshold + Mat thresh; + _threshold(grey, thresh, currScale, params->adaptiveThreshConstant); + + // detect rectangles + _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i], + params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate, + params->polygonalApproxAccuracyRate, params->minCornerDistanceRate, + params->minDistanceToBorder, params->minSideLengthCanonicalImg); + } + }); + // join candidates + for(int i = 0; i < nScales; i++) { + for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) { + candidates.push_back(candidatesArrays[i][j]); + contours.push_back(contoursArrays[i][j]); + } + } +} + + +/** + * @brief Detect square candidates in the input image + */ +static void _detectCandidates(InputArray _grayImage, vector< vector< vector< Point2f > > >& candidatesSetOut, + vector< vector< vector< Point > > >& contoursSetOut, const Ptr &_params) { + Mat grey = _grayImage.getMat(); + CV_DbgAssert(grey.total() != 0); + CV_DbgAssert(grey.type() == CV_8UC1); + + /// 1. DETECT FIRST SET OF CANDIDATES + vector< vector< Point2f > > candidates; + vector< vector< Point > > contours; + _detectInitialCandidates(grey, candidates, contours, _params); + /// 2. SORT CORNERS + _reorderCandidatesCorners(candidates); + + /// 3. FILTER OUT NEAR CANDIDATE PAIRS + // save the outter/inner border (i.e. 
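A hedged distillation of the proximity test above (not part of the patch): two candidates are treated as the same physical marker when, for the best of the four cyclic corner orderings, the mean squared corner distance drops below the squared threshold derived from the smaller perimeter.

#include <opencv2/core.hpp>
#include <vector>

// Sketch only: the "same marker?" test of the proximity filter.
static bool candidatesOverlap(const std::vector<cv::Point2f>& a, const std::vector<cv::Point2f>& b,
                              int minPerimeterPixels, double minMarkerDistanceRate) {
    const double limit = minPerimeterPixels * minMarkerDistanceRate;
    for (int firstCorner = 0; firstCorner < 4; firstCorner++) {   // try the 4 cyclic orderings
        double distSq = 0;
        for (int c = 0; c < 4; c++) {
            cv::Point2f d = a[(c + firstCorner) % 4] - b[c];
            distSq += d.x * d.x + d.y * d.y;
        }
        distSq /= 4.0;                                            // mean squared corner distance
        if (distSq < limit * limit) return true;
    }
    return false;
}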
potential candidates) + _filterTooCloseCandidates(candidates, candidatesSetOut, contours, contoursSetOut, + _params->minMarkerDistanceRate, _params->detectInvertedMarker); +} + + +/** + * @brief Given an input image and candidate corners, extract the bits of the candidate, including + * the border bits + */ +static Mat _extractBits(InputArray _image, const vector& corners, int markerSize, + int markerBorderBits, int cellSize, double cellMarginRate, double minStdDevOtsu) { + CV_Assert(_image.getMat().channels() == 1); + CV_Assert(corners.size() == 4ull); + CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1); + CV_Assert(minStdDevOtsu >= 0); + + // number of bits in the marker + int markerSizeWithBorders = markerSize + 2 * markerBorderBits; + int cellMarginPixels = int(cellMarginRate * cellSize); + + Mat resultImg; // marker image after removing perspective + int resultImgSize = markerSizeWithBorders * cellSize; + Mat resultImgCorners(4, 1, CV_32FC2); + resultImgCorners.ptr< Point2f >(0)[0] = Point2f(0, 0); + resultImgCorners.ptr< Point2f >(0)[1] = Point2f((float)resultImgSize - 1, 0); + resultImgCorners.ptr< Point2f >(0)[2] = + Point2f((float)resultImgSize - 1, (float)resultImgSize - 1); + resultImgCorners.ptr< Point2f >(0)[3] = Point2f(0, (float)resultImgSize - 1); + + // remove perspective + Mat transformation = getPerspectiveTransform(corners, resultImgCorners); + warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize), + INTER_NEAREST); + + // output image containing the bits + Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0)); + + // check if standard deviation is enough to apply Otsu + // if not enough, it probably means all bits are the same color (black or white) + Mat mean, stddev; + // Remove some border just to avoid border noise from perspective transformation + Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2) + .rowRange(cellSize / 2, resultImg.rows - cellSize / 2); + meanStdDev(innerRegion, mean, stddev); + if(stddev.ptr< double >(0)[0] < minStdDevOtsu) { + // all black or all white, depending on mean value + if(mean.ptr< double >(0)[0] > 127) + bits.setTo(1); + else + bits.setTo(0); + return bits; + } + + // now extract code, first threshold using Otsu + threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU); + + // for each cell + for(int y = 0; y < markerSizeWithBorders; y++) { + for(int x = 0; x < markerSizeWithBorders; x++) { + int Xstart = x * (cellSize) + cellMarginPixels; + int Ystart = y * (cellSize) + cellMarginPixels; + Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels, + cellSize - 2 * cellMarginPixels)); + // count white pixels on each cell to assign its value + size_t nZ = (size_t) countNonZero(square); + if(nZ > square.total() / 2) bits.at< unsigned char >(y, x) = 1; + } + } + + return bits; +} + + + +/** + * @brief Return number of erroneous bits in border, i.e. number of white bits in border. 
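A hedged distillation of the bit-extraction scheme above for the common one-bit-border case (not part of the patch); the low-contrast Otsu guard and the per-cell margin are left out.

#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch only: extract a (markerSize + 2) x (markerSize + 2) bit matrix, border included.
static cv::Mat extractBitsSketch(const cv::Mat& grey, const std::vector<cv::Point2f>& corners,
                                 int markerSize, int cellSize) {
    const int sizeWithBorders = markerSize + 2;                   // markerBorderBits == 1 assumed
    const int side = sizeWithBorders * cellSize;
    std::vector<cv::Point2f> dst = { {0.f, 0.f}, {(float)side - 1, 0.f},
                                     {(float)side - 1, (float)side - 1}, {0.f, (float)side - 1} };
    cv::Mat warped;
    cv::warpPerspective(grey, warped, cv::getPerspectiveTransform(corners, dst),
                        cv::Size(side, side), cv::INTER_NEAREST); // remove perspective
    cv::threshold(warped, warped, 125, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::Mat bits(sizeWithBorders, sizeWithBorders, CV_8UC1, cv::Scalar::all(0));
    for (int y = 0; y < sizeWithBorders; y++)
        for (int x = 0; x < sizeWithBorders; x++) {
            cv::Mat cell = warped(cv::Rect(x * cellSize, y * cellSize, cellSize, cellSize));
            if (cv::countNonZero(cell) > (int)cell.total() / 2)   // majority vote per cell
                bits.at<uchar>(y, x) = 1;
        }
    return bits;
}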
+ */ +static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) { + + int sizeWithBorders = markerSize + 2 * borderSize; + + CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders); + + int totalErrors = 0; + for(int y = 0; y < sizeWithBorders; y++) { + for(int k = 0; k < borderSize; k++) { + if(bits.ptr< unsigned char >(y)[k] != 0) totalErrors++; + if(bits.ptr< unsigned char >(y)[sizeWithBorders - 1 - k] != 0) totalErrors++; + } + } + for(int x = borderSize; x < sizeWithBorders - borderSize; x++) { + for(int k = 0; k < borderSize; k++) { + if(bits.ptr< unsigned char >(k)[x] != 0) totalErrors++; + if(bits.ptr< unsigned char >(sizeWithBorders - 1 - k)[x] != 0) totalErrors++; + } + } + return totalErrors; +} + + +/** + * @brief Tries to identify one candidate given the dictionary + * @return candidate typ. zero if the candidate is not valid, + * 1 if the candidate is a black candidate (default candidate) + * 2 if the candidate is a white candidate + */ +static uint8_t _identifyOneCandidate(const Ptr& dictionary, InputArray _image, + const vector& _corners, int& idx, + const Ptr& params, int& rotation, + const float scale = 1.f) { + CV_DbgAssert(_corners.size() == 4); + CV_DbgAssert(_image.getMat().total() != 0); + CV_DbgAssert(params->markerBorderBits > 0); + uint8_t typ=1; + // get bits + // scale corners to the correct size to search on the corresponding image pyramid + vector scaled_corners(4); + for (int i = 0; i < 4; ++i) { + scaled_corners[i].x = _corners[i].x * scale; + scaled_corners[i].y = _corners[i].y * scale; + } + + Mat candidateBits = + _extractBits(_image, scaled_corners, dictionary->markerSize, params->markerBorderBits, + params->perspectiveRemovePixelPerCell, + params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); + + // analyze border bits + int maximumErrorsInBorder = + int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate); + int borderErrors = + _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits); + + // check if it is a white marker + if(params->detectInvertedMarker){ + // to get from 255 to 1 + Mat invertedImg = ~candidateBits-254; + int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits); + // white marker + if(invBError maximumErrorsInBorder) return 0; // border is wrong + + // take only inner bits + Mat onlyBits = + candidateBits.rowRange(params->markerBorderBits, + candidateBits.rows - params->markerBorderBits) + .colRange(params->markerBorderBits, candidateBits.cols - params->markerBorderBits); + + // try to indentify the marker + if(!dictionary->identify(onlyBits, idx, rotation, params->errorCorrectionRate)) + return 0; + + return typ; +} + +/** + * @brief rotate the initial corner to get to the right position + */ +static void correctCornerPosition( vector< Point2f >& _candidate, int rotate){ + std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end()); +} + +static size_t _findOptPyrImageForCanonicalImg( + const std::vector& img_pyr, + const int scaled_width, + const int cur_perimeter, + const int min_perimeter) { + CV_Assert(scaled_width > 0); + size_t optLevel = 0; + float dist = std::numeric_limits::max(); + for (size_t i = 0; i < img_pyr.size(); ++i) { + const float scale = img_pyr[i].cols / static_cast(scaled_width); + const float perimeter_scaled = cur_perimeter * scale; + // instead of std::abs() favor the larger pyramid level by checking if the distance is 
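A hedged note on the bit-inversion trick used below for white (inverted) markers: the extracted bits hold {0,1}, so ~bits maps them to {255,254} and subtracting 254 yields the inverted {1,0} pattern that is re-checked against the border rule.

#include <opencv2/core.hpp>

// Sketch only: ~bits maps {0,1} to {255,254}; subtracting 254 gives the inverted {1,0} pattern.
static cv::Mat invertBitMatrix(const cv::Mat& bits01) {
    return ~bits01 - 254;
}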
postive + // will slow down the algorithm but find more corners in the end + const float new_dist = perimeter_scaled - min_perimeter; + if (new_dist < dist && new_dist > 0.f) { + dist = new_dist; + optLevel = i; + } + } + return optLevel; +} + +/** + * @brief Identify square candidates according to a marker dictionary + */ + +static void _identifyCandidates(InputArray grey, + const std::vector& image_pyr, + vector< vector< vector< Point2f > > >& _candidatesSet, + vector< vector< vector > >& _contoursSet, const Ptr &_dictionary, + vector< vector< Point2f > >& _accepted, vector< vector >& _contours, vector< int >& ids, + const Ptr ¶ms, + OutputArrayOfArrays _rejected = noArray()) { + CV_DbgAssert(grey.getMat().total() != 0); + CV_DbgAssert(grey.getMat().type() == CV_8UC1); + int ncandidates = (int)_candidatesSet[0].size(); + vector< vector< Point2f > > accepted; + vector< vector< Point2f > > rejected; + vector< vector< Point > > contours; + + vector< int > idsTmp(ncandidates, -1); + vector< int > rotated(ncandidates, 0); + vector< uint8_t > validCandidates(ncandidates, 0); + + //// Analyze each of the candidates + parallel_for_(Range(0, ncandidates), [&](const Range &range) { + const int begin = range.start; + const int end = range.end; + + vector< vector< Point2f > >& candidates = params->detectInvertedMarker ? _candidatesSet[1] : _candidatesSet[0]; + vector< vector< Point > >& contourS = params->detectInvertedMarker ? _contoursSet[1] : _contoursSet[0]; + + for(int i = begin; i < end; i++) { + int currId = -1; + // implements equation (4) + if (params->useAruco3Detection) { + const int perimeterOfContour = static_cast(contourS[i].size()); + const int min_perimeter = params->minSideLengthCanonicalImg * 4; + const size_t nearestImgId = _findOptPyrImageForCanonicalImg(image_pyr, grey.cols(), perimeterOfContour, min_perimeter); + const float scale = image_pyr[nearestImgId].cols / static_cast(grey.cols()); + + validCandidates[i] = _identifyOneCandidate(_dictionary, image_pyr[nearestImgId], candidates[i], currId, params, rotated[i], scale); + } + else { + validCandidates[i] = _identifyOneCandidate(_dictionary, grey, candidates[i], currId, params, rotated[i]); + } + + if(validCandidates[i] > 0) + idsTmp[i] = currId; + } + }); + + for(int i = 0; i < ncandidates; i++) { + if(validCandidates[i] > 0) { + // to choose the right set of candidates :: 0 for default, 1 for white markers + uint8_t set = validCandidates[i]-1; + + // shift corner positions to the correct rotation + correctCornerPosition(_candidatesSet[set][i], rotated[i]); + + if( !params->detectInvertedMarker && validCandidates[i] == 2 ) + continue; + + // add valid candidate + accepted.push_back(_candidatesSet[set][i]); + ids.push_back(idsTmp[i]); + + contours.push_back(_contoursSet[set][i]); + + } else { + rejected.push_back(_candidatesSet[0][i]); + } + } + + // parse output + _accepted = accepted; + + _contours= contours; + + if(_rejected.needed()) { + _copyVector2Output(rejected, _rejected); + } +} + +/** + * Line fitting A * B = C :: Called from function refineCandidateLines + * @param nContours, contour-container + */ +static Point3f _interpolate2Dline(const std::vector& nContours){ + CV_Assert(nContours.size() >= 2); + float minX, minY, maxX, maxY; + minX = maxX = nContours[0].x; + minY = maxY = nContours[0].y; + + for(unsigned int i = 0; i< nContours.size(); i++){ + minX = nContours[i].x < minX ? nContours[i].x : minX; + minY = nContours[i].y < minY ? nContours[i].y : minY; + maxX = nContours[i].x > maxX ? 
nContours[i].x : maxX; + maxY = nContours[i].y > maxY ? nContours[i].y : maxY; + } + + Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2) + Mat B((int)nContours.size(), 1, CV_32F); // Variables Matrix (N x 1) + Mat C; // Constant + + if(maxX - minX > maxY - minY){ + for(unsigned int i =0; i < nContours.size(); i++){ + A.at(i,0)= nContours[i].x; + B.at(i,0)= nContours[i].y; + } + + solve(A, B, C, DECOMP_NORMAL); + + return Point3f(C.at(0, 0), -1., C.at(1, 0)); + } + else{ + for(unsigned int i =0; i < nContours.size(); i++){ + A.at(i,0)= nContours[i].y; + B.at(i,0)= nContours[i].x; + } + + solve(A, B, C, DECOMP_NORMAL); + + return Point3f(-1., C.at(0, 0), C.at(1, 0)); + } + +} + +/** + * Find the Point where the lines crosses :: Called from function refineCandidateLines + * @param nLine1 + * @param nLine2 + * @return Crossed Point + */ +static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){ + Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y); + Vec2f B(-nLine1.z, -nLine2.z); + return Vec2f(A.solve(B).val); +} + +/** + * Refine Corners using the contour vector :: Called from function detectMarkers + * @param nContours, contour-container + * @param nCorners, candidate Corners + * @param camMatrix, cameraMatrix input 3x3 floating-point camera matrix + * @param distCoeff, distCoeffs vector of distortion coefficient + */ +static void _refineCandidateLines(std::vector& nContours, std::vector& nCorners){ + vector contour2f(nContours.begin(), nContours.end()); + /* 5 groups :: to group the edges + * 4 - classified by its corner + * extra group - (temporary) if contours do not begin with a corner + */ + vector cntPts[5]; + int cornerIndex[4]={-1}; + int group=4; + + for ( unsigned int i =0; i < nContours.size(); i++ ) { + for(unsigned int j=0; j<4; j++){ + if ( nCorners[j] == contour2f[i] ){ + cornerIndex[j] = i; + group=j; + } + } + cntPts[group].push_back(contour2f[i]); + } + for (int i = 0; i < 4; i++) + { + CV_Assert(cornerIndex[i] != -1); + } + // saves extra group into corresponding + if( !cntPts[4].empty() ){ + for( unsigned int i=0; i < cntPts[4].size() ; i++ ) + cntPts[group].push_back(cntPts[4].at(i)); + cntPts[4].clear(); + } + + //Evaluate contour direction :: using the position of the detected corners + int inc=1; + + inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > cornerIndex[0]) ) ? -1:inc; + inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc; + + // calculate the line :: who passes through the grouped points + Point3f lines[4]; + for(int i=0; i<4; i++){ + lines[i]=_interpolate2Dline(cntPts[i]); + } + + /* + * calculate the corner :: where the lines crosses to each other + * clockwise direction no clockwise direction + * 0 1 + * .---. 1 .---. 2 + * | | | | + * 3 .___. 0 .___. 
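A hedged distillation of the two numeric building blocks of the contour refinement above (not part of the patch): an edge is fitted by least squares through cv::solve with DECOMP_NORMAL, and adjacent edges are intersected with a 2x2 solve; only the x-dominant fitting branch is shown.

#include <opencv2/core.hpp>
#include <vector>

// Sketch only: least-squares edge fit (a*x + b*y + c = 0) and intersection of two edges.
static cv::Point3f fitEdge(const std::vector<cv::Point2f>& pts) {   // x-dominant branch only
    cv::Mat A = cv::Mat::ones((int)pts.size(), 2, CV_32F);          // columns: x, 1
    cv::Mat B((int)pts.size(), 1, CV_32F);                          // column:  y
    for (int i = 0; i < (int)pts.size(); i++) {
        A.at<float>(i, 0) = pts[i].x;
        B.at<float>(i, 0) = pts[i].y;
    }
    cv::Mat C;
    cv::solve(A, B, C, cv::DECOMP_NORMAL);                          // y = C0*x + C1
    return cv::Point3f(C.at<float>(0, 0), -1.f, C.at<float>(1, 0)); // C0*x - y + C1 = 0
}

static cv::Point2f intersectEdges(const cv::Point3f& l1, const cv::Point3f& l2) {
    cv::Matx22f A(l1.x, l1.y, l2.x, l2.y);
    cv::Vec2f b(-l1.z, -l2.z);
    cv::Vec2f r = A.solve(b, cv::DECOMP_LU);
    return cv::Point2f(r[0], r[1]);
}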
+ * 2 3 + */ + for(int i=0; i < 4; i++){ + if(inc<0) + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30 + else + nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23 + } +} + +static inline void findCornerInPyrImage(const float scale_init, const int closest_pyr_image_idx, + const std::vector& grey_pyramid, Mat corners, + const Ptr& params) { + // scale them to the closest pyramid level + if (scale_init != 1.f) + corners *= scale_init; // scale_init * scale_pyr + for (int idx = closest_pyr_image_idx - 1; idx >= 0; --idx) { + // scale them to new pyramid level + corners *= 2.f; // *= scale_pyr; + // use larger win size for larger images + const int subpix_win_size = std::max(grey_pyramid[idx].cols, grey_pyramid[idx].rows) > 1080 ? 5 : 3; + cornerSubPix(grey_pyramid[idx], corners, + Size(subpix_win_size, subpix_win_size), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } +} + +void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids, + OutputArrayOfArrays _rejectedImgPoints) { + CV_Assert(!_image.empty()); + CV_Assert(params->markerBorderBits > 0); + // check that the parameters are set correctly if Aruco3 is used + CV_Assert(!(params->useAruco3Detection == true && + params->minSideLengthCanonicalImg == 0 && + params->minMarkerLengthRatioOriginalImg == 0.0)); + + Mat grey; + _convertToGrey(_image.getMat(), grey); + + // Aruco3 functionality is the extension of Aruco. + // The description can be found in: + // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romera-Ramirez et al. + // if Aruco3 functionality if not wanted + // change some parameters to be sure to turn it off + if (!params->useAruco3Detection) { + params->minMarkerLengthRatioOriginalImg = 0.0; + params->minSideLengthCanonicalImg = 0; + } + else { + // always turn on corner refinement in case of Aruco3, due to upsampling + params->cornerRefinementMethod = CORNER_REFINE_SUBPIX; + // only CORNER_REFINE_SUBPIX implement correctly for useAruco3Detection + // Todo: update other CORNER_REFINE methods + } + + /// Step 0: equation (2) from paper [1] + const float fxfy = (!params->useAruco3Detection ? 1.f : params->minSideLengthCanonicalImg / + (params->minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*params->minMarkerLengthRatioOriginalImg)); + + /// Step 1: create image pyramid. Section 3.4. 
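A hedged restatement of the ArUco3 downsampling factor computed below (equation (2) of the cited paper): the segmentation image is shrunk so that the smallest marker of interest still spans roughly minSideLengthCanonicalImg pixels.

#include <algorithm>

// Sketch only: equation (2); the result multiplies both image dimensions before segmentation.
static float aruco3Scale(int imgWidth, int imgHeight,
                         int minSideLengthCanonicalImg,
                         float minMarkerLengthRatioOriginalImg) {
    return minSideLengthCanonicalImg /
           (minSideLengthCanonicalImg +
            std::max(imgWidth, imgHeight) * minMarkerLengthRatioOriginalImg);
}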
in [1] + std::vector grey_pyramid; + int closest_pyr_image_idx = 0, num_levels = 0; + //// Step 1.1: resize image with equation (1) from paper [1] + if (params->useAruco3Detection) { + const float scale_pyr = 2.f; + const float img_area = static_cast(grey.rows*grey.cols); + const float min_area_marker = static_cast(params->minSideLengthCanonicalImg*params->minSideLengthCanonicalImg); + // find max level + num_levels = static_cast(log2(img_area / min_area_marker)/scale_pyr); + // the closest pyramid image to the downsampled segmentation image + // will later be used as start index for corner upsampling + const float scale_img_area = img_area * fxfy * fxfy; + closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr); + } + cv::buildPyramid(grey, grey_pyramid, num_levels); + + // resize to segmentation image + // in this reduces size the contours will be detected + if (fxfy != 1.f) + cv::resize(grey, grey, cv::Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows))); + + /// STEP 2: Detect marker candidates + vector< vector< Point2f > > candidates; + vector< vector< Point > > contours; + vector< int > ids; + + vector< vector< vector< Point2f > > > candidatesSet; + vector< vector< vector< Point > > > contoursSet; + + /// STEP 2.a Detect marker candidates :: using AprilTag + if(params->cornerRefinementMethod == CORNER_REFINE_APRILTAG){ + _apriltag(grey, params, candidates, contours); + + candidatesSet.push_back(candidates); + contoursSet.push_back(contours); + } + /// STEP 2.b Detect marker candidates :: traditional way + else + _detectCandidates(grey, candidatesSet, contoursSet, params); + + /// STEP 2: Check candidate codification (identify markers) + _identifyCandidates(grey, grey_pyramid, candidatesSet, contoursSet, dictionary, + candidates, contours, ids, params, _rejectedImgPoints); + + /// STEP 3: Corner refinement :: use corner subpix + if( params->cornerRefinementMethod == CORNER_REFINE_SUBPIX ) { + CV_Assert(params->cornerRefinementWinSize > 0 && params->cornerRefinementMaxIterations > 0 && + params->cornerRefinementMinAccuracy > 0); + // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners + parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) { + const int begin = range.start; + const int end = range.end; + + for (int i = begin; i < end; i++) { + if (params->useAruco3Detection) { + const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols; + findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, Mat(candidates[i]), params); + } + else + cornerSubPix(grey, Mat(candidates[i]), + Size(params->cornerRefinementWinSize, params->cornerRefinementWinSize), + Size(-1, -1), + TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } + }); + } + + /// STEP 3, Optional : Corner refinement :: use contour container + if( params->cornerRefinementMethod == CORNER_REFINE_CONTOUR){ + + if(! 
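A hedged end-to-end usage sketch of the detection entry point above (not part of the patch); getPredefinedDictionary, DetectorParameters::create() and drawDetectedMarkers are assumed to remain available in the public API, and the refine values are placeholders.

#include <opencv2/aruco.hpp>
#include <vector>

// Sketch only: detect markers in an image and draw the result.
static void detectAndDraw(cv::Mat& image) {
    cv::Ptr<cv::aruco::Dictionary> dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
    cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
    cv::Ptr<cv::aruco::RefineParameters> refineParams =
        cv::aruco::RefineParameters::create(10.f, 3.f, true);     // placeholder values
    cv::aruco::ArucoDetector detector(dictionary, params, refineParams);

    std::vector<std::vector<cv::Point2f> > corners, rejected;
    std::vector<int> ids;
    detector.detectMarkers(image, corners, ids, rejected);
    if (!ids.empty())
        cv::aruco::drawDetectedMarkers(image, corners, ids);
}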
_ids.empty()){ + + // do corner refinement using the contours for each detected markers + parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) { + for (int i = range.start; i < range.end; i++) { + _refineCandidateLines(contours[i], candidates[i]); + } + }); + } + } + + if (params->cornerRefinementMethod != CORNER_REFINE_SUBPIX && fxfy != 1.f) { + // only CORNER_REFINE_SUBPIX implement correctly for useAruco3Detection + // Todo: update other CORNER_REFINE methods + + // scale to orignal size, this however will lead to inaccurate detections! + for (auto &vecPoints : candidates) + for (auto &point : vecPoints) + point *= 1.f/fxfy; + } + + // copy to output arrays + _copyVector2Output(candidates, _corners); + Mat(ids).copyTo(_ids); +} + +/** + * Project board markers that are not included in the list of detected markers + */ +static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, + InputOutputArray _detectedIds, InputArray _cameraMatrix, InputArray _distCoeffs, + vector >& _undetectedMarkersProjectedCorners, + OutputArray _undetectedMarkersIds) { + // first estimate board pose with the current avaible markers + Mat rvec, tvec; + int boardDetectedMarkers = aruco::estimatePoseBoard(_detectedCorners, _detectedIds, _board, + _cameraMatrix, _distCoeffs, rvec, tvec); + + // at least one marker from board so rvec and tvec are valid + if(boardDetectedMarkers == 0) return; + + // search undetected markers and project them using the previous pose + vector > undetectedCorners; + vector undetectedIds; + for(unsigned int i = 0; i < _board->getIds().size(); i++) { + int foundIdx = -1; + for(unsigned int j = 0; j < _detectedIds.total(); j++) { + if(_board->getIds()[i] == _detectedIds.getMat().ptr< int >()[j]) { + foundIdx = j; + break; + } + } + + // not detected + if(foundIdx == -1) { + undetectedCorners.push_back(vector()); + undetectedIds.push_back(_board->getIds()[i]); + projectPoints(_board->getObjPoints()[i], rvec, tvec, _cameraMatrix, _distCoeffs, + undetectedCorners.back()); + } + } + // parse output + Mat(undetectedIds).copyTo(_undetectedMarkersIds); + _undetectedMarkersProjectedCorners = undetectedCorners; +} + +/** + * Interpolate board markers that are not included in the list of detected markers using + * global homography + */ +static void _projectUndetectedMarkers(const Ptr &_board, InputOutputArrayOfArrays _detectedCorners, + InputOutputArray _detectedIds, + vector >& _undetectedMarkersProjectedCorners, + OutputArray _undetectedMarkersIds) { + // check board points are in the same plane, if not, global homography cannot be applied + CV_Assert(_board->getObjPoints().size() > 0); + CV_Assert(_board->getObjPoints()[0].size() > 0); + float boardZ = _board->getObjPoints()[0][0].z; + for(unsigned int i = 0; i < _board->getObjPoints().size(); i++) { + for(unsigned int j = 0; j < _board->getObjPoints()[i].size(); j++) + CV_Assert(boardZ == _board->getObjPoints()[i][j].z); + } + + vector detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected + // marker corners in a single vector + vector imageCornersAll; // Image corners of all detected markers in a single vector + vector > undetectedMarkersObj2D; // Object coordinates (without Z) of all + // missing markers in different vectors + vector undetectedMarkersIds; // ids of missing markers + // find markers included in board, and missing markers from board. 
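A hedged isolation of the camera-model reprojection path above (not part of the patch): once the board pose is estimated from the detected subset, a missing marker's corners are simply its board-frame object points projected through the calibrated camera.

#include <opencv2/aruco.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

// Sketch only: project the corners of one board marker that was not detected.
static void projectMissingMarker(const cv::Ptr<cv::aruco::Board>& board, size_t markerIdx,
                                 const std::vector<std::vector<cv::Point2f> >& detectedCorners,
                                 const std::vector<int>& detectedIds,
                                 const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                                 std::vector<cv::Point2f>& projectedCorners) {
    cv::Mat rvec, tvec;
    int used = cv::aruco::estimatePoseBoard(detectedCorners, detectedIds, board,
                                            cameraMatrix, distCoeffs, rvec, tvec);
    if (used == 0) return;                                        // board pose not available
    cv::projectPoints(board->getObjPoints()[markerIdx], rvec, tvec,
                      cameraMatrix, distCoeffs, projectedCorners);
}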
Fill the previous vectors + for(unsigned int j = 0; j < _board->getIds().size(); j++) { + bool found = false; + for(unsigned int i = 0; i < _detectedIds.total(); i++) { + if(_detectedIds.getMat().ptr< int >()[i] == _board->getIds()[j]) { + for(int c = 0; c < 4; c++) { + imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]); + detectedMarkersObj2DAll.push_back( + Point2f(_board->getObjPoints()[j][c].x, _board->getObjPoints()[j][c].y)); + } + found = true; + break; + } + } + if(!found) { + undetectedMarkersObj2D.push_back(vector()); + for(int c = 0; c < 4; c++) { + undetectedMarkersObj2D.back().push_back( + Point2f(_board->getObjPoints()[j][c].x, _board->getObjPoints()[j][c].y)); + } + undetectedMarkersIds.push_back(_board->getIds()[j]); + } + } + if(imageCornersAll.size() == 0) return; + + // get homography from detected markers + Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll); + + _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size()); + + // for each undetected marker, apply transformation + for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) { + perspectiveTransform(undetectedMarkersObj2D[i], _undetectedMarkersProjectedCorners[i], transformation); + } + Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds); +} + + +void ArucoDetector::refineDetectedMarkers(InputArray _image, const Ptr &_board, + InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds, + InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix, + InputArray _distCoeffs, OutputArray _recoveredIdxs) { + CV_Assert(refineParams->minRepDistance > 0); + + if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return; + + // get projections of missing markers in the board + vector< vector< Point2f > > undetectedMarkersCorners; + vector< int > undetectedMarkersIds; + if(_cameraMatrix.total() != 0) { + // reproject based on camera projection model + _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix, _distCoeffs, + undetectedMarkersCorners, undetectedMarkersIds); + + } else { + // reproject based on global homography + _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners, + undetectedMarkersIds); + } + + // list of missing markers indicating if they have been assigned to a candidate + vector< bool > alreadyIdentified(_rejectedCorners.total(), false); + + // maximum bits that can be corrected + int maxCorrectionRecalculated = + int(double(dictionary->maxCorrectionBits) * refineParams->errorCorrectionRate); + + Mat grey; + _convertToGrey(_image, grey); + + // vector of final detected marker corners and ids + vector > finalAcceptedCorners; + vector< int > finalAcceptedIds; + // fill with the current markers + finalAcceptedCorners.resize(_detectedCorners.total()); + finalAcceptedIds.resize(_detectedIds.total()); + for(unsigned int i = 0; i < _detectedIds.total(); i++) { + finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone(); + finalAcceptedIds[i] = _detectedIds.getMat().ptr< int >()[i]; + } + vector< int > recoveredIdxs; // original indexes of accepted markers in _rejectedCorners + + // for each missing marker, try to find a correspondence + for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) { + + // best match at the moment + int closestCandidateIdx = -1; + double closestCandidateDistance = refineParams->minRepDistance * refineParams->minRepDistance + 1; + Mat closestRotatedMarker; + + for(unsigned int j = 0; j < 
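// Illustrative sketch of the homography fallback above in isolation: when no
// intrinsics are available and the board is planar, map board XY coordinates
// (Z dropped) to image coordinates for a marker that was not detected.
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>

static std::vector<cv::Point2f> projectByHomography(
        const std::vector<cv::Point2f>& boardPtsDetected,   // object XY of detected marker corners
        const std::vector<cv::Point2f>& imagePtsDetected,   // matching image corners
        const std::vector<cv::Point2f>& boardPtsMissing) {  // object XY of an undetected marker
    cv::Mat H = cv::findHomography(boardPtsDetected, imagePtsDetected);
    std::vector<cv::Point2f> projected;
    cv::perspectiveTransform(boardPtsMissing, projected, H);
    return projected;
}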
_rejectedCorners.total(); j++) { + if(alreadyIdentified[j]) continue; + + // check distance + double minDistance = closestCandidateDistance + 1; + bool valid = false; + int validRot = 0; + for(int c = 0; c < 4; c++) { // first corner in rejected candidate + double currentMaxDistance = 0; + for(int k = 0; k < 4; k++) { + Point2f rejCorner = _rejectedCorners.getMat(j).ptr< Point2f >()[(c + k) % 4]; + Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner; + double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y; + currentMaxDistance = max(currentMaxDistance, cornerDist); + } + // if distance is better than current best distance + if(currentMaxDistance < closestCandidateDistance) { + valid = true; + validRot = c; + minDistance = currentMaxDistance; + } + if(!refineParams->checkAllOrders) break; + } + + if(!valid) continue; + + // apply rotation + Mat rotatedMarker; + if(refineParams->checkAllOrders) { + rotatedMarker = Mat(4, 1, CV_32FC2); + for(int c = 0; c < 4; c++) + rotatedMarker.ptr< Point2f >()[c] = + _rejectedCorners.getMat(j).ptr< Point2f >()[(c + 4 + validRot) % 4]; + } + else rotatedMarker = _rejectedCorners.getMat(j); + + // last filter, check if inner code is close enough to the assigned marker code + int codeDistance = 0; + // if errorCorrectionRate, dont check code + if(refineParams->errorCorrectionRate >= 0) { + + // extract bits + Mat bits = _extractBits( + grey, rotatedMarker, dictionary->markerSize, params->markerBorderBits, + params->perspectiveRemovePixelPerCell, + params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); + + Mat onlyBits = + bits.rowRange(params->markerBorderBits, bits.rows - params->markerBorderBits) + .colRange(params->markerBorderBits, bits.rows - params->markerBorderBits); + + codeDistance = + dictionary->getDistanceToId(onlyBits, undetectedMarkersIds[i], false); + } + + // if everythin is ok, assign values to current best match + if(refineParams->errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) { + closestCandidateIdx = j; + closestCandidateDistance = minDistance; + closestRotatedMarker = rotatedMarker; + } + } + + // if at least one good match, we have rescue the missing marker + if(closestCandidateIdx >= 0) { + + // subpixel refinement + if(params->cornerRefinementMethod == CORNER_REFINE_SUBPIX) { + CV_Assert(params->cornerRefinementWinSize > 0 && + params->cornerRefinementMaxIterations > 0 && + params->cornerRefinementMinAccuracy > 0); + cornerSubPix(grey, closestRotatedMarker, + Size(params->cornerRefinementWinSize, params->cornerRefinementWinSize), + Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, + params->cornerRefinementMaxIterations, + params->cornerRefinementMinAccuracy)); + } + + // remove from rejected + alreadyIdentified[closestCandidateIdx] = true; + + // add to detected + finalAcceptedCorners.push_back(closestRotatedMarker); + finalAcceptedIds.push_back(undetectedMarkersIds[i]); + + // add the original index of the candidate + recoveredIdxs.push_back(closestCandidateIdx); + } + } + + // parse output + if(finalAcceptedIds.size() != _detectedIds.total()) { + // parse output + Mat(finalAcceptedIds).copyTo(_detectedIds); + _copyVector2Output(finalAcceptedCorners, _detectedCorners); + + // recalculate _rejectedCorners based on alreadyIdentified + vector > finalRejected; + for(unsigned int i = 0; i < alreadyIdentified.size(); i++) { + if(!alreadyIdentified[i]) { + finalRejected.push_back(_rejectedCorners.getMat(i).clone()); + } + } + 
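// Illustrative sketch of driving the recovery loop above through the public
// ArucoDetector interface. The RefineParameters constructor arguments
// (minRepDistance, errorCorrectionRate, checkAllOrders) mirror the fields read
// in this function; the values shown are placeholders.
#include <opencv2/aruco.hpp>

static void recoverBoardMarkers(const cv::Mat& image, const cv::Ptr<cv::aruco::Board>& board,
                                std::vector<std::vector<cv::Point2f>>& corners, std::vector<int>& ids,
                                std::vector<std::vector<cv::Point2f>>& rejected,
                                const cv::Mat& K, const cv::Mat& dist) {
    using namespace cv;
    Ptr<aruco::RefineParameters> refineParams = makePtr<aruco::RefineParameters>(10.f, 3.f, true);
    aruco::ArucoDetector detector(board->getDictionary(), aruco::DetectorParameters::create(), refineParams);
    std::vector<int> recoveredIdxs;   // original indices of rescued candidates in 'rejected'
    detector.refineDetectedMarkers(image, board, corners, ids, rejected, K, dist, recoveredIdxs);
}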
_copyVector2Output(finalRejected, _rejectedCorners); + + if(_recoveredIdxs.needed()) { + Mat(recoveredIdxs).copyTo(_recoveredIdxs); + } + } +} + + +void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners, + InputArray _ids, Scalar borderColor) { + CV_Assert(_image.getMat().total() != 0 && + (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); + CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); + + // calculate colors + Scalar textColor, cornerColor; + textColor = cornerColor = borderColor; + swap(textColor.val[0], textColor.val[1]); // text color just sawp G and R + swap(cornerColor.val[1], cornerColor.val[2]); // corner color just sawp G and B + + int nMarkers = (int)_corners.total(); + for(int i = 0; i < nMarkers; i++) { + Mat currentMarker = _corners.getMat(i); + CV_Assert(currentMarker.total() == 4 && currentMarker.type() == CV_32FC2); + + // draw marker sides + for(int j = 0; j < 4; j++) { + Point2f p0, p1; + p0 = currentMarker.ptr< Point2f >(0)[j]; + p1 = currentMarker.ptr< Point2f >(0)[(j + 1) % 4]; + line(_image, p0, p1, borderColor, 1); + } + // draw first corner mark + rectangle(_image, currentMarker.ptr< Point2f >(0)[0] - Point2f(3, 3), + currentMarker.ptr< Point2f >(0)[0] + Point2f(3, 3), cornerColor, 1, LINE_AA); + + // draw ID + if(_ids.total() != 0) { + Point2f cent(0, 0); + for(int p = 0; p < 4; p++) + cent += currentMarker.ptr< Point2f >(0)[p]; + cent = cent / 4.; + stringstream s; + s << "id=" << _ids.getMat().ptr< int >(0)[i]; + putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2); + } + } +} + +void drawMarker(const Ptr &dictionary, int id, int sidePixels, OutputArray _img, int borderBits) { + dictionary->drawMarker(id, sidePixels, _img, borderBits); +} + +} +} diff --git a/modules/aruco/src/aruco_utils.cpp b/modules/aruco/src/aruco_utils.cpp new file mode 100644 index 00000000000..8d680848caa --- /dev/null +++ b/modules/aruco/src/aruco_utils.cpp @@ -0,0 +1,50 @@ +// This file is part of OpenCV project. 
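// Illustrative sketch for the drawing helpers defined above: render a canonical
// marker image, then overlay detections on a camera frame. The id, sizes and
// file name are placeholders.
#include <opencv2/aruco.hpp>
#include <opencv2/imgcodecs.hpp>

static void drawExample(cv::Mat& frame, const std::vector<std::vector<cv::Point2f>>& corners,
                        const std::vector<int>& ids) {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_4X4_50);
    Mat markerImg;
    aruco::drawMarker(dict, 23, 200, markerImg, 1);           // id 23, 200 px side, 1-bit border
    imwrite("marker23.png", markerImg);
    if (!ids.empty())
        aruco::drawDetectedMarkers(frame, corners, ids, Scalar(0, 255, 0));
}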
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include "aruco_utils.hpp" +#include + +namespace cv { +namespace aruco { +using namespace std; + +void _copyVector2Output(std::vector > &vec, OutputArrayOfArrays out, const float scale) { + out.create((int)vec.size(), 1, CV_32FC2); + if(out.isMatVector()) { + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + Mat &m = out.getMatRef(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else if(out.isUMatVector()) { + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + UMat &m = out.getUMatRef(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){ + for (unsigned int i = 0; i < vec.size(); i++) { + out.create(4, 1, CV_32FC2, i); + Mat m = out.getMat(i); + Mat(Mat(vec[i]).t()*scale).copyTo(m); + } + } + else { + CV_Error(cv::Error::StsNotImplemented, + "Only Mat vector, UMat vector, and vector OutputArrays are currently supported."); + } +} + +void _convertToGrey(InputArray _in, OutputArray _out) { + CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3); + if(_in.type() == CV_8UC3) + cvtColor(_in, _out, COLOR_BGR2GRAY); + else + _in.copyTo(_out); +} + +} +} diff --git a/modules/aruco/src/aruco_utils.hpp b/modules/aruco/src/aruco_utils.hpp new file mode 100644 index 00000000000..029cd3fa81d --- /dev/null +++ b/modules/aruco/src/aruco_utils.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html +#ifndef __OPENCV_ARUCO_UTILS_HPP__ +#define __OPENCV_ARUCO_UTILS_HPP__ + +#include +#include + +namespace cv { +namespace aruco { + +/** + * @brief Copy the contents of a corners vector to an OutputArray, settings its size. + */ +void _copyVector2Output(std::vector > &vec, OutputArrayOfArrays out, const float scale = 1.f); + +/** + * @brief Convert input image to gray if it is a 3-channels image + */ +void _convertToGrey(InputArray _in, OutputArray _out); + +template +inline bool readParameter(const std::string& name, T& parameter, const FileNode& node) +{ + if (!node.empty() && !node[name].empty()) { + node[name] >> parameter; + return true; + } + return false; +} + +template +inline bool readWriteParameter(const std::string& name, T& parameter, const Ptr readNode = nullptr, + const Ptr writeStorage = nullptr) { + if (!readNode.empty()) + return readParameter(name, parameter, *readNode); + *writeStorage << name << parameter; + return true; +} + +} +} +#endif diff --git a/modules/aruco/src/board.cpp b/modules/aruco/src/board.cpp new file mode 100644 index 00000000000..53352f35f58 --- /dev/null +++ b/modules/aruco/src/board.cpp @@ -0,0 +1,467 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#include +#include +#include + +namespace cv { +namespace aruco { +using namespace std; + +/** @brief Implementation of drawPlanarBoard that accepts a raw Board pointer. 
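// Illustrative sketch for the readWriteParameter() helper declared in
// aruco_utils.hpp (a module-internal header): one call site serves both reading
// from a FileNode and writing to a FileStorage, so exactly one of the two
// pointers should be set. The struct and field name here are hypothetical.
#include "aruco_utils.hpp"
#include <opencv2/core/persistence.hpp>

struct ExampleParams {
    int adaptiveThreshWinSizeMin = 3;
    bool readWrite(const cv::Ptr<cv::FileNode>& readNode = nullptr,
                   const cv::Ptr<cv::FileStorage>& writeStorage = nullptr) {
        // reads the field when readNode is set, otherwise streams it into writeStorage
        return cv::aruco::readWriteParameter("adaptiveThreshWinSizeMin",
                                             adaptiveThreshWinSizeMin, readNode, writeStorage);
    }
};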
+ */ +static void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize, int borderBits) { + CV_Assert(!outSize.empty()); + CV_Assert(marginSize >= 0); + + _img.create(outSize, CV_8UC1); + Mat out = _img.getMat(); + out.setTo(Scalar::all(255)); + out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize); + + // calculate max and min values in XY plane + CV_Assert(_board->getObjPoints().size() > 0); + float minX, maxX, minY, maxY; + minX = maxX = _board->getObjPoints()[0][0].x; + minY = maxY = _board->getObjPoints()[0][0].y; + + for(unsigned int i = 0; i < _board->getObjPoints().size(); i++) { + for(int j = 0; j < 4; j++) { + minX = min(minX, _board->getObjPoints()[i][j].x); + maxX = max(maxX, _board->getObjPoints()[i][j].x); + minY = min(minY, _board->getObjPoints()[i][j].y); + maxY = max(maxY, _board->getObjPoints()[i][j].y); + } + } + + float sizeX = maxX - minX; + float sizeY = maxY - minY; + + // proportion transformations + float xReduction = sizeX / float(out.cols); + float yReduction = sizeY / float(out.rows); + + // determine the zone where the markers are placed + if(xReduction > yReduction) { + int nRows = int(sizeY / xReduction); + int rowsMargins = (out.rows - nRows) / 2; + out.adjustROI(-rowsMargins, -rowsMargins, 0, 0); + } else { + int nCols = int(sizeX / yReduction); + int colsMargins = (out.cols - nCols) / 2; + out.adjustROI(0, 0, -colsMargins, -colsMargins); + } + + // now paint each marker + Dictionary &dictionary = *(_board->getDictionary()); + Mat marker; + Point2f outCorners[3]; + Point2f inCorners[3]; + for(unsigned int m = 0; m < _board->getObjPoints().size(); m++) { + // transform corners to markerZone coordinates + for(int j = 0; j < 3; j++) { + Point2f pf = Point2f(_board->getObjPoints()[m][j].x, _board->getObjPoints()[m][j].y); + // move top left to 0, 0 + pf -= Point2f(minX, minY); + pf.x = pf.x / sizeX * float(out.cols); + pf.y = pf.y / sizeY * float(out.rows); + outCorners[j] = pf; + } + + // get marker + Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order + dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square + dictionary.drawMarker(_board->getIds()[m], dst_sz.width, marker, borderBits); + + if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) { + // marker is aligned to image axes + marker.copyTo(out(Rect(outCorners[0], dst_sz))); + continue; + } + + // interpolate tiny marker to marker position in markerZone + inCorners[0] = Point2f(-0.5f, -0.5f); + inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f); + inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f); + + // remove perspective + Mat transformation = getAffineTransform(inCorners, outCorners); + warpAffine(marker, out, transformation, out.size(), INTER_LINEAR, + BORDER_TRANSPARENT); + } +} + +void drawPlanarBoard(const Ptr &_board, Size outSize, OutputArray _img, int marginSize, + int borderBits) { + _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits); +} + +struct GridBoard::GridImpl { + GridImpl(){}; + // number of markers in X and Y directions + int sizeX = 3, sizeY = 3; + + // marker side length (normally in meters) + float markerLength = 1.f; + + // separation between markers in the grid + float markerSeparation = .5f; +}; + +GridBoard::GridBoard(): gridImpl(makePtr()) {} + +Board::Board(): dictionary(makePtr(getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME::DICT_4X4_50))) {} + +Ptr Board::create(InputArrayOfArrays objPoints, const Ptr &dictionary, 
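// Illustrative sketch for Board::create() (defined just below) together with
// the drawPlanarBoard() wrapper above: a board made of a single 4 cm marker.
// Geometry and id are placeholders.
#include <opencv2/aruco.hpp>

static cv::Ptr<cv::aruco::Board> makeSingleMarkerBoard() {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_4X4_50);
    std::vector<std::vector<Point3f>> objPoints(1);
    objPoints[0] = { Point3f(0.f, 0.f, 0.f), Point3f(0.04f, 0.f, 0.f),
                     Point3f(0.04f, 0.04f, 0.f), Point3f(0.f, 0.04f, 0.f) };
    std::vector<int> ids = { 0 };
    Ptr<aruco::Board> board = aruco::Board::create(objPoints, dict, ids);
    Mat boardImg;
    aruco::drawPlanarBoard(board, Size(400, 400), boardImg, 10, 1);   // 10 px margin, 1-bit border
    return board;
}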
InputArray ids) { + CV_Assert(objPoints.total() == ids.total()); + CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1); + + std::vector > obj_points_vector; + Point3f rightBottomBorder = Point3f(0.f, 0.f, 0.f); + for (unsigned int i = 0; i < objPoints.total(); i++) { + std::vector corners; + Mat corners_mat = objPoints.getMat(i); + + if (corners_mat.type() == CV_32FC1) + corners_mat = corners_mat.reshape(3); + CV_Assert(corners_mat.total() == 4); + + for (int j = 0; j < 4; j++) { + const Point3f &corner = corners_mat.at(j); + corners.push_back(corner); + rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); + rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); + rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); + } + obj_points_vector.push_back(corners); + } + Ptr res = makePtr(); + ids.copyTo(res->ids); + res->objPoints = obj_points_vector; + res->dictionary = cv::makePtr(dictionary); + res->rightBottomBorder = rightBottomBorder; + return res; +} + +void Board::setIds(InputArray ids_) { + CV_Assert(objPoints.size() == ids_.total()); + ids_.copyTo(this->ids); +} + +Ptr Board::getDictionary() const { + return this->dictionary; +} + +void Board::setDictionary(const Ptr &_dictionary) { + this->dictionary = _dictionary; +} + +const std::vector >& Board::getObjPoints() const { + return this->objPoints; +} + +void Board::setObjPoints(const vector> &_objPoints) { + CV_Assert(!_objPoints.empty()); + this->objPoints = _objPoints; + rightBottomBorder = _objPoints.front().front(); + for (size_t i = 0; i < this->objPoints.size(); i++) { + for (int j = 0; j < 4; j++) { + const Point3f &corner = this->objPoints[i][j]; + rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x); + rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y); + rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z); + } + } +} + +const Point3f& Board::getRightBottomBorder() const { + return this->rightBottomBorder; +} + +const std::vector& Board::getIds() const { + return this->ids; +} + +void Board::changeId(int index, int newId) { + CV_Assert(index >= 0 && index < (int)ids.size()); + CV_Assert(newId >= 0 && newId < dictionary->bytesList.rows); + this->ids[index] = newId; +} + +Ptr GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation, + const Ptr &dictionary, int firstMarker) { + CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0); + Ptr res = makePtr(); + res->gridImpl->sizeX = markersX; + res->gridImpl->sizeY = markersY; + res->gridImpl->markerLength = markerLength; + res->gridImpl->markerSeparation = markerSeparation; + res->setDictionary(dictionary); + + size_t totalMarkers = (size_t) markersX * markersY; + res->ids.resize(totalMarkers); + std::vector > objPoints; + objPoints.reserve(totalMarkers); + + // fill ids with first identifiers + for (unsigned int i = 0; i < totalMarkers; i++) { + res->ids[i] = i + firstMarker; + } + + // calculate Board objPoints + for (int y = 0; y < markersY; y++) { + for (int x = 0; x < markersX; x++) { + vector corners(4); + corners[0] = Point3f(x * (markerLength + markerSeparation), + y * (markerLength + markerSeparation), 0); + corners[1] = corners[0] + Point3f(markerLength, 0, 0); + corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); + corners[3] = corners[0] + Point3f(0, markerLength, 0); + objPoints.push_back(corners); + } + } + res->setObjPoints(objPoints); + res->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * 
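// Illustrative sketch for the GridBoard factory above: a 5x7 grid with 4 cm
// markers and 1 cm separation, ids starting at 0, rendered with the draw()
// method defined just below. Sizes are placeholders.
#include <opencv2/aruco.hpp>

static cv::Ptr<cv::aruco::GridBoard> makeGridBoard() {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    Ptr<aruco::GridBoard> grid = aruco::GridBoard::create(5, 7, 0.04f, 0.01f, dict, 0);
    Mat boardImg;
    grid->draw(Size(600, 800), boardImg, 20, 1);   // 20 px margin, 1-bit marker border
    return grid;
}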
(markersX - 1), + markersY * markerLength + markerSeparation * (markersY - 1), 0.f); + return res; +} + +void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { + _drawPlanarBoardImpl((Board*)this, outSize, _img, marginSize, borderBits); +} + +Size GridBoard::getGridSize() const { + return Size(gridImpl->sizeX, gridImpl->sizeY); +} + +float GridBoard::getMarkerLength() const { + return gridImpl->markerLength; +} + +float GridBoard::getMarkerSeparation() const { + return gridImpl->markerSeparation; +} + +struct CharucoBoard::CharucoImpl : GridBoard::GridImpl { + // size of chessboard squares side (normally in meters) + float squareLength; + + // marker side length (normally in meters) + float markerLength; +}; + +CharucoBoard::CharucoBoard(): charucoImpl(makePtr()) {} + +void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { + CV_Assert(!outSize.empty()); + CV_Assert(marginSize >= 0); + + _img.create(outSize, CV_8UC1); + _img.setTo(255); + Mat out = _img.getMat(); + Mat noMarginsImg = + out.colRange(marginSize, out.cols - marginSize).rowRange(marginSize, out.rows - marginSize); + + double totalLengthX, totalLengthY; + totalLengthX = charucoImpl->squareLength * charucoImpl->sizeX; + totalLengthY = charucoImpl->squareLength * charucoImpl->sizeY; + + // proportional transformation + double xReduction = totalLengthX / double(noMarginsImg.cols); + double yReduction = totalLengthY / double(noMarginsImg.rows); + + // determine the zone where the chessboard is placed + Mat chessboardZoneImg; + if(xReduction > yReduction) { + int nRows = int(totalLengthY / xReduction); + int rowsMargins = (noMarginsImg.rows - nRows) / 2; + chessboardZoneImg = noMarginsImg.rowRange(rowsMargins, noMarginsImg.rows - rowsMargins); + } else { + int nCols = int(totalLengthX / yReduction); + int colsMargins = (noMarginsImg.cols - nCols) / 2; + chessboardZoneImg = noMarginsImg.colRange(colsMargins, noMarginsImg.cols - colsMargins); + } + + // determine the margins to draw only the markers + // take the minimum just to be sure + double squareSizePixels = min(double(chessboardZoneImg.cols) / double(charucoImpl->sizeX), + double(chessboardZoneImg.rows) / double(charucoImpl->sizeY)); + + double diffSquareMarkerLength = (charucoImpl->squareLength - charucoImpl->markerLength) / 2; + int diffSquareMarkerLengthPixels = + int(diffSquareMarkerLength * squareSizePixels / charucoImpl->squareLength); + + // draw markers + Mat markersImg; + _drawPlanarBoardImpl(this, chessboardZoneImg.size(), markersImg, diffSquareMarkerLengthPixels, borderBits); + markersImg.copyTo(chessboardZoneImg); + + // now draw black squares + for(int y = 0; y < charucoImpl->sizeY; y++) { + for(int x = 0; x < charucoImpl->sizeX; x++) { + + if(y % 2 != x % 2) continue; // white corner, dont do anything + + double startX, startY; + startX = squareSizePixels * double(x); + startY = squareSizePixels * double(y); + + Mat squareZone = chessboardZoneImg.rowRange(int(startY), int(startY + squareSizePixels)) + .colRange(int(startX), int(startX + squareSizePixels)); + + squareZone.setTo(0); + } + } +} + +/** + * Fill nearestMarkerIdx and nearestMarkerCorners arrays + */ +static inline void _getNearestMarkerCorners(CharucoBoard &board, float squareLength) { + board.nearestMarkerIdx.resize(board.chessboardCorners.size()); + board.nearestMarkerCorners.resize(board.chessboardCorners.size()); + + unsigned int nMarkers = (unsigned int)board.getIds().size(); + unsigned int nCharucoCorners = (unsigned 
int)board.chessboardCorners.size(); + for(unsigned int i = 0; i < nCharucoCorners; i++) { + double minDist = -1; // distance of closest markers + Point3f charucoCorner = board.chessboardCorners[i]; + for(unsigned int j = 0; j < nMarkers; j++) { + // calculate distance from marker center to charuco corner + Point3f center = Point3f(0, 0, 0); + for(unsigned int k = 0; k < 4; k++) + center += board.getObjPoints()[j][k]; + center /= 4.; + double sqDistance; + Point3f distVector = charucoCorner - center; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * squareLength, 2)) { + // if same minimum distance (or first iteration), add to nearestMarkerIdx vector + board.nearestMarkerIdx[i].push_back(j); + minDist = sqDistance; + } else if(sqDistance < minDist) { + // if finding a closest marker to the charuco corner + board.nearestMarkerIdx[i].clear(); // remove any previous added marker + board.nearestMarkerIdx[i].push_back(j); // add the new closest marker index + minDist = sqDistance; + } + } + // for each of the closest markers, search the marker corner index closer + // to the charuco corner + for(unsigned int j = 0; j < board.nearestMarkerIdx[i].size(); j++) { + board.nearestMarkerCorners[i].resize(board.nearestMarkerIdx[i].size()); + double minDistCorner = -1; + for(unsigned int k = 0; k < 4; k++) { + double sqDistance; + Point3f distVector = charucoCorner - board.getObjPoints()[board.nearestMarkerIdx[i][j]][k]; + sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; + if(k == 0 || sqDistance < minDistCorner) { + // if this corner is closer to the charuco corner, assing its index + // to nearestMarkerCorners + minDistCorner = sqDistance; + board.nearestMarkerCorners[i][j] = k; + } + } + } + } +} + +Ptr CharucoBoard::create(int squaresX, int squaresY, float squareLength, + float markerLength, const Ptr &dictionary) { + CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength); + Ptr res = makePtr(); + + res->charucoImpl->sizeX = squaresX; + res->charucoImpl->sizeY = squaresY; + res->charucoImpl->squareLength = squareLength; + res->charucoImpl->markerLength = markerLength; + res->setDictionary(dictionary); + std::vector > objPoints; + + float diffSquareMarkerLength = (squareLength - markerLength) / 2; + // calculate Board objPoints + for(int y = 0; y < squaresY; y++) { + for(int x = 0; x < squaresX; x++) { + + if(y % 2 == x % 2) continue; // black corner, no marker here + + vector corners(4); + corners[0] = Point3f(x * squareLength + diffSquareMarkerLength, + y * squareLength + diffSquareMarkerLength, 0); + corners[1] = corners[0] + Point3f(markerLength, 0, 0); + corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); + corners[3] = corners[0] + Point3f(0, markerLength, 0); + objPoints.push_back(corners); + // first ids in dictionary + int nextId = (int)res->ids.size(); + res->ids.push_back(nextId); + } + } + res->setObjPoints(objPoints); + + // now fill chessboardCorners + for(int y = 0; y < squaresY - 1; y++) { + for(int x = 0; x < squaresX - 1; x++) { + Point3f corner; + corner.x = (x + 1) * squareLength; + corner.y = (y + 1) * squareLength; + corner.z = 0; + res->chessboardCorners.push_back(corner); + } + } + res->rightBottomBorder = Point3f(squaresX * squareLength, + squaresY * squareLength, 0.f); + _getNearestMarkerCorners(*res, res->charucoImpl->squareLength); + return res; +} + +Size CharucoBoard::getChessboardSize() const { return 
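// Illustrative sketch for the CharucoBoard factory above: 5x7 squares, 4 cm
// squares with 2 cm markers, rendered to a printable image. Sizes are
// placeholders.
#include <opencv2/aruco/charuco.hpp>

static cv::Ptr<cv::aruco::CharucoBoard> makeCharucoBoard() {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    Ptr<aruco::CharucoBoard> board = aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dict);
    Mat boardImg;
    board->draw(Size(500, 700), boardImg, 10, 1);
    return board;
}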
Size(charucoImpl->sizeX, charucoImpl->sizeY); } + +float CharucoBoard::getSquareLength() const { return charucoImpl->squareLength; } + +float CharucoBoard::getMarkerLength() const { return charucoImpl->markerLength; } + +bool testCharucoCornersCollinear(const Ptr &_board, InputArray _charucoIds) { + unsigned int nCharucoCorners = (unsigned int)_charucoIds.getMat().total(); + if (nCharucoCorners <= 2) + return true; + + // only test if there are 3 or more corners + CV_Assert( _board->chessboardCorners.size() >= _charucoIds.getMat().total()); + + Vec point0( _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].x, + _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].y, 1); + + Vec point1( _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].x, + _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].y, 1); + + // create a line from the first two points. + Vec testLine = point0.cross(point1); + Vec testPoint(0, 0, 1); + + double divisor = sqrt(testLine[0]*testLine[0] + testLine[1]*testLine[1]); + CV_Assert(divisor != 0.0); + + // normalize the line with normal + testLine /= divisor; + + double dotProduct; + for (unsigned int i = 2; i < nCharucoCorners; i++){ + testPoint(0) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].x; + testPoint(1) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].y; + + // if testPoint is on testLine, dotProduct will be zero (or very, very close) + dotProduct = testPoint.dot(testLine); + + if (std::abs(dotProduct) > 1e-6){ + return false; + } + } + // no points found that were off of testLine, return true that all points collinear. + return true; +} + +} +} diff --git a/modules/aruco/src/charuco.cpp b/modules/aruco/src/charuco.cpp index b179bc63055..a61e0e70567 100644 --- a/modules/aruco/src/charuco.cpp +++ b/modules/aruco/src/charuco.cpp @@ -1,40 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. 
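// Illustrative sketch of the intended use of testCharucoCornersCollinear()
// above: reject degenerate corner sets before solvePnP-based pose estimation,
// since collinear points leave the pose ambiguous. The call to
// estimatePoseCharucoBoard() assumes the signature exposed elsewhere in the
// module and is shown only as a guard pattern.
#include <opencv2/aruco.hpp>
#include <opencv2/aruco/charuco.hpp>

static bool poseFromCharucoCorners(const cv::Ptr<cv::aruco::CharucoBoard>& board,
                                   const std::vector<cv::Point2f>& charucoCorners,
                                   const std::vector<int>& charucoIds,
                                   const cv::Mat& K, const cv::Mat& dist,
                                   cv::Mat& rvec, cv::Mat& tvec) {
    if (cv::aruco::testCharucoCornersCollinear(board, charucoIds))
        return false;   // all detected corners lie on one line, skip this frame
    return cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, K, dist, rvec, tvec);
}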
In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #include "precomp.hpp" #include "opencv2/aruco/charuco.hpp" @@ -45,187 +11,6 @@ namespace cv { namespace aruco { using namespace std; - - - -/** - */ -void CharucoBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) { - - CV_Assert(!outSize.empty()); - CV_Assert(marginSize >= 0); - - _img.create(outSize, CV_8UC1); - _img.setTo(255); - Mat out = _img.getMat(); - Mat noMarginsImg = - out.colRange(marginSize, out.cols - marginSize).rowRange(marginSize, out.rows - marginSize); - - double totalLengthX, totalLengthY; - totalLengthX = _squareLength * _squaresX; - totalLengthY = _squareLength * _squaresY; - - // proportional transformation - double xReduction = totalLengthX / double(noMarginsImg.cols); - double yReduction = totalLengthY / double(noMarginsImg.rows); - - // determine the zone where the chessboard is placed - Mat chessboardZoneImg; - if(xReduction > yReduction) { - int nRows = int(totalLengthY / xReduction); - int rowsMargins = (noMarginsImg.rows - nRows) / 2; - chessboardZoneImg = noMarginsImg.rowRange(rowsMargins, noMarginsImg.rows - rowsMargins); - } else { - int nCols = int(totalLengthX / yReduction); - int colsMargins = (noMarginsImg.cols - nCols) / 2; - chessboardZoneImg = noMarginsImg.colRange(colsMargins, noMarginsImg.cols - colsMargins); - } - - // determine the margins to draw only the markers - // take the minimum just to be sure - double squareSizePixels = min(double(chessboardZoneImg.cols) / double(_squaresX), - double(chessboardZoneImg.rows) / double(_squaresY)); - - double diffSquareMarkerLength = (_squareLength - _markerLength) / 2; - int diffSquareMarkerLengthPixels = - int(diffSquareMarkerLength * squareSizePixels / _squareLength); - - // draw markers - Mat markersImg; - aruco::_drawPlanarBoardImpl(this, chessboardZoneImg.size(), markersImg, - diffSquareMarkerLengthPixels, borderBits); - - markersImg.copyTo(chessboardZoneImg); - - // now draw black squares - for(int y = 0; y < _squaresY; y++) { - for(int x = 0; x < _squaresX; x++) { - - if(y % 2 != x % 2) continue; // white corner, dont do anything - - double startX, startY; - startX = squareSizePixels * double(x); - startY = squareSizePixels * double(y); - - Mat squareZone = chessboardZoneImg.rowRange(int(startY), int(startY + squareSizePixels)) - .colRange(int(startX), int(startX + squareSizePixels)); - - squareZone.setTo(0); - } - } -} - - - -/** - */ -Ptr CharucoBoard::create(int squaresX, int squaresY, float squareLength, - float markerLength, const Ptr &dictionary) { - - CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength); - Ptr res = makePtr(); - - res->_squaresX = squaresX; - res->_squaresY = squaresY; - res->_squareLength = squareLength; - res->_markerLength = markerLength; - res->dictionary = dictionary; - - float diffSquareMarkerLength = 
(squareLength - markerLength) / 2; - - // calculate Board objPoints - for(int y = 0; y < squaresY; y++) { - for(int x = 0; x < squaresX; x++) { - - if(y % 2 == x % 2) continue; // black corner, no marker here - - vector corners(4); - corners[0] = Point3f(x * squareLength + diffSquareMarkerLength, - y * squareLength + diffSquareMarkerLength, 0); - corners[1] = corners[0] + Point3f(markerLength, 0, 0); - corners[2] = corners[0] + Point3f(markerLength, markerLength, 0); - corners[3] = corners[0] + Point3f(0, markerLength, 0); - res->objPoints.push_back(corners); - // first ids in dictionary - int nextId = (int)res->ids.size(); - res->ids.push_back(nextId); - } - } - - // now fill chessboardCorners - for(int y = 0; y < squaresY - 1; y++) { - for(int x = 0; x < squaresX - 1; x++) { - Point3f corner; - corner.x = (x + 1) * squareLength; - corner.y = (y + 1) * squareLength; - corner.z = 0; - res->chessboardCorners.push_back(corner); - } - } - res->rightBottomBorder = Point3f(squaresX * squareLength, - squaresY * squareLength, 0.f); - res->_getNearestMarkerCorners(); - - return res; -} - - - -/** - * Fill nearestMarkerIdx and nearestMarkerCorners arrays - */ -void CharucoBoard::_getNearestMarkerCorners() { - - nearestMarkerIdx.resize(chessboardCorners.size()); - nearestMarkerCorners.resize(chessboardCorners.size()); - - unsigned int nMarkers = (unsigned int)ids.size(); - unsigned int nCharucoCorners = (unsigned int)chessboardCorners.size(); - for(unsigned int i = 0; i < nCharucoCorners; i++) { - double minDist = -1; // distance of closest markers - Point3f charucoCorner = chessboardCorners[i]; - for(unsigned int j = 0; j < nMarkers; j++) { - // calculate distance from marker center to charuco corner - Point3f center = Point3f(0, 0, 0); - for(unsigned int k = 0; k < 4; k++) - center += objPoints[j][k]; - center /= 4.; - double sqDistance; - Point3f distVector = charucoCorner - center; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(j == 0 || fabs(sqDistance - minDist) < cv::pow(0.01 * _squareLength, 2)) { - // if same minimum distance (or first iteration), add to nearestMarkerIdx vector - nearestMarkerIdx[i].push_back(j); - minDist = sqDistance; - } else if(sqDistance < minDist) { - // if finding a closest marker to the charuco corner - nearestMarkerIdx[i].clear(); // remove any previous added marker - nearestMarkerIdx[i].push_back(j); // add the new closest marker index - minDist = sqDistance; - } - } - - // for each of the closest markers, search the marker corner index closer - // to the charuco corner - for(unsigned int j = 0; j < nearestMarkerIdx[i].size(); j++) { - nearestMarkerCorners[i].resize(nearestMarkerIdx[i].size()); - double minDistCorner = -1; - for(unsigned int k = 0; k < 4; k++) { - double sqDistance; - Point3f distVector = charucoCorner - objPoints[nearestMarkerIdx[i][j]][k]; - sqDistance = distVector.x * distVector.x + distVector.y * distVector.y; - if(k == 0 || sqDistance < minDistCorner) { - // if this corner is closer to the charuco corner, assing its index - // to nearestMarkerCorners - minDistCorner = sqDistance; - nearestMarkerCorners[i][j] = k; - } - } - } - } -} - - /** * Remove charuco corners if any of their minMarkers closest markers has not been detected */ @@ -246,7 +31,7 @@ static int _filterCornersWithoutMinMarkers(const Ptr &_board, int totalMarkers = 0; // nomber of closest marker detected // look for closest markers for(unsigned int m = 0; m < _board->nearestMarkerIdx[currentCharucoId].size(); m++) { - int markerId = 
_board->ids[_board->nearestMarkerIdx[currentCharucoId][m]]; + int markerId = _board->getIds()[_board->nearestMarkerIdx[currentCharucoId][m]]; bool found = false; for(unsigned int k = 0; k < _allArucoIds.getMat().total(); k++) { if(_allArucoIds.getMat().at< int >(k) == markerId) { @@ -356,7 +141,7 @@ static void _getMaximumSubPixWindowSizes(InputArrayOfArrays markerCorners, Input // calculate the distance to each of the closest corner of each closest marker for(unsigned int j = 0; j < board->nearestMarkerIdx[i].size(); j++) { // find marker - int markerId = board->ids[board->nearestMarkerIdx[i][j]]; + int markerId = board->getIds()[board->nearestMarkerIdx[i][j]]; int markerIdx = -1; for(unsigned int k = 0; k < markerIds.getMat().total(); k++) { if(markerIds.getMat().at< int >(k) == markerId) { @@ -388,7 +173,6 @@ static void _getMaximumSubPixWindowSizes(InputArrayOfArrays markerCorners, Input } - /** * Interpolate charuco corners using approximated pose estimation */ @@ -432,7 +216,6 @@ static int _interpolateCornersCharucoApproxCalib(InputArrayOfArrays _markerCorne } - /** * Interpolate charuco corners using local homography */ @@ -454,16 +237,18 @@ static int _interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, vector< bool > validTransform(nMarkers, false); + const auto& ids = _board->getIds(); for(unsigned int i = 0; i < nMarkers; i++) { - vector< Point2f > markerObjPoints2D; - int markerId = _markerIds.getMat().at< int >(i); - vector< int >::const_iterator it = find(_board->ids.begin(), _board->ids.end(), markerId); - if(it == _board->ids.end()) continue; - int boardIdx = (int)std::distance::const_iterator>(_board->ids.begin(), it); + vector markerObjPoints2D; + int markerId = _markerIds.getMat().at(i); + + auto it = find(ids.begin(), ids.end(), markerId); + if(it == ids.end()) continue; + auto boardIdx = it - ids.begin(); markerObjPoints2D.resize(4); for(unsigned int j = 0; j < 4; j++) markerObjPoints2D[j] = - Point2f(_board->objPoints[boardIdx][j].x, _board->objPoints[boardIdx][j].y); + Point2f(_board->getObjPoints()[boardIdx][j].x, _board->getObjPoints()[boardIdx][j].y); transformations[i] = getPerspectiveTransform(markerObjPoints2D, _markerCorners.getMat(i)); @@ -482,7 +267,7 @@ static int _interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, vector< Point2f > interpolatedPositions; for(unsigned int j = 0; j < _board->nearestMarkerIdx[i].size(); j++) { - int markerId = _board->ids[_board->nearestMarkerIdx[i][j]]; + int markerId = _board->getIds()[_board->nearestMarkerIdx[i][j]]; int markerIdx = -1; for(unsigned int k = 0; k < _markerIds.getMat().total(); k++) { if(_markerIds.getMat().at< int >(k) == markerId) { @@ -524,9 +309,6 @@ static int _interpolateCornersCharucoLocalHom(InputArrayOfArrays _markerCorners, } - -/** - */ int interpolateCornersCharuco(InputArrayOfArrays _markerCorners, InputArray _markerIds, InputArray _image, const Ptr &_board, OutputArray _charucoCorners, OutputArray _charucoIds, @@ -550,9 +332,6 @@ int interpolateCornersCharuco(InputArrayOfArrays _markerCorners, InputArray _mar } - -/** - */ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorners, InputArray _charucoIds, Scalar cornerColor) { @@ -580,128 +359,10 @@ void drawDetectedCornersCharuco(InputOutputArray _image, InputArray _charucoCorn } -/** - * Check if a set of 3d points are enough for calibration. Z coordinate is ignored. 
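// Illustrative sketch of the flow that feeds the interpolation code above:
// detect markers first, then interpolate the chessboard corners between them.
// With no intrinsics passed, the local-homography path is taken; the trailing
// default arguments are assumed from the public header.
#include <opencv2/aruco.hpp>
#include <opencv2/aruco/charuco.hpp>

static int findCharucoCorners(const cv::Mat& image, const cv::Ptr<cv::aruco::CharucoBoard>& board,
                              std::vector<cv::Point2f>& charucoCorners, std::vector<int>& charucoIds) {
    using namespace cv;
    aruco::ArucoDetector detector(board->getDictionary());
    std::vector<std::vector<Point2f>> corners;
    std::vector<int> ids;
    detector.detectMarkers(image, corners, ids);
    if (ids.empty()) return 0;
    return aruco::interpolateCornersCharuco(corners, ids, image, board, charucoCorners, charucoIds);
}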
- * Only axis parallel lines are considered - */ -static bool _arePointsEnoughForPoseEstimation(const vector< Point3f > &points) { - - if(points.size() < 4) return false; - - vector< double > sameXValue; // different x values in points - vector< int > sameXCounter; // number of points with the x value in sameXValue - for(unsigned int i = 0; i < points.size(); i++) { - bool found = false; - for(unsigned int j = 0; j < sameXValue.size(); j++) { - if(sameXValue[j] == points[i].x) { - found = true; - sameXCounter[j]++; - } - } - if(!found) { - sameXValue.push_back(points[i].x); - sameXCounter.push_back(1); - } - } - - // count how many x values has more than 2 points - int moreThan2 = 0; - for(unsigned int i = 0; i < sameXCounter.size(); i++) { - if(sameXCounter[i] >= 2) moreThan2++; - } - - // if we have more than 1 two xvalues with more than 2 points, calibration is ok - if(moreThan2 > 1) - return true; - else - return false; -} - - -/** - */ -bool estimatePoseCharucoBoard(InputArray _charucoCorners, InputArray _charucoIds, - const Ptr &_board, InputArray _cameraMatrix, InputArray _distCoeffs, - InputOutputArray _rvec, InputOutputArray _tvec, bool useExtrinsicGuess) { - - CV_Assert((_charucoCorners.getMat().total() == _charucoIds.getMat().total())); - - // need, at least, 4 corners - if(_charucoIds.getMat().total() < 4) return false; - - vector< Point3f > objPoints; - objPoints.reserve(_charucoIds.getMat().total()); - for(unsigned int i = 0; i < _charucoIds.getMat().total(); i++) { - int currId = _charucoIds.getMat().at< int >(i); - CV_Assert(currId >= 0 && currId < (int)_board->chessboardCorners.size()); - objPoints.push_back(_board->chessboardCorners[currId]); - } - - // points need to be in different lines, check if detected points are enough - if(!_arePointsEnoughForPoseEstimation(objPoints)) return false; - - solvePnP(objPoints, _charucoCorners, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess); - - return true; -} - - - - -/** - */ -double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, - const Ptr &_board, Size imageSize, - InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs, - OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, - OutputArray _stdDeviationsIntrinsics, - OutputArray _stdDeviationsExtrinsics, - OutputArray _perViewErrors, - int flags, TermCriteria criteria) { - - CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total())); - - // Join object points of charuco corners in a single vector for calibrateCamera() function - vector< vector< Point3f > > allObjPoints; - allObjPoints.resize(_charucoIds.total()); - for(unsigned int i = 0; i < _charucoIds.total(); i++) { - unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total(); - CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total()); - allObjPoints[i].reserve(nCorners); - - for(unsigned int j = 0; j < nCorners; j++) { - int pointId = _charucoIds.getMat(i).at< int >(j); - CV_Assert(pointId >= 0 && pointId < (int)_board->chessboardCorners.size()); - allObjPoints[i].push_back(_board->chessboardCorners[pointId]); - } - } - - return calibrateCamera(allObjPoints, _charucoCorners, imageSize, _cameraMatrix, _distCoeffs, - _rvecs, _tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, - _perViewErrors, flags, criteria); -} - - - -/** - */ -double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds, - const Ptr &_board, Size imageSize, - InputOutputArray _cameraMatrix, 
InputOutputArray _distCoeffs, - OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, - TermCriteria criteria) { - return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, _rvecs, - _tvecs, noArray(), noArray(), noArray(), flags, criteria); -} - - -/** - */ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, InputArray _markerIds, float squareMarkerLengthRate, OutputArrayOfArrays _diamondCorners, OutputArray _diamondIds, InputArray _cameraMatrix, InputArray _distCoeffs, Ptr dictionary) { - CV_Assert(_markerIds.total() > 0 && _markerIds.total() == _markerCorners.total()); const float minRepDistanceRate = 1.302455f; @@ -758,20 +419,19 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, } } if(candidates.size() < 3) break; // we need at least 3 free markers - // modify charuco layout id to make sure all the ids are different than current id for(int k = 1; k < 4; k++) - _charucoDiamondLayout->ids[k] = currentId + 1 + k; + _charucoDiamondLayout->changeId(k, currentId + 1 + k); // current id is assigned to [0], so it is the marker on the top - _charucoDiamondLayout->ids[0] = currentId; + _charucoDiamondLayout->changeId(0, currentId); // try to find the rest of markers in the diamond vector< int > acceptedIdxs; Ptr _b = _charucoDiamondLayout.staticCast(); - aruco::refineDetectedMarkers(grey, _b, - currentMarker, currentMarkerId, - candidates, noArray(), noArray(), minRepDistance, -1, false, - acceptedIdxs); + Ptr refineParameters = makePtr(minRepDistance, -1, false); + ArucoDetector detector(dictionary, DetectorParameters::create(), refineParameters); + detector.refineDetectedMarkers(grey, _b, currentMarker, currentMarkerId, candidates, noArray(), noArray(), + acceptedIdxs); // if found, we have a diamond if(currentMarker.size() == 4) { @@ -826,13 +486,8 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, } - - -/** - */ void drawCharucoDiamond(const Ptr &dictionary, Vec4i ids, int squareLength, int markerLength, OutputArray _img, int marginSize, int borderBits) { - CV_Assert(squareLength > 0 && markerLength > 0 && squareLength > markerLength); CV_Assert(marginSize >= 0 && borderBits > 0); @@ -842,19 +497,15 @@ void drawCharucoDiamond(const Ptr &dictionary, Vec4i ids, int square // assign the charuco marker ids for(int i = 0; i < 4; i++) - board->ids[i] = ids[i]; + board->changeId(i, ids[i]); Size outSize(3 * squareLength + 2 * marginSize, 3 * squareLength + 2 * marginSize); board->draw(outSize, _img, marginSize, borderBits); } -/** - */ void drawDetectedDiamonds(InputOutputArray _image, InputArrayOfArrays _corners, InputArray _ids, Scalar borderColor) { - - CV_Assert(_image.getMat().total() != 0 && (_image.getMat().channels() == 1 || _image.getMat().channels() == 3)); CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0); @@ -895,59 +546,5 @@ void drawDetectedDiamonds(InputOutputArray _image, InputArrayOfArrays _corners, } } -/** - @param board layout of ChArUco board. - * @param image charucoIds list of identifiers for each corner in charucoCorners. - * @return bool value, 1 (true) for detected corners form a line, 0 for non-linear. - solvePnP will fail if the corners are collinear (true). - * Check that the set of charuco markers in _charucoIds does not identify a straight line on - the charuco board. Axis parallel, as well as diagonal and other straight lines detected. 
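// Illustrative sketch of diamond detection driven from a regular marker
// detection pass, using the functions updated above. squareMarkerLengthRate is
// the ratio squareLength/markerLength of the printed diamond; the value and
// calibration inputs are placeholders.
#include <opencv2/aruco.hpp>
#include <opencv2/aruco/charuco.hpp>

static void findDiamonds(const cv::Mat& image, const cv::Mat& K, const cv::Mat& dist, cv::Mat& vis) {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_4X4_50);
    aruco::ArucoDetector detector(dict);
    std::vector<std::vector<Point2f>> corners, diamondCorners;
    std::vector<int> ids;
    std::vector<Vec4i> diamondIds;
    detector.detectMarkers(image, corners, ids);
    if (ids.empty()) return;
    aruco::detectCharucoDiamond(image, corners, ids, 2.f, diamondCorners, diamondIds, K, dist, dict);
    vis = image.clone();
    aruco::drawDetectedDiamonds(vis, diamondCorners, diamondIds, Scalar(0, 0, 255));
}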
- */ - bool testCharucoCornersCollinear(const Ptr &_board, InputArray _charucoIds){ - - unsigned int nCharucoCorners = (unsigned int)_charucoIds.getMat().total(); - - if (nCharucoCorners <= 2) - return true; - - // only test if there are 3 or more corners - CV_Assert( _board->chessboardCorners.size() >= _charucoIds.getMat().total()); - - Vec point0( _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].x, - _board->chessboardCorners[_charucoIds.getMat().at< int >(0)].y, - 1); - - Vec point1( _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].x, - _board->chessboardCorners[_charucoIds.getMat().at< int >(1)].y, - 1); - - // create a line from the first two points. - Vec testLine = point0.cross(point1); - - Vec testPoint(0, 0, 1); - - double divisor = sqrt(testLine[0]*testLine[0] + testLine[1]*testLine[1]); - - CV_Assert( divisor != 0); - - // normalize the line with normal - testLine /= divisor; - - double dotProduct; - for (unsigned int i = 2; i < nCharucoCorners; i++){ - testPoint(0) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].x; - testPoint(1) = _board->chessboardCorners[_charucoIds.getMat().at< int >(i)].y; - - // if testPoint is on testLine, dotProduct will be zero (or very, very close) - dotProduct = testPoint.dot(testLine); - - if (std::abs(dotProduct) > 1e-6){ - return false; - } - } - - // no points found that were off of testLine, return true that all points collinear. - return true; -} } } diff --git a/modules/aruco/src/dictionary.cpp b/modules/aruco/src/dictionary.cpp index 20d2526a47d..5672538f293 100644 --- a/modules/aruco/src/dictionary.cpp +++ b/modules/aruco/src/dictionary.cpp @@ -1,57 +1,22 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. 
-*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html -#include "precomp.hpp" -#include "opencv2/aruco/dictionary.hpp" -#include #include -#include "predefined_dictionaries.hpp" -#include "predefined_dictionaries_apriltag.hpp" #include "opencv2/core/hal/hal.hpp" +#include "precomp.hpp" +#include "aruco_utils.hpp" +#include "predefined_dictionaries.hpp" +#include "apriltag/predefined_dictionaries_apriltag.hpp" +#include + namespace cv { namespace aruco { using namespace std; -/** - */ Dictionary::Dictionary(const Ptr &_dictionary) { markerSize = _dictionary->markerSize; maxCorrectionBits = _dictionary->maxCorrectionBits; @@ -59,8 +24,6 @@ Dictionary::Dictionary(const Ptr &_dictionary) { } -/** - */ Dictionary::Dictionary(const Mat &_bytesList, int _markerSize, int _maxcorr) { markerSize = _markerSize; maxCorrectionBits = _maxcorr; @@ -68,54 +31,40 @@ Dictionary::Dictionary(const Mat &_bytesList, int _markerSize, int _maxcorr) { } -/** - */ Ptr Dictionary::create(int nMarkers, int markerSize, int randomSeed) { const Ptr baseDictionary = makePtr(); return create(nMarkers, markerSize, baseDictionary, randomSeed); } -/** - */ Ptr Dictionary::create(int nMarkers, int markerSize, const Ptr &baseDictionary, int randomSeed) { - return generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed); } -template -static inline bool readParameter(const FileNode& node, T& parameter) -{ - if (!node.empty()) { - node >> parameter; - return true; - } - return false; -} -bool Dictionary::readDictionary(const cv::FileNode& fn) -{ +bool Dictionary::readDictionary(const cv::FileNode& fn) { int nMarkers = 0, _markerSize = 0; - if (fn.empty() || !readParameter(fn["nmarkers"], nMarkers) || !readParameter(fn["markersize"], _markerSize)) + if (fn.empty() || !readParameter("nmarkers", nMarkers, fn) || !readParameter("markersize", _markerSize, fn)) return false; Mat bytes(0, 0, CV_8UC1), marker(_markerSize, _markerSize, CV_8UC1); std::string markerString; for (int i = 0; i < nMarkers; i++) { std::ostringstream ostr; ostr << i; - if (!readParameter(fn["marker_" + ostr.str()], markerString)) + if (!readParameter("marker_" + ostr.str(), markerString, fn)) return false; for (int j = 0; j < (int) markerString.size(); j++) marker.at(j) = (markerString[j] == '0') ? 
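// Illustrative sketch of loading a custom dictionary through the
// readDictionary()/readParameter() path above. The YAML layout matches what
// writeDictionary() emits; the file path and the fallback are placeholders.
#include <opencv2/aruco/dictionary.hpp>
#include <opencv2/core/persistence.hpp>

static cv::Ptr<cv::aruco::Dictionary> loadDictionary(const std::string& path) {
    using namespace cv;
    Ptr<aruco::Dictionary> dict = makePtr<aruco::Dictionary>();
    FileStorage fs(path, FileStorage::READ);
    if (!fs.isOpened() || !dict->readDictionary(fs.root()))
        return aruco::getPredefinedDictionary(aruco::DICT_4X4_50);   // fall back to a predefined set
    return dict;
}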
0 : 1; bytes.push_back(Dictionary::getByteListFromBits(marker)); } int _maxCorrectionBits = 0; - readParameter(fn["maxCorrectionBits"], _maxCorrectionBits); + readParameter("maxCorrectionBits", _maxCorrectionBits, fn); *this = Dictionary(bytes, _markerSize, _maxCorrectionBits); return true; } + void Dictionary::writeDictionary(Ptr& fs) { *fs << "nmarkers" << bytesList.rows; *fs << "markersize" << markerSize; @@ -133,18 +82,13 @@ void Dictionary::writeDictionary(Ptr& fs) { } } -/** - */ + Ptr Dictionary::get(int dict) { return getPredefinedDictionary(dict); } -/** - */ -bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, - double maxCorrectionRate) const { - +bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const { CV_Assert(onlyBits.rows == markerSize && onlyBits.cols == markerSize); int maxCorrectionRecalculed = int(double(maxCorrectionBits) * maxCorrectionRate); @@ -182,8 +126,6 @@ bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, } -/** - */ int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) const { CV_Assert(id >= 0 && id < bytesList.rows); @@ -207,12 +149,7 @@ int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) cons } - -/** - * @brief Draw a canonical marker image - */ void Dictionary::drawMarker(int id, int sidePixels, OutputArray _img, int borderBits) const { - CV_Assert(sidePixels >= (markerSize + 2*borderBits)); CV_Assert(id < bytesList.rows); CV_Assert(borderBits > 0); @@ -234,11 +171,6 @@ void Dictionary::drawMarker(int id, int sidePixels, OutputArray _img, int border } - - -/** - * @brief Transform matrix of bits to list of bytes in the 4 rotations - */ Mat Dictionary::getByteListFromBits(const Mat &bits) { // integer ceil int nbytes = (bits.cols * bits.rows + 8 - 1) / 8; @@ -277,10 +209,6 @@ Mat Dictionary::getByteListFromBits(const Mat &bits) { } - -/** - * @brief Transform list of bytes to matrix of bits - */ Mat Dictionary::getBitsFromByteList(const Mat &byteList, int markerSize) { CV_Assert(byteList.total() > 0 && byteList.total() >= (unsigned int)markerSize * markerSize / 8 && @@ -315,9 +243,7 @@ Mat Dictionary::getBitsFromByteList(const Mat &byteList, int markerSize) { } - -Ptr getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name) -{ +Ptr getPredefinedDictionary(PREDEFINED_DICTIONARY_NAME name) { // DictionaryData constructors calls // moved out of globals so construted on first use, which allows lazy-loading of opencv dll static const Dictionary DICT_ARUCO_DATA = Dictionary(Mat(1024, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_ARUCO_BYTES), 5, 0); @@ -438,8 +364,7 @@ static int _getSelfDistance(const Mat &marker) { return minHamming; } -/** - */ + Ptr generateCustomDictionary(int nMarkers, int markerSize, const Ptr &baseDictionary, int randomSeed) { RNG rng((uint64)(randomSeed)); @@ -530,8 +455,6 @@ Ptr generateCustomDictionary(int nMarkers, int markerSize, } -/** - */ Ptr generateCustomDictionary(int nMarkers, int markerSize, int randomSeed) { Ptr baseDictionary = makePtr(); return generateCustomDictionary(nMarkers, markerSize, baseDictionary, randomSeed); diff --git a/modules/aruco/src/precomp.hpp b/modules/aruco/src/precomp.hpp index b45771a6644..955d9f3ed8a 100644 --- a/modules/aruco/src/precomp.hpp +++ b/modules/aruco/src/precomp.hpp @@ -1,43 +1,6 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// - // - // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
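// Illustrative sketch of how getByteListFromBits() and getBitsFromByteList()
// pair up: pack one hand-made 4x4 marker into the byte-list form used by
// bytesList and read it back. The bit pattern is arbitrary and the round-trip
// check relies on the normal (first) rotation only.
#include <opencv2/aruco/dictionary.hpp>

static cv::Ptr<cv::aruco::Dictionary> oneMarkerDictionary() {
    using namespace cv;
    Mat bits = (Mat_<uchar>(4, 4) << 1, 0, 1, 0,
                                     0, 1, 1, 0,
                                     1, 1, 0, 0,
                                     0, 1, 0, 1);
    Mat byteList = aruco::Dictionary::getByteListFromBits(bits);     // one row, 4 rotations packed as channels
    Ptr<aruco::Dictionary> dict = makePtr<aruco::Dictionary>(byteList, 4, 1);   // markerSize=4, maxCorrectionBits=1
    Mat recovered = aruco::Dictionary::getBitsFromByteList(dict->bytesList.row(0), 4);
    CV_Assert(countNonZero(recovered != bits) == 0);                 // lossless for the normal rotation
    return dict;
}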
- // - // By downloading, copying, installing or using the software you agree to this license. - // If you do not agree to this license, do not download, install, - // copy or use the software. - // - // - // License Agreement - // For Open Source Computer Vision Library - // - // Copyright (C) 2014, OpenCV Foundation, all rights reserved. - // Third party copyrights are property of their respective owners. - // - // Redistribution and use in source and binary forms, with or without modification, - // are permitted provided that the following conditions are met: - // - // * Redistribution's of source code must retain the above copyright notice, - // this list of conditions and the following disclaimer. - // - // * Redistribution's in binary form must reproduce the above copyright notice, - // this list of conditions and the following disclaimer in the documentation - // and/or other materials provided with the distribution. - // - // * The name of the copyright holders may not be used to endorse or promote products - // derived from this software without specific prior written permission. - // - // This software is provided by the copyright holders and contributors "as is" and - // any express or implied warranties, including, but not limited to, the implied - // warranties of merchantability and fitness for a particular purpose are disclaimed. - // In no event shall the Intel Corporation or contributors be liable for any direct, - // indirect, incidental, special, exemplary, or consequential damages - // (including, but not limited to, procurement of substitute goods or services; - // loss of use, data, or profits; or business interruption) however caused - // and on any theory of liability, whether in contract, strict liability, - // or tort (including negligence or otherwise) arising in any way out of - // the use of this software, even if advised of the possibility of such damage. - // - //M*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html #ifndef __OPENCV_CCALIB_PRECOMP__ #define __OPENCV_CCALIB_PRECOMP__ diff --git a/modules/aruco/src/predefined_dictionaries.hpp b/modules/aruco/src/predefined_dictionaries.hpp index ce2db45947a..f3431830598 100644 --- a/modules/aruco/src/predefined_dictionaries.hpp +++ b/modules/aruco/src/predefined_dictionaries.hpp @@ -1,40 +1,6 @@ -/* -By downloading, copying, installing or using the software you agree to this -license. If you do not agree to this license, do not download, install, -copy or use the software. - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2013, OpenCV Foundation, all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. 
- -This software is provided by the copyright holders and contributors "as is" and -any express or implied warranties, including, but not limited to, the implied -warranties of merchantability and fitness for a particular purpose are -disclaimed. In no event shall copyright holders or contributors be liable for -any direct, indirect, incidental, special, exemplary, or consequential damages -(including, but not limited to, procurement of substitute goods or services; -loss of use, data, or profits; or business interruption) however caused -and on any theory of liability, whether in contract, strict liability, -or tort (including negligence or otherwise) arising in any way out of -the use of this software, even if advised of the possibility of such damage. -*/ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html namespace { diff --git a/modules/aruco/test/test_aruco_utils.hpp b/modules/aruco/test/test_aruco_utils.hpp index 033d8f9bb4f..9ae94f789ef 100644 --- a/modules/aruco/test/test_aruco_utils.hpp +++ b/modules/aruco/test/test_aruco_utils.hpp @@ -65,16 +65,16 @@ static inline void projectMarker(Mat& img, Ptr board, int markerIn // canonical image Mat markerImg; const int markerSizePixels = 100; - aruco::drawMarker(board->dictionary, board->ids[markerIndex], markerSizePixels, markerImg, markerBorder); + aruco::drawMarker(board->getDictionary(), board->getIds()[markerIndex], markerSizePixels, markerImg, markerBorder); // projected corners Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); - vector< Point2f > corners; + vector corners; // get max coordinate of board - Point3f maxCoord = board->rightBottomBorder; + Point3f maxCoord = board->getRightBottomBorder(); // copy objPoints - vector objPoints = board->objPoints[markerIndex]; + vector objPoints = board->getObjPoints()[markerIndex]; // move the marker to the origin for (size_t i = 0; i < objPoints.size(); i++) objPoints[i] -= (maxCoord / 2.f); @@ -82,7 +82,7 @@ static inline void projectMarker(Mat& img, Ptr board, int markerIn projectPoints(objPoints, rvec, tvec, cameraMatrix, distCoeffs, corners); // get perspective transform - vector< Point2f > originalCorners; + vector originalCorners; originalCorners.push_back(Point2f(0, 0)); originalCorners.push_back(Point2f((float)markerSizePixels, 0)); originalCorners.push_back(Point2f((float)markerSizePixels, (float)markerSizePixels)); @@ -115,7 +115,7 @@ static inline Mat projectBoard(Ptr& board, Mat cameraMatrix, d getSyntheticRT(yaw, pitch, distance, rvec, tvec); Mat img = Mat(imageSize, CV_8UC1, Scalar::all(255)); - for (unsigned int index = 0; index < board->ids.size(); index++) { + for (unsigned int index = 0; index < board->getIds().size(); index++) { projectMarker(img, board.staticCast(), index, cameraMatrix, rvec, tvec, markerBorder); } diff --git a/modules/aruco/test/test_arucodetection.cpp b/modules/aruco/test/test_arucodetection.cpp index c2715cf2d51..78ec99cc054 100644 --- a/modules/aruco/test/test_arucodetection.cpp +++ b/modules/aruco/test/test_arucodetection.cpp @@ -57,8 +57,7 @@ CV_ArucoDetectionSimple::CV_ArucoDetectionSimple() {} void CV_ArucoDetectionSimple::run(int) { - - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); // 20 images for(int i = 0; i < 20; i++) { @@ -74,7 +73,7 @@ void CV_ArucoDetectionSimple::run(int) { for(int x = 
0; x < 2; x++) { Mat marker; int id = i * 4 + y * 2 + x; - aruco::drawMarker(dictionary, id, markerSidePixels, marker); + aruco::drawMarker(detector.dictionary, id, markerSidePixels, marker); Point2f firstCorner = Point2f(markerSidePixels / 2.f + x * (1.5f * markerSidePixels), markerSidePixels / 2.f + y * (1.5f * markerSidePixels)); @@ -95,9 +94,8 @@ void CV_ArucoDetectionSimple::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // check detection results for(unsigned int m = 0; m < groundTruthIds.size(); m++) { @@ -277,7 +275,9 @@ void CV_ArucoDetectionPerspective::run(int) { cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; cameraMatrix.at< double >(1, 2) = imgSize.height / 2; - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 1; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); // detect from different positions for(double distance = 0.1; distance < 0.7; distance += 0.2) { @@ -288,13 +288,11 @@ void CV_ArucoDetectionPerspective::run(int) { iter++; vector< Point2f > groundTruthCorners; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 1; params->markerBorderBits = markerBorder; /// create synthetic image Mat img= - projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch), + projectMarker(detector.dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder, groundTruthCorners, szEnclosed); // marker :: Inverted if(ArucoAlgParams::DETECT_INVERTED_MARKER == arucoAlgParams){ @@ -314,7 +312,7 @@ void CV_ArucoDetectionPerspective::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // check results if(ids.size() != 1 || (ids.size() == 1 && ids[0] != currentId)) { @@ -360,8 +358,8 @@ CV_ArucoDetectionMarkerSize::CV_ArucoDetectionMarkerSize() {} void CV_ArucoDetectionMarkerSize::run(int) { - - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); int markerSide = 20; int imageSize = 200; @@ -372,17 +370,16 @@ void CV_ArucoDetectionMarkerSize::run(int) { // create synthetic image Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255)); - aruco::drawMarker(dictionary, id, markerSide, marker); + aruco::drawMarker(detector.dictionary, id, markerSide, marker); Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide); marker.copyTo(aux); vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); // set a invalid minMarkerPerimeterRate params->minMarkerPerimeterRate = min(4., (4. 
* markerSide) / float(imageSize) + 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -391,7 +388,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set an valid minMarkerPerimeterRate params->minMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) - 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -400,7 +397,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set a invalid maxMarkerPerimeterRate params->maxMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) - 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -409,7 +406,7 @@ void CV_ArucoDetectionMarkerSize::run(int) { // set an valid maxMarkerPerimeterRate params->maxMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) + 0.1); - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -436,30 +433,32 @@ CV_ArucoBitCorrection::CV_ArucoBitCorrection() {} void CV_ArucoBitCorrection::run(int) { - Ptr _dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - aruco::Dictionary &dictionary = *_dictionary; - aruco::Dictionary dictionary2 = *_dictionary; + Ptr _dictionary1 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr _dictionary2 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + aruco::Dictionary &dictionary1 = *_dictionary1; + aruco::Dictionary &dictionary2 = *_dictionary2; + Ptr params = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector1(_dictionary1, params); int markerSide = 50; int imageSize = 150; - Ptr params = aruco::DetectorParameters::create(); // 10 markers for(int l = 0; l < 10; l++) { Mat marker; int id = 10 + l * 20; - Mat currentCodeBytes = dictionary.bytesList.rowRange(id, id + 1); + Mat currentCodeBytes = dictionary1.bytesList.rowRange(id, id + 1); // 5 valid cases for(int i = 0; i < 5; i++) { // how many bit errors (the error is low enough so it can be corrected) params->errorCorrectionRate = 0.2 + i * 0.1; int errors = - (int)std::floor(dictionary.maxCorrectionBits * params->errorCorrectionRate - 1.); + (int)std::floor(dictionary1.maxCorrectionBits * params->errorCorrectionRate - 1.); // create erroneous marker in currentCodeBits Mat currentCodeBits = - aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary.markerSize); + aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize); for(int e = 0; e < errors; e++) { currentCodeBits.ptr< unsigned char >()[2 * e] = !currentCodeBits.ptr< unsigned char >()[2 * e]; @@ -476,7 +475,7 @@ void CV_ArucoBitCorrection::run(int) { // try to detect using original dictionary 
vector< vector< Point2f > > corners; vector< int > ids; - aruco::detectMarkers(img, _dictionary, corners, ids, params); + detector1.detectMarkers(img, corners, ids); if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) { ts->printf(cvtest::TS::LOG, "Error in bit correction"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -489,11 +488,11 @@ void CV_ArucoBitCorrection::run(int) { // how many bit errors (the error is too high to be corrected) params->errorCorrectionRate = 0.2 + i * 0.1; int errors = - (int)std::floor(dictionary.maxCorrectionBits * params->errorCorrectionRate + 1.); + (int)std::floor(dictionary1.maxCorrectionBits * params->errorCorrectionRate + 1.); // create erroneous marker in currentCodeBits Mat currentCodeBits = - aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary.markerSize); + aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize); for(int e = 0; e < errors; e++) { currentCodeBits.ptr< unsigned char >()[2 * e] = !currentCodeBits.ptr< unsigned char >()[2 * e]; @@ -502,9 +501,9 @@ void CV_ArucoBitCorrection::run(int) { // dictionary3 is only composed by the modified marker (in its original form) Ptr _dictionary3 = makePtr( dictionary2.bytesList.rowRange(id, id + 1).clone(), - dictionary.markerSize, - dictionary.maxCorrectionBits); - + dictionary1.markerSize, + dictionary1.maxCorrectionBits); + aruco::ArucoDetector detector3(_dictionary3, params); // add erroneous marker to dictionary2 in order to create the erroneous marker image Mat currentCodeBytesError = aruco::Dictionary::getByteListFromBits(currentCodeBits); currentCodeBytesError.copyTo(dictionary2.bytesList.rowRange(id, id + 1)); @@ -516,7 +515,7 @@ void CV_ArucoBitCorrection::run(int) { // try to detect using dictionary3, it should fail vector< vector< Point2f > > corners; vector< int > ids; - aruco::detectMarkers(img, _dictionary3, corners, ids, params); + detector3.detectMarkers(img, corners, ids); if(corners.size() != 0) { ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::errorCorrectionRate"); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); @@ -569,8 +568,7 @@ TEST(CV_ArucoTutorial, can_find_singlemarkersoriginal) { string img_path = cvtest::findDataFile("singlemarkersoriginal.jpg", false); Mat image = imread(img_path); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, rejected; @@ -584,7 +582,7 @@ TEST(CV_ArucoTutorial, can_find_singlemarkersoriginal) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -609,9 +607,10 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) FileStorage fs(dictPath, FileStorage::READ); dictionary->aruco::Dictionary::readDictionary(fs.root()); // set marker from tutorial_dict.yml - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(dictionary, detectorParams); + vector< int > ids; vector< vector< Point2f > > corners, rejected; const size_t N = 35ull; @@ -638,7 +637,7 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[i] = goldCorners[i]; - aruco::detectMarkers(image, 
dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); @@ -657,8 +656,7 @@ TEST(CV_ArucoTutorial, can_find_gboriginal) TEST(CV_ArucoDetectMarkers, regression_3192) { - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_4X4_50); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50)); vector< int > markerIds; vector > markerCorners; string imgPath = cvtest::findDataFile("aruco/regression_3192.png"); @@ -670,7 +668,7 @@ TEST(CV_ArucoDetectMarkers, regression_3192) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams); + detector.detectMarkers(image, markerCorners, markerIds); ASSERT_EQ(N, markerIds.size()); for (size_t i = 0; i < N; i++) @@ -688,9 +686,8 @@ TEST(CV_ArucoDetectMarkers, regression_3192) TEST(CV_ArucoDetectMarkers, regression_2492) { - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_5X5_50); - Ptr detectorParams = aruco::DetectorParameters::create(); - detectorParams->minMarkerDistanceRate = 0.026; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_5X5_50)); + detector.params->minMarkerDistanceRate = 0.026; vector< int > markerIds; vector > markerCorners; string imgPath = cvtest::findDataFile("aruco/regression_2492.png"); @@ -705,7 +702,7 @@ TEST(CV_ArucoDetectMarkers, regression_2492) for (size_t i = 0; i < N; i++) mapGoldCorners[goldCornersIds[i]].push_back(goldCorners[i]); - aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams); + detector.detectMarkers(image, markerCorners, markerIds); ASSERT_EQ(N, markerIds.size()); for (size_t i = 0; i < N; i++) @@ -746,11 +743,10 @@ struct ArucoThreading: public testing::TestWithParam params = cv::aruco::DetectorParameters::create(); // We are not testing against different dictionaries // As we are interested mostly in small images, smaller // markers is better -> 4x4 - cv::Ptr dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50)); // Height of the test image can be chosen quite freely // We aim to test against small images as in those the @@ -762,19 +758,19 @@ TEST_P(ArucoThreading, number_of_threads_does_not_change_results) // Create a test image cv::Mat img_marker; - cv::aruco::drawMarker(dictionary, 23, height_marker, img_marker, 1); + cv::aruco::drawMarker(detector.dictionary, 23, height_marker, img_marker, 1); // Copy to bigger image to get a white border cv::Mat img(height_img, height_img, CV_8UC1, cv::Scalar(255)); img_marker.copyTo(img(cv::Rect(shift, shift, height_marker, height_marker))); - params->cornerRefinementMethod = GetParam(); + detector.params->cornerRefinementMethod = GetParam(); std::vector > original_corners; std::vector original_ids; { NumThreadsSetter thread_num_setter(1); - cv::aruco::detectMarkers(img, dictionary, original_corners, original_ids, params); + detector.detectMarkers(img, original_corners, original_ids); } ASSERT_EQ(original_ids.size(), 1ull); @@ -787,7 +783,7 @@ TEST_P(ArucoThreading, number_of_threads_does_not_change_results) std::vector > corners; std::vector ids; - cv::aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.detectMarkers(img, corners, ids); // If we don't find any markers, the test is broken ASSERT_EQ(ids.size(), 
1ull); diff --git a/modules/aruco/test/test_boarddetection.cpp b/modules/aruco/test/test_boarddetection.cpp index ed4ee161719..148c6d440d0 100644 --- a/modules/aruco/test/test_boarddetection.cpp +++ b/modules/aruco/test/test_boarddetection.cpp @@ -55,6 +55,8 @@ class CV_ArucoBoardPose : public cvtest::BaseTest { public: CV_ArucoBoardPose(ArucoAlgParams arucoAlgParams) { + Ptr params; + Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; if (arucoAlgParams == ArucoAlgParams::USE_ARUCO3) { @@ -63,10 +65,11 @@ class CV_ArucoBoardPose : public cvtest::BaseTest { params->minSideLengthCanonicalImg = 16; params->errorCorrectionRate = 0.8; } + detector = aruco::ArucoDetector(dictionary, params); } protected: - Ptr params; + aruco::ArucoDetector detector; void run(int); }; @@ -75,8 +78,7 @@ void CV_ArucoBoardPose::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, dictionary); + Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, detector.dictionary); Ptr board = gridboard.staticCast(); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -87,41 +89,43 @@ void CV_ArucoBoardPose::run(int) { for(double distance = 0.2; distance <= 0.4; distance += 0.15) { for(int yaw = -55; yaw <= 50; yaw += 25) { for(int pitch = -55; pitch <= 50; pitch += 25) { - for(unsigned int i = 0; i < gridboard->ids.size(); i++) - gridboard->ids[i] = (iter + int(i)) % 250; + vector tmpIds; + for(unsigned int i = 0; i < gridboard->getIds().size(); i++) + tmpIds.push_back((iter + int(i)) % 250); + gridboard->setIds(tmpIds); int markerBorder = iter % 2 + 1; iter++; // create synthetic image Mat img = projectBoard(gridboard, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder); - vector< vector< Point2f > > corners; - vector< int > ids; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + vector > corners; + vector ids; + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); - ASSERT_EQ(ids.size(), gridboard->ids.size()); + ASSERT_EQ(ids.size(), gridboard->getIds().size()); // estimate pose Mat rvec, tvec; aruco::estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec); // check axes - vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, gridboard->rightBottomBorder.x); - vector topLeft = getMarkerById(gridboard->ids[0], corners, ids); + vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, gridboard->getRightBottomBorder().x); + vector topLeft = getMarkerById(gridboard->getIds()[0], corners, ids); ASSERT_NEAR(topLeft[0].x, axes[0].x, 2.f); ASSERT_NEAR(topLeft[0].y, axes[0].y, 2.f); - vector topRight = getMarkerById(gridboard->ids[2], corners, ids); + vector topRight = getMarkerById(gridboard->getIds()[2], corners, ids); ASSERT_NEAR(topRight[1].x, axes[1].x, 2.f); ASSERT_NEAR(topRight[1].y, axes[1].y, 2.f); - vector bottomLeft = getMarkerById(gridboard->ids[6], corners, ids); + vector bottomLeft = getMarkerById(gridboard->getIds()[6], corners, ids); ASSERT_NEAR(bottomLeft[3].x, axes[2].x, 2.f); ASSERT_NEAR(bottomLeft[3].y, axes[2].y, 2.f); // check estimate result for(unsigned int i = 0; i < ids.size(); i++) { int foundIdx = 
-1; - for(unsigned int j = 0; j < gridboard->ids.size(); j++) { - if(gridboard->ids[j] == ids[i]) { + for(unsigned int j = 0; j < gridboard->getIds().size(); j++) { + if(gridboard->getIds()[j] == ids[i]) { foundIdx = int(j); break; } @@ -134,7 +138,7 @@ void CV_ArucoBoardPose::run(int) { } vector< Point2f > projectedCorners; - projectPoints(gridboard->objPoints[foundIdx], rvec, tvec, cameraMatrix, distCoeffs, + projectPoints(gridboard->getObjPoints()[foundIdx], rvec, tvec, cameraMatrix, distCoeffs, projectedCorners); for(int c = 0; c < 4; c++) { @@ -160,15 +164,18 @@ class CV_ArucoRefine : public cvtest::BaseTest { public: CV_ArucoRefine(ArucoAlgParams arucoAlgParams) { - params = aruco::DetectorParameters::create(); + Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); params->minDistanceToBorder = 3; params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; if (arucoAlgParams == ArucoAlgParams::USE_ARUCO3) params->useAruco3Detection = true; + Ptr refineParams = makePtr(10, 3., true); + detector = aruco::ArucoDetector(dictionary, params, refineParams); } protected: - Ptr params; + aruco::ArucoDetector detector; void run(int); }; @@ -178,8 +185,7 @@ void CV_ArucoRefine::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, dictionary); + Ptr gridboard = aruco::GridBoard::create(3, 3, 0.02f, 0.005f, detector.dictionary); Ptr board = gridboard.staticCast(); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -190,8 +196,10 @@ void CV_ArucoRefine::run(int) { for(double distance = 0.2; distance <= 0.4; distance += 0.2) { for(int yaw = -60; yaw < 60; yaw += 30) { for(int pitch = -60; pitch <= 60; pitch += 30) { - for(unsigned int i = 0; i < gridboard->ids.size(); i++) - gridboard->ids[i] = (iter + int(i)) % 250; + vector tmpIds; + for(unsigned int i = 0; i < gridboard->getIds().size(); i++) + tmpIds.push_back(iter + int(i) % 250); + gridboard->setIds(tmpIds); int markerBorder = iter % 2 + 1; iter++; @@ -199,10 +207,10 @@ void CV_ArucoRefine::run(int) { Mat img = projectBoard(gridboard, cameraMatrix, deg2rad(yaw), deg2rad(pitch), distance, imgSize, markerBorder); // detect markers - vector< vector< Point2f > > corners, rejected; - vector< int > ids; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params, rejected); + vector > corners, rejected; + vector ids; + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids, rejected); // remove a marker from detection int markersBeforeDelete = (int)ids.size(); @@ -213,8 +221,8 @@ void CV_ArucoRefine::run(int) { ids.erase(ids.begin(), ids.begin() + 1); // try to refind the erased marker - aruco::refineDetectedMarkers(img, board, corners, ids, rejected, cameraMatrix, - distCoeffs, 10, 3., true, noArray(), params); + detector.refineDetectedMarkers(img, board, corners, ids, rejected, cameraMatrix, + distCoeffs, noArray()); // check result if((int)ids.size() < markersBeforeDelete) { @@ -260,23 +268,22 @@ TEST(CV_ArucoBoardPose, CheckNegativeZ) 0., 0., 1 }; cv::Mat cameraMatrix = cv::Mat(3, 3, CV_64F, matrixData); - cv::Ptr boardPtr(new cv::aruco::Board); + cv::Ptr boardPtr = makePtr(); cv::aruco::Board& board = *boardPtr; - board.ids.push_back(0); - 
board.ids.push_back(1); - - vector pts3d; - pts3d.push_back(cv::Point3f(0.326198f, -0.030621f, 0.303620f)); - pts3d.push_back(cv::Point3f(0.325340f, -0.100594f, 0.301862f)); - pts3d.push_back(cv::Point3f(0.255859f, -0.099530f, 0.293416f)); - pts3d.push_back(cv::Point3f(0.256717f, -0.029557f, 0.295174f)); - board.objPoints.push_back(pts3d); - pts3d.clear(); - pts3d.push_back(cv::Point3f(-0.033144f, -0.034819f, 0.245216f)); - pts3d.push_back(cv::Point3f(-0.035507f, -0.104705f, 0.241987f)); - pts3d.push_back(cv::Point3f(-0.105289f, -0.102120f, 0.237120f)); - pts3d.push_back(cv::Point3f(-0.102926f, -0.032235f, 0.240349f)); - board.objPoints.push_back(pts3d); + + vector pts3d1, pts3d2; + pts3d1.push_back(cv::Point3f(0.326198f, -0.030621f, 0.303620f)); + pts3d1.push_back(cv::Point3f(0.325340f, -0.100594f, 0.301862f)); + pts3d1.push_back(cv::Point3f(0.255859f, -0.099530f, 0.293416f)); + pts3d1.push_back(cv::Point3f(0.256717f, -0.029557f, 0.295174f)); + + pts3d2.push_back(cv::Point3f(-0.033144f, -0.034819f, 0.245216f)); + pts3d2.push_back(cv::Point3f(-0.035507f, -0.104705f, 0.241987f)); + pts3d2.push_back(cv::Point3f(-0.105289f, -0.102120f, 0.237120f)); + pts3d2.push_back(cv::Point3f(-0.102926f, -0.032235f, 0.240349f)); + + board.setObjPoints({pts3d1, pts3d2}); + board.setIds(vector{0, 1}); vector > corners; vector pts2d; @@ -293,12 +300,12 @@ TEST(CV_ArucoBoardPose, CheckNegativeZ) corners.push_back(pts2d); Vec3d rvec, tvec; - int nUsed = cv::aruco::estimatePoseBoard(corners, board.ids, boardPtr, cameraMatrix, Mat(), rvec, tvec); + int nUsed = cv::aruco::estimatePoseBoard(corners, board.getIds(), boardPtr, cameraMatrix, Mat(), rvec, tvec); ASSERT_EQ(nUsed, 2); cv::Matx33d rotm; cv::Point3d out; cv::Rodrigues(rvec, rotm); - out = cv::Point3d(tvec) + rotm*Point3d(board.objPoints[0][0]); + out = cv::Point3d(tvec) + rotm*Point3d(board.getObjPoints()[0][0]); ASSERT_GT(out.z, 0); corners.clear(); pts2d.clear(); @@ -314,11 +321,11 @@ TEST(CV_ArucoBoardPose, CheckNegativeZ) pts2d.push_back(cv::Point2f(586.3f, 188.5f)); corners.push_back(pts2d); - nUsed = cv::aruco::estimatePoseBoard(corners, board.ids, boardPtr, cameraMatrix, Mat(), rvec, tvec, true); + nUsed = cv::aruco::estimatePoseBoard(corners, board.getIds(), boardPtr, cameraMatrix, Mat(), rvec, tvec, true); ASSERT_EQ(nUsed, 2); cv::Rodrigues(rvec, rotm); - out = cv::Point3d(tvec) + rotm*Point3d(board.objPoints[0][0]); + out = cv::Point3d(tvec) + rotm*Point3d(board.getObjPoints()[0][0]); ASSERT_GT(out.z, 0); } diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp index 91b4002d701..99b87264df5 100644 --- a/modules/aruco/test/test_charucodetection.cpp +++ b/modules/aruco/test/test_charucodetection.cpp @@ -93,7 +93,7 @@ static Mat projectCharucoBoard(Ptr &board, Mat cameraMatrix // project markers Mat img = Mat(imageSize, CV_8UC1, Scalar::all(255)); - for(unsigned int indexMarker = 0; indexMarker < board->ids.size(); indexMarker++) { + for(unsigned int indexMarker = 0; indexMarker < board->getIds().size(); indexMarker++) { projectMarker(img, board.staticCast(), indexMarker, cameraMatrix, rvec, tvec, markerBorder); } @@ -132,12 +132,14 @@ void CV_CharucoDetection::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, dictionary); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 3; + 
aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); + Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, detector.dictionary); - cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 600; - cameraMatrix.at< double >(0, 2) = imgSize.width / 2; - cameraMatrix.at< double >(1, 2) = imgSize.height / 2; + cameraMatrix.at(0, 0) = cameraMatrix.at(1, 1) = 600; + cameraMatrix.at(0, 2) = imgSize.width / 2; + cameraMatrix.at(1, 2) = imgSize.height / 2; Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0)); @@ -155,12 +157,11 @@ void CV_CharucoDetection::run(int) { distance, imgSize, markerBorder, rvec, tvec); // detect markers - vector< vector< Point2f > > corners; - vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 3; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + vector > corners; + vector ids; + + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); if(ids.size() == 0) { ts->printf(cvtest::TS::LOG, "Marker detection failed"); @@ -169,8 +170,8 @@ void CV_CharucoDetection::run(int) { } // interpolate charuco corners - vector< Point2f > charucoCorners; - vector< int > charucoIds; + vector charucoCorners; + vector charucoIds; if(iter % 2 == 0) { aruco::interpolateCornersCharuco(corners, ids, img, board, charucoCorners, @@ -187,7 +188,7 @@ void CV_CharucoDetection::run(int) { vector copyChessboardCorners = board->chessboardCorners; // move copyChessboardCorners points for (size_t i = 0; i < copyChessboardCorners.size(); i++) - copyChessboardCorners[i] -= board->rightBottomBorder / 2.f; + copyChessboardCorners[i] -= board->getRightBottomBorder() / 2.f; projectPoints(copyChessboardCorners, rvec, tvec, cameraMatrix, distCoeffs, projectedCharucoCorners); @@ -237,8 +238,10 @@ void CV_CharucoPoseEstimation::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, dictionary); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 3; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); + Ptr board = aruco::CharucoBoard::create(4, 4, 0.03f, 0.015f, detector.dictionary); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -261,12 +264,10 @@ void CV_CharucoPoseEstimation::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 3; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); - ASSERT_EQ(ids.size(), board->ids.size()); + ASSERT_EQ(ids.size(), board->getIds().size()); // interpolate charuco corners vector< Point2f > charucoCorners; @@ -290,10 +291,10 @@ void CV_CharucoPoseEstimation::run(int) { // check axes const float offset = (board->getSquareLength() - board->getMarkerLength()) / 2.f; vector axes = getAxis(cameraMatrix, distCoeffs, rvec, tvec, board->getSquareLength(), offset); - vector topLeft = getMarkerById(board->ids[0], corners, ids); + vector topLeft = getMarkerById(board->getIds()[0], corners, ids); 
ASSERT_NEAR(topLeft[0].x, axes[1].x, 3.f); ASSERT_NEAR(topLeft[0].y, axes[1].y, 3.f); - vector bottomLeft = getMarkerById(board->ids[2], corners, ids); + vector bottomLeft = getMarkerById(board->getIds()[2], corners, ids); ASSERT_NEAR(bottomLeft[0].x, axes[2].x, 3.f); ASSERT_NEAR(bottomLeft[0].y, axes[2].y, 3.f); @@ -348,11 +349,13 @@ void CV_CharucoDiamondDetection::run(int) { int iter = 0; Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1); Size imgSize(500, 500); - Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); + Ptr params = aruco::DetectorParameters::create(); + params->minDistanceToBorder = 0; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params); float squareLength = 0.03f; float markerLength = 0.015f; Ptr board = - aruco::CharucoBoard::create(3, 3, squareLength, markerLength, dictionary); + aruco::CharucoBoard::create(3, 3, squareLength, markerLength, detector.dictionary); cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650; cameraMatrix.at< double >(0, 2) = imgSize.width / 2; @@ -365,8 +368,10 @@ void CV_CharucoDiamondDetection::run(int) { for(int pitch = -50; pitch <= 50; pitch += 25) { int markerBorder = iter % 2 + 1; + vector idsTmp; for(int i = 0; i < 4; i++) - board->ids[i] = 4 * iter + i; + idsTmp.push_back(4 * iter + i); + board->setIds(idsTmp); iter++; // get synthetic image @@ -377,22 +382,19 @@ void CV_CharucoDiamondDetection::run(int) { // detect markers vector< vector< Point2f > > corners; vector< int > ids; - Ptr params = aruco::DetectorParameters::create(); - params->minDistanceToBorder = 0; - params->markerBorderBits = markerBorder; - aruco::detectMarkers(img, dictionary, corners, ids, params); + detector.params->markerBorderBits = markerBorder; + detector.detectMarkers(img, corners, ids); if(ids.size() != 4) { ts->printf(cvtest::TS::LOG, "Not enough markers for diamond detection"); ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH); return; } - // detect diamonds vector< vector< Point2f > > diamondCorners; vector< Vec4i > diamondIds; - aruco::detectCharucoDiamond(img, corners, ids, squareLength / markerLength, - diamondCorners, diamondIds, cameraMatrix, distCoeffs); + aruco::detectCharucoDiamond(img, corners, ids, squareLength / markerLength, diamondCorners, diamondIds, + cameraMatrix, distCoeffs, detector.dictionary); // check results if(diamondIds.size() != 1) { @@ -402,7 +404,7 @@ void CV_CharucoDiamondDetection::run(int) { } for(int i = 0; i < 4; i++) { - if(diamondIds[0][i] != board->ids[i]) { + if(diamondIds[0][i] != board->getIds()[i]) { ts->printf(cvtest::TS::LOG, "Incorrect diamond ids"); ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH); return; @@ -416,7 +418,7 @@ void CV_CharucoDiamondDetection::run(int) { vector copyChessboardCorners = board->chessboardCorners; // move copyChessboardCorners points for (size_t i = 0; i < copyChessboardCorners.size(); i++) - copyChessboardCorners[i] -= board->rightBottomBorder / 2.f; + copyChessboardCorners[i] -= board->getRightBottomBorder() / 2.f; projectPoints(copyChessboardCorners, rvec, tvec, cameraMatrix, distCoeffs, projectedDiamondCorners); @@ -440,7 +442,7 @@ void CV_CharucoDiamondDetection::run(int) { } Ptr estimateParameters = aruco::EstimateParameters::create(); - estimateParameters->pattern = aruco::CW_top_left_corner; + estimateParameters->pattern = aruco::ARUCO_CW_TOP_LEFT_CORNER; // estimate diamond pose vector< Vec3d > estimatedRvec, estimatedTvec; aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, 
distCoeffs, estimatedRvec, @@ -646,10 +648,12 @@ TEST(Charuco, testBoardSubpixelCoords) auto params = cv::aruco::DetectorParameters::create(); params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG; + aruco::ArucoDetector detector(dict, params); + std::vector ids; std::vector> corners, rejected; - cv::aruco::detectMarkers(gray, dict, corners, ids, params, rejected); + detector.detectMarkers(gray, corners, ids, rejected); ASSERT_EQ(ids.size(), size_t(8)); @@ -671,8 +675,7 @@ TEST(CV_ArucoTutorial, can_find_choriginal) { string imgPath = cvtest::findDataFile("choriginal.jpg", false); Mat image = imread(imgPath); - cv::Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, rejected; @@ -691,7 +694,7 @@ TEST(CV_ArucoTutorial, can_find_choriginal) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[i] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -711,8 +714,7 @@ TEST(CV_ArucoTutorial, can_find_chocclusion) { string imgPath = cvtest::findDataFile("chocclusion_original.jpg", false); Mat image = imread(imgPath); - cv::Ptr dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250); - Ptr detectorParams = aruco::DetectorParameters::create(); + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250)); vector< int > ids; vector< vector< Point2f > > corners, rejected; @@ -730,7 +732,7 @@ TEST(CV_ArucoTutorial, can_find_chocclusion) for (int i = 0; i < static_cast(N); i++) mapGoldCorners[goldCornersIds[i]] = goldCorners[i]; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); ASSERT_EQ(N, ids.size()); for (size_t i = 0; i < N; i++) @@ -762,6 +764,8 @@ TEST(CV_ArucoTutorial, can_find_diamondmarkers) detectorParams->readDetectorParameters(fs.root()); detectorParams->cornerRefinementMethod = 3; + aruco::ArucoDetector detector(dictionary, detectorParams); + vector< int > ids; vector< vector< Point2f > > corners, rejected; const size_t N = 12ull; @@ -771,7 +775,7 @@ TEST(CV_ArucoTutorial, can_find_diamondmarkers) for (int i = 0; i < static_cast(N); i++) counterGoldCornersIds[goldCornersIds[i]]++; - aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected); + detector.detectMarkers(image, corners, ids, rejected); map counterRes; for (size_t i = 0; i < N; i++) { @@ -788,16 +792,16 @@ TEST(Charuco, issue_14014) string imgPath = cvtest::findDataFile("aruco/recover.png"); Mat img = imread(imgPath); - Ptr dict = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(cv::aruco::DICT_7X7_250)); - Ptr board = aruco::CharucoBoard::create(8, 5, 0.03455f, 0.02164f, dict); Ptr detectorParams = aruco::DetectorParameters::create(); detectorParams->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX; detectorParams->cornerRefinementMinAccuracy = 0.01; + aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_7X7_250), detectorParams); + Ptr board = aruco::CharucoBoard::create(8, 5, 0.03455f, 0.02164f, detector.dictionary); vector corners, rejectedPoints; vector ids; - aruco::detectMarkers(img, dict, corners, ids, detectorParams, rejectedPoints); + 
detector.detectMarkers(img, corners, ids, rejectedPoints);
     ASSERT_EQ(corners.size(), 19ull);
     EXPECT_EQ(Size(4, 1), corners[0].size()); // check dimension of detected corners
@@ -806,7 +810,7 @@ TEST(Charuco, issue_14014)
     ASSERT_EQ(rejectedPoints.size(), 26ull); // optional check to track regressions
     EXPECT_EQ(Size(4, 1), rejectedPoints[0].size()); // check dimension of detected corners
-    aruco::refineDetectedMarkers(img, board, corners, ids, rejectedPoints);
+    detector.refineDetectedMarkers(img, board, corners, ids, rejectedPoints);
     ASSERT_EQ(corners.size(), 20ull);
     EXPECT_EQ(Size(4, 1), corners[0].size()); // check dimension of rejected corners after successfully refine
diff --git a/modules/aruco/test/test_precomp.hpp b/modules/aruco/test/test_precomp.hpp
index d8c903f4c7b..86595ccbda8 100644
--- a/modules/aruco/test/test_precomp.hpp
+++ b/modules/aruco/test/test_precomp.hpp
@@ -7,7 +7,6 @@
 #include "opencv2/ts.hpp"
 #include "opencv2/imgproc.hpp"
 #include "opencv2/calib3d.hpp"
-#include "opencv2/aruco.hpp"
 #include
 #endif
diff --git a/modules/ovis/samples/aruco_ar_demo.cpp b/modules/ovis/samples/aruco_ar_demo.cpp
index 8e2464046c1..2398a7182a3 100644
--- a/modules/ovis/samples/aruco_ar_demo.cpp
+++ b/modules/ovis/samples/aruco_ar_demo.cpp
@@ -3,7 +3,7 @@
 #include
 #include
-#include
+#include
 #include
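
The test changes above all follow the same migration: the free functions aruco::detectMarkers() and aruco::refineDetectedMarkers(), driven by a Dictionary plus DetectorParameters pair, are replaced by a stateful aruco::ArucoDetector that owns both. A minimal sketch of the new call pattern as the updated tests use it; the image path, marker id, and refinement choice below are placeholders, not part of this patch.

// Detector-object workflow replacing the free-function calls in these tests.
#include <opencv2/aruco.hpp>      // after this change, pulls in opencv2/aruco_detector.hpp
#include <opencv2/imgcodecs.hpp>
#include <vector>

using namespace cv;

int main() {
    Mat image = imread("input.png", IMREAD_GRAYSCALE);  // placeholder input

    Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
    params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX;

    // Dictionary and parameters are bound once at construction...
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params);

    std::vector<int> ids;
    std::vector<std::vector<Point2f>> corners, rejected;
    // ...so detectMarkers() no longer takes them as arguments.
    detector.detectMarkers(image, corners, ids, rejected);

    // drawMarker() is still a free function; the tests reach the dictionary
    // through the detector's public member.
    Mat marker;
    aruco::drawMarker(detector.dictionary, /*id=*/23, /*sidePixels=*/200, marker);
    return 0;
}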
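The board tests also switch from the public Board members (ids, objPoints, rightBottomBorder, dictionary) to accessor methods. A small sketch of that accessor style, under the same assumptions as above; the helper name and id offset are illustrative only.

// Board accessor migration seen in CV_ArucoBoardPose and the charuco tests.
#include <opencv2/aruco.hpp>
#include <vector>

using namespace cv;

static void relabelBoard(Ptr<aruco::GridBoard>& board, int offset) {
    std::vector<int> newIds;
    for (size_t i = 0; i < board->getIds().size(); i++)   // was: board->ids.size()
        newIds.push_back((offset + int(i)) % 250);
    board->setIds(newIds);                                 // was: board->ids[i] = ...

    // Object points and the board extent are likewise read through getters.
    std::vector<std::vector<Point3f>> objPoints = board->getObjPoints(); // was: board->objPoints
    Point3f extent = board->getRightBottomBorder();                      // was: board->rightBottomBorder
    (void)objPoints;
    (void)extent;
}

int main() {
    Ptr<aruco::GridBoard> board = aruco::GridBoard::create(
        3, 3, 0.02f, 0.005f, aruco::getPredefinedDictionary(aruco::DICT_6X6_250));
    relabelBoard(board, 7);
    return 0;
}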
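Finally, marker recovery moves from the argument-heavy aruco::refineDetectedMarkers() overload to refine settings fixed at detector construction, as in CV_ArucoRefine. In the sketch below the RefineParameters type name is an assumption, since the template arguments were stripped from the diff text; the (10, 3., true) values are the ones the test passes to makePtr.

// Refinement with settings bound to the detector instead of passed per call.
#include <opencv2/aruco.hpp>
#include <vector>

using namespace cv;

void refineSketch(const Mat& img, const Ptr<aruco::Board>& board,
                  const Mat& cameraMatrix, const Mat& distCoeffs) {
    Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
    params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX;
    // Type name assumed; values mirror the old per-call arguments (10, 3., true).
    Ptr<aruco::RefineParameters> refineParams = makePtr<aruco::RefineParameters>(10.f, 3.f, true);

    aruco::ArucoDetector detector(board->getDictionary(), params, refineParams);

    std::vector<std::vector<Point2f>> corners, rejected;
    std::vector<int> ids;
    detector.detectMarkers(img, corners, ids, rejected);

    // Previously: aruco::refineDetectedMarkers(img, board, corners, ids, rejected,
    //             cameraMatrix, distCoeffs, 10, 3., true, noArray(), params);
    detector.refineDetectedMarkers(img, board, corners, ids, rejected,
                                   cameraMatrix, distCoeffs, noArray());
}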